Diffstat:
-rw-r--r-- drivers/mtd/Kconfig | 225
-rw-r--r-- drivers/mtd/Makefile | 33
-rw-r--r-- drivers/mtd/chips/Kconfig | 243
-rw-r--r-- drivers/mtd/chips/Makefile | 16
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0001.c | 2709
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0002.c | 3114
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0020.c | 1402
-rw-r--r-- drivers/mtd/chips/cfi_probe.c | 463
-rw-r--r-- drivers/mtd/chips/cfi_util.c | 444
-rw-r--r-- drivers/mtd/chips/chipreg.c | 102
-rw-r--r-- drivers/mtd/chips/fwh_lock.h | 108
-rw-r--r-- drivers/mtd/chips/gen_probe.c | 266
-rw-r--r-- drivers/mtd/chips/jedec_probe.c | 2316
-rw-r--r-- drivers/mtd/chips/map_absent.c | 113
-rw-r--r-- drivers/mtd/chips/map_ram.c | 159
-rw-r--r-- drivers/mtd/chips/map_rom.c | 133
-rw-r--r-- drivers/mtd/devices/Kconfig | 216
-rw-r--r-- drivers/mtd/devices/Makefile | 23
-rw-r--r-- drivers/mtd/devices/bcm47xxsflash.c | 382
-rw-r--r-- drivers/mtd/devices/bcm47xxsflash.h | 81
-rw-r--r-- drivers/mtd/devices/block2mtd.c | 527
-rw-r--r-- drivers/mtd/devices/docg3.c | 2086
-rw-r--r-- drivers/mtd/devices/docg3.h | 343
-rw-r--r-- drivers/mtd/devices/mchp23k256.c | 260
-rw-r--r-- drivers/mtd/devices/mchp48l640.c | 384
-rw-r--r-- drivers/mtd/devices/ms02-nv.c | 306
-rw-r--r-- drivers/mtd/devices/ms02-nv.h | 101
-rw-r--r-- drivers/mtd/devices/mtd_dataflash.c | 946
-rw-r--r-- drivers/mtd/devices/mtdram.c | 187
-rw-r--r-- drivers/mtd/devices/phram.c | 442
-rw-r--r-- drivers/mtd/devices/pmc551.c | 847
-rw-r--r-- drivers/mtd/devices/powernv_flash.c | 297
-rw-r--r-- drivers/mtd/devices/serial_flash_cmds.h | 49
-rw-r--r-- drivers/mtd/devices/slram.c | 344
-rw-r--r-- drivers/mtd/devices/spear_smi.c | 1104
-rw-r--r-- drivers/mtd/devices/sst25l.c | 421
-rw-r--r-- drivers/mtd/devices/st_spi_fsm.c | 2148
-rw-r--r-- drivers/mtd/ftl.c | 1063
-rw-r--r-- drivers/mtd/hyperbus/Kconfig | 32
-rw-r--r-- drivers/mtd/hyperbus/Makefile | 5
-rw-r--r-- drivers/mtd/hyperbus/hbmc-am654.c | 271
-rw-r--r-- drivers/mtd/hyperbus/hyperbus-core.c | 140
-rw-r--r-- drivers/mtd/hyperbus/rpc-if.c | 179
-rw-r--r-- drivers/mtd/inftlcore.c | 944
-rw-r--r-- drivers/mtd/inftlmount.c | 776
-rw-r--r-- drivers/mtd/lpddr/Kconfig | 30
-rw-r--r-- drivers/mtd/lpddr/Makefile | 8
-rw-r--r-- drivers/mtd/lpddr/lpddr2_nvm.c | 498
-rw-r--r-- drivers/mtd/lpddr/lpddr_cmds.c | 760
-rw-r--r-- drivers/mtd/lpddr/qinfo_probe.c | 235
-rw-r--r-- drivers/mtd/maps/Kconfig | 388
-rw-r--r-- drivers/mtd/maps/Makefile | 45
-rw-r--r-- drivers/mtd/maps/amd76xrom.c | 348
-rw-r--r-- drivers/mtd/maps/cfi_flagadm.c | 137
-rw-r--r-- drivers/mtd/maps/ck804xrom.c | 386
-rw-r--r-- drivers/mtd/maps/dc21285.c | 231
-rw-r--r-- drivers/mtd/maps/esb2rom.c | 452
-rw-r--r-- drivers/mtd/maps/ichxrom.c | 381
-rw-r--r-- drivers/mtd/maps/impa7.c | 115
-rw-r--r-- drivers/mtd/maps/intel_vr_nor.c | 265
-rw-r--r-- drivers/mtd/maps/l440gx.c | 167
-rw-r--r-- drivers/mtd/maps/lantiq-flash.c | 200
-rw-r--r-- drivers/mtd/maps/map_funcs.c | 44
-rw-r--r-- drivers/mtd/maps/netsc520.c | 127
-rw-r--r-- drivers/mtd/maps/nettel.c | 462
-rw-r--r-- drivers/mtd/maps/pci.c | 330
-rw-r--r-- drivers/mtd/maps/pcmciamtd.c | 753
-rw-r--r-- drivers/mtd/maps/physmap-bt1-rom.c | 125
-rw-r--r-- drivers/mtd/maps/physmap-bt1-rom.h | 17
-rw-r--r-- drivers/mtd/maps/physmap-core.c | 696
-rw-r--r-- drivers/mtd/maps/physmap-gemini.c | 200
-rw-r--r-- drivers/mtd/maps/physmap-gemini.h | 17
-rw-r--r-- drivers/mtd/maps/physmap-ixp4xx.c | 132
-rw-r--r-- drivers/mtd/maps/physmap-ixp4xx.h | 18
-rw-r--r-- drivers/mtd/maps/physmap-versatile.c | 242
-rw-r--r-- drivers/mtd/maps/physmap-versatile.h | 17
-rw-r--r-- drivers/mtd/maps/pismo.c | 284
-rw-r--r-- drivers/mtd/maps/plat-ram.c | 220
-rw-r--r-- drivers/mtd/maps/pxa2xx-flash.c | 140
-rw-r--r-- drivers/mtd/maps/sa1100-flash.c | 311
-rw-r--r-- drivers/mtd/maps/sbc_gxx.c | 224
-rw-r--r-- drivers/mtd/maps/sc520cdp.c | 294
-rw-r--r-- drivers/mtd/maps/scb2_flash.c | 239
-rw-r--r-- drivers/mtd/maps/scx200_docflash.c | 219
-rw-r--r-- drivers/mtd/maps/solutionengine.c | 95
-rw-r--r-- drivers/mtd/maps/sun_uflash.c | 157
-rw-r--r-- drivers/mtd/maps/ts5500_flash.c | 108
-rw-r--r-- drivers/mtd/maps/tsunami_flash.c | 109
-rw-r--r-- drivers/mtd/maps/uclinux.c | 118
-rw-r--r-- drivers/mtd/maps/vmu-flash.c | 817
-rw-r--r-- drivers/mtd/mtd_blkdevs.c | 548
-rw-r--r-- drivers/mtd/mtdblock.c | 361
-rw-r--r-- drivers/mtd/mtdblock_ro.c | 80
-rw-r--r-- drivers/mtd/mtdchar.c | 1439
-rw-r--r-- drivers/mtd/mtdconcat.c | 914
-rw-r--r-- drivers/mtd/mtdcore.c | 2560
-rw-r--r-- drivers/mtd/mtdcore.h | 30
-rw-r--r-- drivers/mtd/mtdoops.c | 457
-rw-r--r-- drivers/mtd/mtdpart.c | 737
-rw-r--r-- drivers/mtd/mtdpstore.c | 579
-rw-r--r-- drivers/mtd/mtdsuper.c | 179
-rw-r--r-- drivers/mtd/mtdswap.c | 1491
-rw-r--r-- drivers/mtd/nand/Kconfig | 66
-rw-r--r-- drivers/mtd/nand/Makefile | 14
-rw-r--r-- drivers/mtd/nand/bbt.c | 128
-rw-r--r-- drivers/mtd/nand/core.c | 405
-rw-r--r-- drivers/mtd/nand/ecc-mtk.c | 621
-rw-r--r-- drivers/mtd/nand/ecc-mxic.c | 878
-rw-r--r-- drivers/mtd/nand/ecc-sw-bch.c | 406
-rw-r--r-- drivers/mtd/nand/ecc-sw-hamming.c | 660
-rw-r--r-- drivers/mtd/nand/ecc.c | 735
-rw-r--r-- drivers/mtd/nand/onenand/Kconfig | 73
-rw-r--r-- drivers/mtd/nand/onenand/Makefile | 14
-rw-r--r-- drivers/mtd/nand/onenand/generic.c | 115
-rw-r--r-- drivers/mtd/nand/onenand/onenand_base.c | 4027
-rw-r--r-- drivers/mtd/nand/onenand/onenand_bbt.c | 245
-rw-r--r-- drivers/mtd/nand/onenand/onenand_omap2.c | 609
-rw-r--r-- drivers/mtd/nand/onenand/onenand_samsung.c | 1001
-rw-r--r-- drivers/mtd/nand/onenand/samsung.h | 56
-rw-r--r-- drivers/mtd/nand/raw/Kconfig | 546
-rw-r--r-- drivers/mtd/nand/raw/Makefile | 71
-rw-r--r-- drivers/mtd/nand/raw/ams-delta.c | 447
-rw-r--r-- drivers/mtd/nand/raw/arasan-nand-controller.c | 1512
-rw-r--r-- drivers/mtd/nand/raw/atmel/Makefile | 5
-rw-r--r-- drivers/mtd/nand/raw/atmel/nand-controller.c | 2670
-rw-r--r-- drivers/mtd/nand/raw/atmel/pmecc.c | 1015
-rw-r--r-- drivers/mtd/nand/raw/atmel/pmecc.h | 70
-rw-r--r-- drivers/mtd/nand/raw/au1550nd.c | 367
-rw-r--r-- drivers/mtd/nand/raw/bcm47xxnflash/Makefile | 5
-rw-r--r-- drivers/mtd/nand/raw/bcm47xxnflash/bcm47xxnflash.h | 26
-rw-r--r-- drivers/mtd/nand/raw/bcm47xxnflash/main.c | 79
-rw-r--r-- drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c | 451
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/Kconfig | 49
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/Makefile | 10
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c | 99
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c | 131
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/bcma_nand.c | 132
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 3328
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/brcmnand.h | 95
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c | 37
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/iproc_nand.c | 149
-rw-r--r-- drivers/mtd/nand/raw/cadence-nand-controller.c | 3079
-rw-r--r-- drivers/mtd/nand/raw/cafe_nand.c | 892
-rw-r--r-- drivers/mtd/nand/raw/cs553x_nand.c | 421
-rw-r--r-- drivers/mtd/nand/raw/davinci_nand.c | 854
-rw-r--r-- drivers/mtd/nand/raw/denali.c | 1381
-rw-r--r-- drivers/mtd/nand/raw/denali.h | 398
-rw-r--r-- drivers/mtd/nand/raw/denali_dt.c | 259
-rw-r--r-- drivers/mtd/nand/raw/denali_pci.c | 139
-rw-r--r-- drivers/mtd/nand/raw/diskonchip.c | 1579
-rw-r--r-- drivers/mtd/nand/raw/fsl_elbc_nand.c | 1008
-rw-r--r-- drivers/mtd/nand/raw/fsl_ifc_nand.c | 1140
-rw-r--r-- drivers/mtd/nand/raw/fsl_upm.c | 270
-rw-r--r-- drivers/mtd/nand/raw/fsmc_nand.c | 1236
-rw-r--r-- drivers/mtd/nand/raw/gpio.c | 406
-rw-r--r-- drivers/mtd/nand/raw/gpmi-nand/Makefile | 2
-rw-r--r-- drivers/mtd/nand/raw/gpmi-nand/bch-regs.h | 115
-rw-r--r-- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 2868
-rw-r--r-- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h | 179
-rw-r--r-- drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h | 180
-rw-r--r-- drivers/mtd/nand/raw/hisi504_nand.c | 869
-rw-r--r-- drivers/mtd/nand/raw/ingenic/Kconfig | 45
-rw-r--r-- drivers/mtd/nand/raw/ingenic/Makefile | 9
-rw-r--r-- drivers/mtd/nand/raw/ingenic/ingenic_ecc.c | 159
-rw-r--r-- drivers/mtd/nand/raw/ingenic/ingenic_ecc.h | 83
-rw-r--r-- drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c | 575
-rw-r--r-- drivers/mtd/nand/raw/ingenic/jz4725b_bch.c | 295
-rw-r--r-- drivers/mtd/nand/raw/ingenic/jz4740_ecc.c | 197
-rw-r--r-- drivers/mtd/nand/raw/ingenic/jz4780_bch.c | 271
-rw-r--r-- drivers/mtd/nand/raw/intel-nand-controller.c | 742
-rw-r--r-- drivers/mtd/nand/raw/internals.h | 175
-rw-r--r-- drivers/mtd/nand/raw/lpc32xx_mlc.c | 902
-rw-r--r-- drivers/mtd/nand/raw/lpc32xx_slc.c | 1023
-rw-r--r-- drivers/mtd/nand/raw/marvell_nand.c | 3200
-rw-r--r-- drivers/mtd/nand/raw/meson_nand.c | 1602
-rw-r--r-- drivers/mtd/nand/raw/mpc5121_nfc.c | 849
-rw-r--r-- drivers/mtd/nand/raw/mtk_nand.c | 1653
-rw-r--r-- drivers/mtd/nand/raw/mxc_nand.c | 1845
-rw-r--r-- drivers/mtd/nand/raw/mxic_nand.c | 587
-rw-r--r-- drivers/mtd/nand/raw/nand_amd.c | 53
-rw-r--r-- drivers/mtd/nand/raw/nand_base.c | 6677
-rw-r--r-- drivers/mtd/nand/raw/nand_bbt.c | 1491
-rw-r--r-- drivers/mtd/nand/raw/nand_esmt.c | 59
-rw-r--r-- drivers/mtd/nand/raw/nand_hynix.c | 748
-rw-r--r-- drivers/mtd/nand/raw/nand_ids.c | 217
-rw-r--r-- drivers/mtd/nand/raw/nand_jedec.c | 141
-rw-r--r-- drivers/mtd/nand/raw/nand_legacy.c | 644
-rw-r--r-- drivers/mtd/nand/raw/nand_macronix.c | 500
-rw-r--r-- drivers/mtd/nand/raw/nand_micron.c | 599
-rw-r--r-- drivers/mtd/nand/raw/nand_onfi.c | 339
-rw-r--r-- drivers/mtd/nand/raw/nand_samsung.c | 139
-rw-r--r-- drivers/mtd/nand/raw/nand_sandisk.c | 26
-rw-r--r-- drivers/mtd/nand/raw/nand_timings.c | 737
-rw-r--r-- drivers/mtd/nand/raw/nand_toshiba.c | 302
-rw-r--r-- drivers/mtd/nand/raw/nandsim.c | 2460
-rw-r--r-- drivers/mtd/nand/raw/ndfc.c | 276
-rw-r--r-- drivers/mtd/nand/raw/omap2.c | 2305
-rw-r--r-- drivers/mtd/nand/raw/omap_elm.c | 571
-rw-r--r-- drivers/mtd/nand/raw/orion_nand.c | 229
-rw-r--r-- drivers/mtd/nand/raw/pasemi_nand.c | 247
-rw-r--r-- drivers/mtd/nand/raw/pl35x-nand-controller.c | 1199
-rw-r--r-- drivers/mtd/nand/raw/plat_nand.c | 159
-rw-r--r-- drivers/mtd/nand/raw/qcom_nandc.c | 3546
-rw-r--r-- drivers/mtd/nand/raw/r852.c | 1091
-rw-r--r-- drivers/mtd/nand/raw/r852.h | 155
-rw-r--r-- drivers/mtd/nand/raw/renesas-nand-controller.c | 1417
-rw-r--r-- drivers/mtd/nand/raw/rockchip-nand-controller.c | 1494
-rw-r--r-- drivers/mtd/nand/raw/s3c2410.c | 1232
-rw-r--r-- drivers/mtd/nand/raw/sh_flctl.c | 1230
-rw-r--r-- drivers/mtd/nand/raw/sharpsl.c | 244
-rw-r--r-- drivers/mtd/nand/raw/sm_common.c | 210
-rw-r--r-- drivers/mtd/nand/raw/sm_common.h | 58
-rw-r--r-- drivers/mtd/nand/raw/socrates_nand.c | 241
-rw-r--r-- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 2110
-rw-r--r-- drivers/mtd/nand/raw/sunxi_nand.c | 2208
-rw-r--r-- drivers/mtd/nand/raw/tegra_nand.c | 1290
-rw-r--r-- drivers/mtd/nand/raw/txx9ndfmc.c | 421
-rw-r--r-- drivers/mtd/nand/raw/vf610_nfc.c | 953
-rw-r--r-- drivers/mtd/nand/raw/xway_nand.c | 267
-rw-r--r-- drivers/mtd/nand/spi/Kconfig | 9
-rw-r--r-- drivers/mtd/nand/spi/Makefile | 4
-rw-r--r-- drivers/mtd/nand/spi/alliancememory.c | 153
-rw-r--r-- drivers/mtd/nand/spi/ato.c | 86
-rw-r--r-- drivers/mtd/nand/spi/core.c | 1406
-rw-r--r-- drivers/mtd/nand/spi/esmt.c | 144
-rw-r--r-- drivers/mtd/nand/spi/gigadevice.c | 545
-rw-r--r-- drivers/mtd/nand/spi/macronix.c | 333
-rw-r--r-- drivers/mtd/nand/spi/micron.c | 309
-rw-r--r-- drivers/mtd/nand/spi/paragon.c | 131
-rw-r--r-- drivers/mtd/nand/spi/toshiba.c | 313
-rw-r--r-- drivers/mtd/nand/spi/winbond.c | 202
-rw-r--r-- drivers/mtd/nand/spi/xtx.c | 129
-rw-r--r-- drivers/mtd/nftlcore.c | 804
-rw-r--r-- drivers/mtd/nftlmount.c | 782
-rw-r--r-- drivers/mtd/parsers/Kconfig | 222
-rw-r--r-- drivers/mtd/parsers/Makefile | 18
-rw-r--r-- drivers/mtd/parsers/afs.c | 395
-rw-r--r-- drivers/mtd/parsers/ar7part.c | 129
-rw-r--r-- drivers/mtd/parsers/bcm47xxpart.c | 317
-rw-r--r-- drivers/mtd/parsers/bcm63xxpart.c | 171
-rw-r--r-- drivers/mtd/parsers/brcm_u-boot.c | 84
-rw-r--r-- drivers/mtd/parsers/cmdlinepart.c | 437
-rw-r--r-- drivers/mtd/parsers/ofpart_bcm4908.c | 67
-rw-r--r-- drivers/mtd/parsers/ofpart_bcm4908.h | 15
-rw-r--r-- drivers/mtd/parsers/ofpart_core.c | 292
-rw-r--r-- drivers/mtd/parsers/ofpart_linksys_ns.c | 50
-rw-r--r-- drivers/mtd/parsers/ofpart_linksys_ns.h | 18
-rw-r--r-- drivers/mtd/parsers/parser_imagetag.c | 221
-rw-r--r-- drivers/mtd/parsers/parser_trx.c | 136
-rw-r--r-- drivers/mtd/parsers/qcomsmempart.c | 197
-rw-r--r-- drivers/mtd/parsers/redboot.c | 323
-rw-r--r-- drivers/mtd/parsers/scpart.c | 249
-rw-r--r-- drivers/mtd/parsers/sharpslpart.c | 399
-rw-r--r-- drivers/mtd/parsers/tplink_safeloader.c | 152
-rw-r--r-- drivers/mtd/rfd_ftl.c | 830
-rw-r--r-- drivers/mtd/sm_ftl.c | 1299
-rw-r--r-- drivers/mtd/sm_ftl.h | 87
-rw-r--r-- drivers/mtd/spi-nor/Kconfig | 73
-rw-r--r-- drivers/mtd/spi-nor/Makefile | 23
-rw-r--r-- drivers/mtd/spi-nor/atmel.c | 215
-rw-r--r-- drivers/mtd/spi-nor/catalyst.c | 24
-rw-r--r-- drivers/mtd/spi-nor/controllers/Kconfig | 18
-rw-r--r-- drivers/mtd/spi-nor/controllers/Makefile | 3
-rw-r--r-- drivers/mtd/spi-nor/controllers/hisi-sfc.c | 497
-rw-r--r-- drivers/mtd/spi-nor/controllers/nxp-spifi.c | 461
-rw-r--r-- drivers/mtd/spi-nor/core.c | 3813
-rw-r--r-- drivers/mtd/spi-nor/core.h | 745
-rw-r--r-- drivers/mtd/spi-nor/debugfs.c | 257
-rw-r--r-- drivers/mtd/spi-nor/eon.c | 38
-rw-r--r-- drivers/mtd/spi-nor/esmt.c | 28
-rw-r--r-- drivers/mtd/spi-nor/everspin.c | 23
-rw-r--r-- drivers/mtd/spi-nor/fujitsu.c | 21
-rw-r--r-- drivers/mtd/spi-nor/gigadevice.c | 76
-rw-r--r-- drivers/mtd/spi-nor/intel.c | 25
-rw-r--r-- drivers/mtd/spi-nor/issi.c | 108
-rw-r--r-- drivers/mtd/spi-nor/macronix.c | 131
-rw-r--r-- drivers/mtd/spi-nor/micron-st.c | 462
-rw-r--r-- drivers/mtd/spi-nor/otp.c | 507
-rw-r--r-- drivers/mtd/spi-nor/sfdp.c | 1570
-rw-r--r-- drivers/mtd/spi-nor/sfdp.h | 136
-rw-r--r-- drivers/mtd/spi-nor/spansion.c | 989
-rw-r--r-- drivers/mtd/spi-nor/sst.c | 229
-rw-r--r-- drivers/mtd/spi-nor/swp.c | 432
-rw-r--r-- drivers/mtd/spi-nor/sysfs.c | 111
-rw-r--r-- drivers/mtd/spi-nor/winbond.c | 249
-rw-r--r-- drivers/mtd/spi-nor/xilinx.c | 175
-rw-r--r-- drivers/mtd/spi-nor/xmc.c | 25
-rw-r--r-- drivers/mtd/ssfdc.c | 455
-rw-r--r-- drivers/mtd/tests/Makefile | 19
-rw-r--r-- drivers/mtd/tests/mtd_nandecctest.c | 329
-rw-r--r-- drivers/mtd/tests/mtd_test.c | 109
-rw-r--r-- drivers/mtd/tests/mtd_test.h | 24
-rw-r--r-- drivers/mtd/tests/nandbiterrs.c | 419
-rw-r--r-- drivers/mtd/tests/oobtest.c | 733
-rw-r--r-- drivers/mtd/tests/pagetest.c | 461
-rw-r--r-- drivers/mtd/tests/readtest.c | 215
-rw-r--r-- drivers/mtd/tests/speedtest.c | 418
-rw-r--r-- drivers/mtd/tests/stresstest.c | 232
-rw-r--r-- drivers/mtd/tests/subpagetest.c | 437
-rw-r--r-- drivers/mtd/tests/torturetest.c | 480
-rw-r--r-- drivers/mtd/ubi/Kconfig | 107
-rw-r--r-- drivers/mtd/ubi/Makefile | 9
-rw-r--r-- drivers/mtd/ubi/attach.c | 1923
-rw-r--r-- drivers/mtd/ubi/block.c | 673
-rw-r--r-- drivers/mtd/ubi/build.c | 1514
-rw-r--r-- drivers/mtd/ubi/cdev.c | 1108
-rw-r--r-- drivers/mtd/ubi/debug.c | 606
-rw-r--r-- drivers/mtd/ubi/debug.h | 129
-rw-r--r-- drivers/mtd/ubi/eba.c | 1709
-rw-r--r-- drivers/mtd/ubi/fastmap-wl.c | 490
-rw-r--r-- drivers/mtd/ubi/fastmap.c | 1697
-rw-r--r-- drivers/mtd/ubi/gluebi.c | 500
-rw-r--r-- drivers/mtd/ubi/io.c | 1398
-rw-r--r-- drivers/mtd/ubi/kapi.c | 855
-rw-r--r-- drivers/mtd/ubi/misc.c | 191
-rw-r--r-- drivers/mtd/ubi/ubi-media.h | 503
-rw-r--r-- drivers/mtd/ubi/ubi.h | 1228
-rw-r--r-- drivers/mtd/ubi/upd.c | 420
-rw-r--r-- drivers/mtd/ubi/vmt.c | 797
-rw-r--r-- drivers/mtd/ubi/vtbl.c | 871
-rw-r--r-- drivers/mtd/ubi/wl.c | 2159
-rw-r--r-- drivers/mtd/ubi/wl.h | 30
322 files changed, 184575 insertions(+), 0 deletions(-)
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
new file mode 100644
index 0000000000..796a2eccbe
--- /dev/null
+++ b/drivers/mtd/Kconfig
@@ -0,0 +1,225 @@
+menuconfig MTD
+ tristate "Memory Technology Device (MTD) support"
+ imply NVMEM
+ help
+ Memory Technology Devices are flash, RAM and similar chips, often
+ used for solid state file systems on embedded devices. This option
+ will provide the generic support for MTD drivers to register
+ themselves with the kernel and for potential users of MTD devices
+ to enumerate the devices which are present and obtain a handle on
+ them. It will also allow you to select individual drivers for
+ particular hardware and users of MTD devices. If unsure, say N.
+
+if MTD
+
+config MTD_TESTS
+ tristate "MTD tests support (DANGEROUS)"
+ depends on m
+ help
+ This option includes various MTD tests into compilation. The tests
+ should normally be compiled as kernel modules. The modules perform
+ various checks and verifications when loaded.
+
+ WARNING: some of the tests will ERASE the entire MTD device which
+ they test. Do not use these tests unless you really know what you
+ are doing.
+
+menu "Partition parsers"
+source "drivers/mtd/parsers/Kconfig"
+endmenu
+
+comment "User Modules And Translation Layers"
+
+#
+# MTD block device support is select'ed if needed
+#
+config MTD_BLKDEVS
+ tristate
+
+config MTD_BLOCK
+ tristate "Caching block device access to MTD devices"
+ depends on BLOCK
+ select MTD_BLKDEVS
+ help
+ Although most flash chips have an erase size too large to be useful
+ as block devices, it is possible to use MTD devices which are based
+ on RAM chips in this manner. This block device is a user of MTD
+ devices performing that function.
+
+ Note that mounting a JFFS2 filesystem doesn't require mtdblock:
+ a rootfs can be mounted directly from an MTD device by passing
+ "root=mtd2" or "root=mtd:name_of_device" on the kernel command line.
+
+ Later, it may be extended to perform read/erase/modify/write cycles
+ on flash chips to emulate a smaller block size. Needless to say,
+ this is very unsafe, but could be useful for file systems which are
+ almost never written to.
+
+ You do not need this option for use with the DiskOnChip devices. For
+ those, enable NFTL support (CONFIG_NFTL) instead.
+
+config MTD_BLOCK_RO
+ tristate "Readonly block device access to MTD devices"
+ depends on MTD_BLOCK!=y && BLOCK
+ select MTD_BLKDEVS
+ help
+ This allows you to mount read-only file systems (such as cramfs)
+ from an MTD device, without the overhead (and danger) of the caching
+ driver.
+
+ You do not need this option for use with the DiskOnChip devices. For
+ those, enable NFTL support (CONFIG_NFTL) instead.
+
+comment "Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK."
+ depends on MTD_BLOCK || MTD_BLOCK_RO
+
+config FTL
+ tristate "FTL (Flash Translation Layer) support"
+ depends on BLOCK
+ select MTD_BLKDEVS
+ help
+ This provides support for the original Flash Translation Layer which
+ is part of the PCMCIA specification. It uses a kind of pseudo-
+ file system on a flash device to emulate a block device with
+ 512-byte sectors, on top of which you put a 'normal' file system.
+
+ You may find that the algorithms used in this code are patented
+ unless you live in the Free World where software patents aren't
+ legal - in the USA you are only permitted to use this on PCMCIA
+ hardware, although under the terms of the GPL you're obviously
+ permitted to copy, modify and distribute the code as you wish. Just
+ not to use it.
+
+config NFTL
+ tristate "NFTL (NAND Flash Translation Layer) support"
+ depends on BLOCK
+ select MTD_BLKDEVS
+ help
+ This provides support for the NAND Flash Translation Layer which is
+ used on M-Systems' DiskOnChip devices. It uses a kind of pseudo-
+ file system on a flash device to emulate a block device with
+ 512-byte sectors, on top of which you put a 'normal' file system.
+
+ You may find that the algorithms used in this code are patented
+ unless you live in the Free World where software patents aren't
+ legal - in the USA you are only permitted to use this on DiskOnChip
+ hardware, although under the terms of the GPL you're obviously
+ permitted to copy, modify and distribute the code as you wish. Just
+ not to use it.
+
+config NFTL_RW
+ bool "Write support for NFTL"
+ depends on NFTL
+ help
+ Support for writing to the NAND Flash Translation Layer, as used
+ on the DiskOnChip.
+
+config INFTL
+ tristate "INFTL (Inverse NAND Flash Translation Layer) support"
+ depends on BLOCK
+ select MTD_BLKDEVS
+ help
+ This provides support for the Inverse NAND Flash Translation
+ Layer which is used on M-Systems' newer DiskOnChip devices. It
+ uses a kind of pseudo-file system on a flash device to emulate
+ a block device with 512-byte sectors, on top of which you put
+ a 'normal' file system.
+
+ You may find that the algorithms used in this code are patented
+ unless you live in the Free World where software patents aren't
+ legal - in the USA you are only permitted to use this on DiskOnChip
+ hardware, although under the terms of the GPL you're obviously
+ permitted to copy, modify and distribute the code as you wish. Just
+ not to use it.
+
+config RFD_FTL
+ tristate "Resident Flash Disk (Flash Translation Layer) support"
+ depends on BLOCK
+ select MTD_BLKDEVS
+ help
+ This provides support for the flash translation layer known
+ as the Resident Flash Disk (RFD), as used by the Embedded BIOS
+ of General Software. There is a blurb at:
+
+ http://www.gensw.com/pages/prod/bios/rfd.htm
+
+config SSFDC
+ tristate "NAND SSFDC (SmartMedia) read only translation layer"
+ depends on BLOCK
+ select MTD_BLKDEVS
+ help
+ This enables read-only access to SmartMedia-formatted NAND
+ flash. You can mount it with the FAT file system.
+
+config SM_FTL
+ tristate "SmartMedia/xD new translation layer"
+ depends on BLOCK
+ select MTD_BLKDEVS
+ select MTD_NAND_CORE
+ select MTD_NAND_ECC_SW_HAMMING
+ help
+ This enables EXPERIMENTAL R/W support for the SmartMedia/xD
+ FTL (Flash Translation Layer).
+ Write support is only lightly tested, so this driver is not
+ recommended for use with valuable data (in any case, if you have
+ valuable data, make backups regardless of the software/hardware
+ you use, because you never know what will eat your data...).
+ If you only need R/O access, you can use the older R/O driver
+ (CONFIG_SSFDC).
+
+config MTD_OOPS
+ tristate "Log panic/oops to an MTD buffer"
+ help
+ This enables panic and oops messages to be logged to a circular
+ buffer in a flash partition, from which they can be read back at
+ some later point.
+
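For illustration (not part of this patch), mtdoops is usually pointed at a dedicated partition via its module parameters, e.g. on the kernel command line; the parameter names below match drivers/mtd/mtdoops.c, while the partition name "oops" is hypothetical:

    mtdoops.mtddev=oops mtdoops.record_size=65536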
+config MTD_PSTORE
+ tristate "Log panic/oops to an MTD buffer based on pstore"
+ depends on PSTORE_BLK
+ help
+ This enables panic and oops messages to be logged to a circular
+ buffer in a flash partition, from which they can be read back as
+ files after mounting the pstore filesystem.
+
+ If unsure, say N.
+
+config MTD_SWAP
+ tristate "Swap on MTD device support"
+ depends on MTD && SWAP
+ select MTD_BLKDEVS
+ help
+ Provides a volatile block device driver on top of an MTD partition,
+ suitable for swapping. The mapping of written blocks is not saved.
+ The driver provides wear leveling by storing an erase counter in
+ the OOB area.
+
+config MTD_PARTITIONED_MASTER
+ bool "Retain master device when partitioned"
+ default n
+ depends on MTD
+ help
+ For historical reasons, by default either the master device or its
+ partitions are registered, but not both. The concern was that
+ exposing the same data through multiple devices was dangerous;
+ however, SCSI does this and it is frequently useful for
+ applications. This option keeps the master device registered even
+ when the device is partitioned. It also makes the partition devices
+ children of the master device, rather than of whatever lies behind
+ the master.
+
+source "drivers/mtd/chips/Kconfig"
+
+source "drivers/mtd/maps/Kconfig"
+
+source "drivers/mtd/devices/Kconfig"
+
+source "drivers/mtd/nand/Kconfig"
+
+source "drivers/mtd/lpddr/Kconfig"
+
+source "drivers/mtd/spi-nor/Kconfig"
+
+source "drivers/mtd/ubi/Kconfig"
+
+source "drivers/mtd/hyperbus/Kconfig"
+
+endif # MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
new file mode 100644
index 0000000000..593d0593a0
--- /dev/null
+++ b/drivers/mtd/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the memory technology device drivers.
+#
+
+# Core functionality.
+obj-$(CONFIG_MTD) += mtd.o
+mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
+
+obj-y += parsers/
+
+# 'Users' - code which presents functionality to userspace.
+obj-$(CONFIG_MTD_BLKDEVS) += mtd_blkdevs.o
+obj-$(CONFIG_MTD_BLOCK) += mtdblock.o
+obj-$(CONFIG_MTD_BLOCK_RO) += mtdblock_ro.o
+obj-$(CONFIG_FTL) += ftl.o
+obj-$(CONFIG_NFTL) += nftl.o
+obj-$(CONFIG_INFTL) += inftl.o
+obj-$(CONFIG_RFD_FTL) += rfd_ftl.o
+obj-$(CONFIG_SSFDC) += ssfdc.o
+obj-$(CONFIG_SM_FTL) += sm_ftl.o
+obj-$(CONFIG_MTD_OOPS) += mtdoops.o
+obj-$(CONFIG_MTD_PSTORE) += mtdpstore.o
+obj-$(CONFIG_MTD_SWAP) += mtdswap.o
+
+nftl-objs := nftlcore.o nftlmount.o
+inftl-objs := inftlcore.o inftlmount.o
+
+obj-y += chips/ lpddr/ maps/ devices/ nand/ tests/
+
+obj-$(CONFIG_MTD_SPI_NOR) += spi-nor/
+obj-$(CONFIG_MTD_UBI) += ubi/
+obj-$(CONFIG_MTD_HYPERBUS) += hyperbus/
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
new file mode 100644
index 0000000000..19726ebd97
--- /dev/null
+++ b/drivers/mtd/chips/Kconfig
@@ -0,0 +1,243 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "RAM/ROM/Flash chip drivers"
+ depends on MTD!=n
+
+config MTD_CFI
+ tristate "Detect flash chips by Common Flash Interface (CFI) probe"
+ select MTD_GEN_PROBE
+ select MTD_CFI_UTIL
+ help
+ The Common Flash Interface specification was developed by Intel,
+ AMD and other flash manufacturers, and provides a universal method
+ for probing the capabilities of flash devices. If you wish to
+ support any device that is CFI-compliant, you need to enable this
+ option. Visit <https://www.amd.com/products/nvd/overview/cfi.html>
+ for more information on CFI.
+
+config MTD_JEDECPROBE
+ tristate "Detect non-CFI AMD/JEDEC-compatible flash chips"
+ select MTD_GEN_PROBE
+ select MTD_CFI_UTIL
+ help
+ This option enables JEDEC-style probing of flash chips which are not
+ compatible with the Common Flash Interface, but will use the common
+ CFI-targeted flash drivers for any identified chips which are in
+ fact compatible in all but the probe method. This actually
+ covers most AMD/Fujitsu-compatible chips and also non-CFI
+ Intel chips.
+
+config MTD_GEN_PROBE
+ tristate
+
+config MTD_CFI_ADV_OPTIONS
+ bool "Flash chip driver advanced configuration options"
+ depends on MTD_GEN_PROBE
+ help
+ If you need to specify a specific endianness for access to flash
+ chips, or if you wish to reduce the size of the kernel by including
+ support for only specific arrangements of flash chips, say 'Y'. This
+ option does not directly affect the code, but will enable other
+ configuration options which allow you to do so.
+
+ If unsure, say 'N'.
+
+choice
+ prompt "Flash cmd/query data swapping"
+ depends on MTD_CFI_ADV_OPTIONS
+ default MTD_CFI_NOSWAP
+ help
+ This option defines the way in which the CPU attempts to arrange
+ data bits when writing the 'magic' commands to the chips. Saying
+ 'NO', which is the default when CONFIG_MTD_CFI_ADV_OPTIONS isn't
+ enabled, means that the CPU will not do any swapping; the chips
+ are expected to be wired to the CPU in 'host-endian' form.
+ If the bytes are reversed, a specific arrangement can be selected
+ with BIG_ENDIAN_BYTE or LITTLE_ENDIAN_BYTE.
+
+config MTD_CFI_NOSWAP
+ depends on !ARCH_IXP4XX || CPU_BIG_ENDIAN
+ bool "NO"
+
+config MTD_CFI_BE_BYTE_SWAP
+ bool "BIG_ENDIAN_BYTE"
+
+config MTD_CFI_LE_BYTE_SWAP
+ depends on !ARCH_IXP4XX
+ bool "LITTLE_ENDIAN_BYTE"
+
+endchoice
+
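For illustration (not part of this patch), a minimal sketch of what this choice controls. A chip command has to be replicated across every interleaved chip sharing the bus, and the byte-swap variants reorder the bytes within each chip's lane; build_cmd() is a hypothetical stand-in for the real machinery in include/linux/mtd/cfi.h.

#include <stdint.h>
#include <stdio.h>

/* Replicate a 16-bit chip command across all x16 chips on the bus. */
static uint32_t build_cmd(uint16_t cmd, int interleave)
{
	uint32_t word = 0;

	for (int i = 0; i < interleave; i++)
		word |= (uint32_t)cmd << (16 * i);
	return word;
}

int main(void)
{
	/* Two x16 chips on a 32-bit bus: 0x98 becomes 0x00980098. */
	printf("%08x\n", (unsigned int)build_cmd(0x98, 2));
	/* BIG_ENDIAN_BYTE/LITTLE_ENDIAN_BYTE would additionally swap the
	 * bytes within each 16-bit lane, yielding 0x98009800 here. */
	return 0;
}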
+config MTD_CFI_GEOMETRY
+ bool "Specific CFI Flash geometry selection"
+ depends on MTD_CFI_ADV_OPTIONS
+ select MTD_MAP_BANK_WIDTH_1 if !(MTD_MAP_BANK_WIDTH_2 || \
+ MTD_MAP_BANK_WIDTH_4 || MTD_MAP_BANK_WIDTH_8 || \
+ MTD_MAP_BANK_WIDTH_16 || MTD_MAP_BANK_WIDTH_32)
+ select MTD_CFI_I1 if !(MTD_CFI_I2 || MTD_CFI_I4 || MTD_CFI_I8)
+ help
+ This option does not affect the code directly, but will enable
+ some other configuration options which would allow you to reduce
+ the size of the kernel by including support for only certain
+ arrangements of CFI chips. If unsure, say 'N' and all options
+ which are supported by the current code will be enabled.
+
+config MTD_MAP_BANK_WIDTH_1
+ bool "Support 8-bit buswidth" if MTD_CFI_GEOMETRY
+ default y
+ help
+ If you wish to support CFI devices on a physical bus which is
+ 8 bits wide, say 'Y'.
+
+config MTD_MAP_BANK_WIDTH_2
+ bool "Support 16-bit buswidth" if MTD_CFI_GEOMETRY
+ default y
+ help
+ If you wish to support CFI devices on a physical bus which is
+ 16 bits wide, say 'Y'.
+
+config MTD_MAP_BANK_WIDTH_4
+ bool "Support 32-bit buswidth" if MTD_CFI_GEOMETRY
+ default y
+ help
+ If you wish to support CFI devices on a physical bus which is
+ 32 bits wide, say 'Y'.
+
+config MTD_MAP_BANK_WIDTH_8
+ bool "Support 64-bit buswidth" if MTD_CFI_GEOMETRY
+ default n
+ help
+ If you wish to support CFI devices on a physical bus which is
+ 64 bits wide, say 'Y'.
+
+config MTD_MAP_BANK_WIDTH_16
+ bool "Support 128-bit buswidth" if MTD_CFI_GEOMETRY
+ default n
+ help
+ If you wish to support CFI devices on a physical bus which is
+ 128 bits wide, say 'Y'.
+
+config MTD_MAP_BANK_WIDTH_32
+ bool "Support 256-bit buswidth" if MTD_CFI_GEOMETRY
+ select MTD_COMPLEX_MAPPINGS if HAS_IOMEM
+ default n
+ help
+ If you wish to support CFI devices on a physical bus which is
+ 256 bits wide, say 'Y'.
+
+config MTD_CFI_I1
+ bool "Support 1-chip flash interleave" if MTD_CFI_GEOMETRY
+ default y
+ help
+ If your flash chips are not interleaved - i.e. you only have one
+ flash chip addressed by each bus cycle, then say 'Y'.
+
+config MTD_CFI_I2
+ bool "Support 2-chip flash interleave" if MTD_CFI_GEOMETRY
+ default y
+ help
+ If your flash chips are interleaved in pairs - i.e. you have two
+ flash chips addressed by each bus cycle, then say 'Y'.
+
+config MTD_CFI_I4
+ bool "Support 4-chip flash interleave" if MTD_CFI_GEOMETRY
+ default n
+ help
+ If your flash chips are interleaved in fours - i.e. you have four
+ flash chips addressed by each bus cycle, then say 'Y'.
+
+config MTD_CFI_I8
+ bool "Support 8-chip flash interleave" if MTD_CFI_GEOMETRY
+ default n
+ help
+ If your flash chips are interleaved in eights - i.e. you have eight
+ flash chips addressed by each bus cycle, then say 'Y'.
+
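To make the geometry options concrete, a small sketch (illustration only, not part of this patch) of the relationship the CFI code assumes between map bank width, interleave, and per-chip width:

/* bankwidth = interleave * per-chip width, all in bytes: for example a
 * 32-bit (4-byte) bus carrying 2 interleaved chips implies x16 chips. */
static int chip_width_bytes(int map_bankwidth, int interleave)
{
	return map_bankwidth / interleave;	/* 4 / 2 == 2 */
}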
+config MTD_OTP
+ bool "Protection Registers aka one-time programmable (OTP) bits"
+ depends on MTD_CFI_ADV_OPTIONS
+ default n
+ help
+ This enables support for reading, writing and locking so-called
+ "Protection Registers" present on some flash chips.
+ A subset of them are pre-programmed at the factory with a
+ unique set of values. The rest is user-programmable.
+
+ The user-programmable Protection Registers contain one-time
+ programmable (OTP) bits; when programmed, register bits cannot be
+ erased. Each Protection Register can be accessed multiple times to
+ program individual bits, as long as the register remains unlocked.
+
+ Each Protection Register has an associated Lock Register bit. When a
+ Lock Register bit is programmed, the associated Protection Register
+ can only be read; it can no longer be programmed. Additionally,
+ because the Lock Register bits themselves are OTP, when programmed,
+ Lock Register bits cannot be erased. Therefore, when a Protection
+ Register is locked, it cannot be unlocked.
+
+ This feature should therefore be used with extreme care. Any mistake
+ in the programming of OTP bits will waste them.
+
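For illustration (not part of this patch), the Protection Registers are reachable from user space through the OTP ioctls defined in mtd-abi.h. This sketch switches a hypothetical /dev/mtd0 into factory-OTP mode and reads the first bytes:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/mtd-abi.h>

int main(void)
{
	int fd = open("/dev/mtd0", O_RDONLY);
	int mode = MTD_OTP_FACTORY, count = 0;
	unsigned char buf[64];

	if (fd < 0)
		return 1;
	/* Redirect read()/write() on this fd to the OTP area. */
	if (ioctl(fd, OTPSELECT, &mode) == 0 &&
	    ioctl(fd, OTPGETREGIONCOUNT, &count) == 0 && count > 0) {
		ssize_t n = read(fd, buf, sizeof(buf));

		printf("%d OTP region(s), read %zd bytes\n", count, n);
	}
	close(fd);
	return 0;
}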
+config MTD_CFI_INTELEXT
+ tristate "Support for CFI command set 0001 (Intel/Sharp chips)"
+ depends on MTD_GEN_PROBE
+ select MTD_CFI_UTIL
+ help
+ The Common Flash Interface defines a number of different command
+ sets which a CFI-compliant chip may claim to implement. This code
+ provides support for command set 0001, used on Intel StrataFlash
+ and other parts.
+
+config MTD_CFI_AMDSTD
+ tristate "Support for CFI command set 0002 (AMD/Fujitsu/Spansion chips)"
+ depends on MTD_GEN_PROBE
+ select MTD_CFI_UTIL
+ help
+ The Common Flash Interface defines a number of different command
+ sets which a CFI-compliant chip may claim to implement. This code
+ provides support for command set 0002, used on chips including
+ the AMD Am29LV320.
+
+config MTD_CFI_STAA
+ tristate "Support for CFI command set 0020 (ST (Advanced Architecture) chips)"
+ depends on MTD_GEN_PROBE
+ select MTD_CFI_UTIL
+ help
+ The Common Flash Interface defines a number of different command
+ sets which a CFI-compliant chip may claim to implement. This code
+ provides support for command set 0020.
+
+config MTD_CFI_UTIL
+ tristate
+
+config MTD_RAM
+ tristate "Support for RAM chips in bus mapping"
+ help
+ This option enables basic support for RAM chips accessed through
+ a bus mapping driver.
+
+config MTD_ROM
+ tristate "Support for ROM chips in bus mapping"
+ help
+ This option enables basic support for ROM chips accessed through
+ a bus mapping driver.
+
+config MTD_ABSENT
+ tristate "Support for absent chips in bus mapping"
+ help
+ This option enables support for a dummy probing driver used to
+ allocate placeholder MTD devices on systems that have socketed
+ or removable media. Use of this driver as a fallback chip probe
+ preserves the expected registration order of MTD device nodes on
+ the system regardless of media presence. Device nodes created
+ with this driver will return -ENODEV upon access.
+
+config MTD_XIP
+ bool "XIP aware MTD support"
+ depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && ARCH_MTD_XIP
+ default y if XIP_KERNEL
+ help
+ This allows MTD support to work with flash memory which is also
+ used for XIP purposes. If you're not sure what this is all about
+ then say N.
+
+endmenu
diff --git a/drivers/mtd/chips/Makefile b/drivers/mtd/chips/Makefile
new file mode 100644
index 0000000000..1f4e84f1cd
--- /dev/null
+++ b/drivers/mtd/chips/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# linux/drivers/mtd/chips/Makefile
+#
+
+obj-$(CONFIG_MTD) += chipreg.o
+obj-$(CONFIG_MTD_CFI) += cfi_probe.o
+obj-$(CONFIG_MTD_CFI_UTIL) += cfi_util.o
+obj-$(CONFIG_MTD_CFI_STAA) += cfi_cmdset_0020.o
+obj-$(CONFIG_MTD_CFI_AMDSTD) += cfi_cmdset_0002.o
+obj-$(CONFIG_MTD_CFI_INTELEXT) += cfi_cmdset_0001.o
+obj-$(CONFIG_MTD_GEN_PROBE) += gen_probe.o
+obj-$(CONFIG_MTD_JEDECPROBE) += jedec_probe.o
+obj-$(CONFIG_MTD_RAM) += map_ram.o
+obj-$(CONFIG_MTD_ROM) += map_rom.o
+obj-$(CONFIG_MTD_ABSENT) += map_absent.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
new file mode 100644
index 0000000000..c10693ba26
--- /dev/null
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -0,0 +1,2709 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common Flash Interface support:
+ * Intel Extended Vendor Command Set (ID 0x0001)
+ *
+ * (C) 2000 Red Hat.
+ *
+ *
+ * 10/10/2000 Nicolas Pitre <nico@fluxnic.net>
+ * - completely revamped method functions so they are aware and
+ * independent of the flash geometry (buswidth, interleave, etc.)
+ * - scalability vs code size is completely set at compile-time
+ * (see include/linux/mtd/cfi.h for selection)
+ * - optimized write buffer method
+ * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
+ * - reworked lock/unlock/erase support for var size flash
+ * 21/03/2007 Rodolfo Giometti <giometti@linux.it>
+ * - auto unlock sectors on resume for auto locking flash on power up
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
+#include <linux/bitmap.h>
+#include <linux/mtd/xip.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/cfi.h>
+
+/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
+/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
+
+// debugging, turns off buffer write mode if set to 1
+#define FORCE_WORD_WRITE 0
+
+/* Intel chips */
+#define I82802AB 0x00ad
+#define I82802AC 0x00ac
+#define PF38F4476 0x881c
+#define M28F00AP30 0x8963
+/* STMicroelectronics chips */
+#define M50LPW080 0x002F
+#define M50FLW080A 0x0080
+#define M50FLW080B 0x0081
+/* Atmel chips */
+#define AT49BV640D 0x02de
+#define AT49BV640DT 0x02db
+/* Sharp chips */
+#define LH28F640BFHE_PTTL90 0x00b0
+#define LH28F640BFHE_PBTL90 0x00b1
+#define LH28F640BFHE_PTTL70A 0x00b2
+#define LH28F640BFHE_PBTL70A 0x00b3
+
+static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
+static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
+static void cfi_intelext_sync (struct mtd_info *);
+static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len);
+#ifdef CONFIG_MTD_OTP
+static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_intelext_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
+ size_t *, const u_char *);
+static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
+static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
+ size_t *, struct otp_info *);
+static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
+ size_t *, struct otp_info *);
+#endif
+static int cfi_intelext_suspend (struct mtd_info *);
+static void cfi_intelext_resume (struct mtd_info *);
+static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
+
+static void cfi_intelext_destroy(struct mtd_info *);
+
+struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
+
+static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
+static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
+
+static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+
+static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
+static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
+static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
+#include "fwh_lock.h"
+
+
+
+/*
+ * *********** SETUP AND PROBE BITS ***********
+ */
+
+static struct mtd_chip_driver cfi_intelext_chipdrv = {
+ .probe = NULL, /* Not usable directly */
+ .destroy = cfi_intelext_destroy,
+ .name = "cfi_cmdset_0001",
+ .module = THIS_MODULE
+};
+
+/* #define DEBUG_LOCK_BITS */
+/* #define DEBUG_CFI_FEATURES */
+
+#ifdef DEBUG_CFI_FEATURES
+static void cfi_tell_features(struct cfi_pri_intelext *extp)
+{
+ int i;
+ printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
+ printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
+ printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
+ printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
+ printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
+ printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
+ printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
+ printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
+ printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
+ printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
+ printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
+ printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
+ printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
+ for (i=11; i<32; i++) {
+ if (extp->FeatureSupport & (1<<i))
+ printk(" - Unknown Bit %X: supported\n", i);
+ }
+
+ printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
+ printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
+ for (i=1; i<8; i++) {
+ if (extp->SuspendCmdSupport & (1<<i))
+ printk(" - Unknown Bit %X: supported\n", i);
+ }
+
+ printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
+ printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
+ printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
+ for (i=2; i<3; i++) {
+ if (extp->BlkStatusRegMask & (1<<i))
+ printk(" - Unknown Bit %X Active: yes\n",i);
+ }
+ printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
+ printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
+ for (i=6; i<16; i++) {
+ if (extp->BlkStatusRegMask & (1<<i))
+ printk(" - Unknown Bit %X Active: yes\n",i);
+ }
+
+ printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
+ extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
+ if (extp->VppOptimal)
+ printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
+ extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
+}
+#endif
+
+/* Atmel chips don't use the same PRI format as Intel chips */
+static void fixup_convert_atmel_pri(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+ struct cfi_pri_atmel atmel_pri;
+ uint32_t features = 0;
+
+ /* Reverse byteswapping */
+ extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
+ extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
+ extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
+
+ memcpy(&atmel_pri, extp, sizeof(atmel_pri));
+ memset((char *)extp + 5, 0, sizeof(*extp) - 5);
+
+ printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
+
+ if (atmel_pri.Features & 0x01) /* chip erase supported */
+ features |= (1<<0);
+ if (atmel_pri.Features & 0x02) /* erase suspend supported */
+ features |= (1<<1);
+ if (atmel_pri.Features & 0x04) /* program suspend supported */
+ features |= (1<<2);
+ if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
+ features |= (1<<9);
+ if (atmel_pri.Features & 0x20) /* page mode read supported */
+ features |= (1<<7);
+ if (atmel_pri.Features & 0x40) /* queued erase supported */
+ features |= (1<<4);
+ if (atmel_pri.Features & 0x80) /* Protection bits supported */
+ features |= (1<<6);
+
+ extp->FeatureSupport = features;
+
+ /* burst write mode not supported */
+ cfi->cfiq->BufWriteTimeoutTyp = 0;
+ cfi->cfiq->BufWriteTimeoutMax = 0;
+}
+
+static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+
+ cfip->FeatureSupport |= (1 << 5);
+ mtd->flags |= MTD_POWERUP_LOCK;
+}
+
+#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
+/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
+static void fixup_intel_strataflash(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+
+ printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
+ "erase on write disabled.\n");
+ extp->SuspendCmdSupport &= ~1;
+}
+#endif
+
+#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
+static void fixup_no_write_suspend(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+
+ if (cfip && (cfip->FeatureSupport&4)) {
+ cfip->FeatureSupport &= ~4;
+ printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
+ }
+}
+#endif
+
+static void fixup_st_m28w320ct(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
+ cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
+}
+
+static void fixup_st_m28w320cb(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /* Note this is done after the region info is endian swapped */
+ cfi->cfiq->EraseRegionInfo[1] =
+ (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
+}
+
+static int is_LH28F640BF(struct cfi_private *cfi)
+{
+ /* Sharp LH28F640BF Family */
+ if (cfi->mfr == CFI_MFR_SHARP && (
+ cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
+ cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
+ return 1;
+ return 0;
+}
+
+static void fixup_LH28F640BF(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+
+ /* Reset the Partition Configuration Register on LH28F640BF
+ * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
+ if (is_LH28F640BF(cfi)) {
+ printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
+ map_write(map, CMD(0x60), 0);
+ map_write(map, CMD(0x04), 0);
+
+ /* We have set one single partition thus
+ * Simultaneous Operations are not allowed */
+ printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
+ extp->FeatureSupport &= ~512;
+ }
+}
+
+static void fixup_use_point(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ if (!mtd->_point && map_is_linear(map)) {
+ mtd->_point = cfi_intelext_point;
+ mtd->_unpoint = cfi_intelext_unpoint;
+ }
+}
+
+static void fixup_use_write_buffers(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ if (cfi->cfiq->BufWriteTimeoutTyp) {
+ printk(KERN_INFO "Using buffer write method\n" );
+ mtd->_write = cfi_intelext_write_buffers;
+ mtd->_writev = cfi_intelext_writev;
+ }
+}
+
+/*
+ * Some chips power-up with all sectors locked by default.
+ */
+static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+
+ if (cfip->FeatureSupport&32) {
+ printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
+ mtd->flags |= MTD_POWERUP_LOCK;
+ }
+}
+
+static struct cfi_fixup cfi_fixup_table[] = {
+ { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
+ { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
+ { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
+#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
+#endif
+#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
+#endif
+#if !FORCE_WORD_WRITE
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
+#endif
+ { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
+ { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
+ { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
+ { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
+ { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
+ { 0, 0, NULL }
+};
+
+static struct cfi_fixup jedec_fixup_table[] = {
+ { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
+ { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
+ { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
+ { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
+ { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
+ { 0, 0, NULL }
+};
+static struct cfi_fixup fixup_table[] = {
+ /* The CFI vendor IDs and the JEDEC vendor IDs appear
+ * to be common. It is likely the device IDs are as
+ * well. This table is to pick all cases where
+ * we know that is the case.
+ */
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
+ { 0, 0, NULL }
+};
+
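The tables above are walked by cfi_fixup() from cfi_util.c. As a simplified sketch (illustration only, not the exact kernel code), each entry matches on manufacturer and device ID, with CFI_MFR_ANY and CFI_ID_ANY acting as wildcards, and every matching hook is invoked:

/* Simplified fixup walker; see cfi_fixup() in cfi_util.c for the real one. */
static void apply_fixups(struct mtd_info *mtd, struct cfi_fixup *f)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	for (; f->fixup; f++) {
		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
		    (f->id == CFI_ID_ANY || f->id == cfi->id))
			f->fixup(mtd);
	}
}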
+static void cfi_fixup_major_minor(struct cfi_private *cfi,
+ struct cfi_pri_intelext *extp)
+{
+ if (cfi->mfr == CFI_MFR_INTEL &&
+ cfi->id == PF38F4476 && extp->MinorVersion == '3')
+ extp->MinorVersion = '1';
+}
+
+static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
+{
+ /*
+ * Micron (formerly Numonyx) 1Gbit bottom-boot chips are buggy w.r.t.
+ * Erase Suspend for their small Erase Blocks (0x8000)
+ */
+ if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
+ return 1;
+ return 0;
+}
+
+static inline struct cfi_pri_intelext *
+read_pri_intelext(struct map_info *map, __u16 adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp;
+ unsigned int extra_size = 0;
+ unsigned int extp_size = sizeof(*extp);
+
+ again:
+ extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
+ if (!extp)
+ return NULL;
+
+ cfi_fixup_major_minor(cfi, extp);
+
+ if (extp->MajorVersion != '1' ||
+ (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
+ printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
+ "version %c.%c.\n", extp->MajorVersion,
+ extp->MinorVersion);
+ kfree(extp);
+ return NULL;
+ }
+
+ /* Do some byteswapping if necessary */
+ extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
+ extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
+ extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
+
+ if (extp->MinorVersion >= '0') {
+ extra_size = 0;
+
+ /* Protection Register info */
+ if (extp->NumProtectionFields) {
+ struct cfi_intelext_otpinfo *otp =
+ (struct cfi_intelext_otpinfo *)&extp->extra[0];
+
+ extra_size += (extp->NumProtectionFields - 1) *
+ sizeof(struct cfi_intelext_otpinfo);
+
+ if (extp_size >= sizeof(*extp) + extra_size) {
+ int i;
+
+ /* Do some byteswapping if necessary */
+ for (i = 0; i < extp->NumProtectionFields - 1; i++) {
+ otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
+ otp->FactGroups = le16_to_cpu(otp->FactGroups);
+ otp->UserGroups = le16_to_cpu(otp->UserGroups);
+ otp++;
+ }
+ }
+ }
+ }
+
+ if (extp->MinorVersion >= '1') {
+ /* Burst Read info */
+ extra_size += 2;
+ if (extp_size < sizeof(*extp) + extra_size)
+ goto need_more;
+ extra_size += extp->extra[extra_size - 1];
+ }
+
+ if (extp->MinorVersion >= '3') {
+ int nb_parts, i;
+
+ /* Number of hardware-partitions */
+ extra_size += 1;
+ if (extp_size < sizeof(*extp) + extra_size)
+ goto need_more;
+ nb_parts = extp->extra[extra_size - 1];
+
+ /* skip the sizeof(partregion) field in CFI 1.4 */
+ if (extp->MinorVersion >= '4')
+ extra_size += 2;
+
+ for (i = 0; i < nb_parts; i++) {
+ struct cfi_intelext_regioninfo *rinfo;
+ rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
+ extra_size += sizeof(*rinfo);
+ if (extp_size < sizeof(*extp) + extra_size)
+ goto need_more;
+ rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
+ extra_size += (rinfo->NumBlockTypes - 1)
+ * sizeof(struct cfi_intelext_blockinfo);
+ }
+
+ if (extp->MinorVersion >= '4')
+ extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
+
+ if (extp_size < sizeof(*extp) + extra_size) {
+ need_more:
+ extp_size = sizeof(*extp) + extra_size;
+ kfree(extp);
+ if (extp_size > 4096) {
+ printk(KERN_ERR
+ "%s: cfi_pri_intelext is too fat\n",
+ __func__);
+ return NULL;
+ }
+ goto again;
+ }
+ }
+
+ return extp;
+}
+
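read_pri_intelext() above is a grow-and-retry loop: it reads a minimal extended query table, walks the version-dependent tail to compute how much data really follows, and if the buffer was too small it frees it and jumps back to `again` with a larger extp_size, bounded by the 4096-byte "too fat" guard. Distilled to its essence (illustrative sketch, not kernel code):

#include <stdlib.h>

/* Grow-and-retry read: fetch with a guessed size, measure the real
 * need, then retry with the exact size, subject to an upper bound. */
static void *read_growing(size_t size,
			  void *(*fetch)(size_t),
			  size_t (*needed)(const void *))
{
	for (;;) {
		void *buf = fetch(size);
		size_t want;

		if (!buf)
			return NULL;
		want = needed(buf);
		if (want <= size)
			return buf;
		free(buf);
		if (want > 4096)	/* mirrors the "too fat" guard */
			return NULL;
		size = want;
	}
}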
+struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct mtd_info *mtd;
+ int i;
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ return NULL;
+ mtd->priv = map;
+ mtd->type = MTD_NORFLASH;
+
+ /* Fill in the default mtd operations */
+ mtd->_erase = cfi_intelext_erase_varsize;
+ mtd->_read = cfi_intelext_read;
+ mtd->_write = cfi_intelext_write_words;
+ mtd->_sync = cfi_intelext_sync;
+ mtd->_lock = cfi_intelext_lock;
+ mtd->_unlock = cfi_intelext_unlock;
+ mtd->_is_locked = cfi_intelext_is_locked;
+ mtd->_suspend = cfi_intelext_suspend;
+ mtd->_resume = cfi_intelext_resume;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->name = map->name;
+ mtd->writesize = 1;
+ mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+
+ mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
+
+ if (cfi->cfi_mode == CFI_MODE_CFI) {
+ /*
+ * It's a real CFI chip, not one for which the probe
+ * routine faked a CFI structure. So we read the feature
+ * table from it.
+ */
+ __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
+ struct cfi_pri_intelext *extp;
+
+ extp = read_pri_intelext(map, adr);
+ if (!extp) {
+ kfree(mtd);
+ return NULL;
+ }
+
+ /* Install our own private info structure */
+ cfi->cmdset_priv = extp;
+
+ cfi_fixup(mtd, cfi_fixup_table);
+
+#ifdef DEBUG_CFI_FEATURES
+ /* Tell the user about it in lots of lovely detail */
+ cfi_tell_features(extp);
+#endif
+
+ if(extp->SuspendCmdSupport & 1) {
+ printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
+ }
+ }
+ else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
+ /* Apply jedec specific fixups */
+ cfi_fixup(mtd, jedec_fixup_table);
+ }
+ /* Apply generic fixups */
+ cfi_fixup(mtd, fixup_table);
+
+ for (i=0; i< cfi->numchips; i++) {
+ if (cfi->cfiq->WordWriteTimeoutTyp)
+ cfi->chips[i].word_write_time =
+ 1<<cfi->cfiq->WordWriteTimeoutTyp;
+ else
+ cfi->chips[i].word_write_time = 50000;
+
+ if (cfi->cfiq->BufWriteTimeoutTyp)
+ cfi->chips[i].buffer_write_time =
+ 1<<cfi->cfiq->BufWriteTimeoutTyp;
+ /* No default; if it isn't specified, we won't use it */
+
+ if (cfi->cfiq->BlockEraseTimeoutTyp)
+ cfi->chips[i].erase_time =
+ 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
+ else
+ cfi->chips[i].erase_time = 2000000;
+
+ if (cfi->cfiq->WordWriteTimeoutTyp &&
+ cfi->cfiq->WordWriteTimeoutMax)
+ cfi->chips[i].word_write_time_max =
+ 1<<(cfi->cfiq->WordWriteTimeoutTyp +
+ cfi->cfiq->WordWriteTimeoutMax);
+ else
+ cfi->chips[i].word_write_time_max = 50000 * 8;
+
+ if (cfi->cfiq->BufWriteTimeoutTyp &&
+ cfi->cfiq->BufWriteTimeoutMax)
+ cfi->chips[i].buffer_write_time_max =
+ 1<<(cfi->cfiq->BufWriteTimeoutTyp +
+ cfi->cfiq->BufWriteTimeoutMax);
+
+ if (cfi->cfiq->BlockEraseTimeoutTyp &&
+ cfi->cfiq->BlockEraseTimeoutMax)
+ cfi->chips[i].erase_time_max =
+ 1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
+ cfi->cfiq->BlockEraseTimeoutMax);
+ else
+ cfi->chips[i].erase_time_max = 2000000 * 8;
+
+ cfi->chips[i].ref_point_counter = 0;
+ init_waitqueue_head(&(cfi->chips[i].wq));
+ }
+
+ map->fldrv = &cfi_intelext_chipdrv;
+
+ return cfi_intelext_setup(mtd);
+}
+struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
+struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
+EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
+
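The per-chip timeout setup in the loop above relies on CFI's power-of-two encoding: the typical time is 1 << Typ (microseconds for word and buffer writes; the erase fields are in milliseconds, hence the 1000 << factor), and the maximum is the typical value shifted left again by the Max field. A sketch with hypothetical field values:

/* CFI timeout encoding: typ = 1 << Typ, max = typ << Max. */
static void cfi_timeout_example(void)
{
	unsigned int typ_exp = 4, max_exp = 4;	/* hypothetical CFI fields */
	unsigned int word_write_us = 1U << typ_exp;	/* 16 us typical */
	unsigned int word_write_max_us = 1U << (typ_exp + max_exp); /* 256 us */

	(void)word_write_us;
	(void)word_write_max_us;
}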
+static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long offset = 0;
+ int i,j;
+ unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
+
+ //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
+
+ mtd->size = devsize * cfi->numchips;
+
+ mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
+ mtd->eraseregions = kcalloc(mtd->numeraseregions,
+ sizeof(struct mtd_erase_region_info),
+ GFP_KERNEL);
+ if (!mtd->eraseregions)
+ goto setup_err;
+
+ for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
+ unsigned long ernum, ersize;
+ ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
+ ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
+
+ if (mtd->erasesize < ersize) {
+ mtd->erasesize = ersize;
+ }
+ for (j=0; j<cfi->numchips; j++) {
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
+ if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
+ goto setup_err;
+ }
+ offset += (ersize * ernum);
+ }
+
+ if (offset != devsize) {
+ /* Argh */
+ printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
+ goto setup_err;
+ }
+
+ for (i=0; i<mtd->numeraseregions;i++){
+ printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
+ i,(unsigned long long)mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+ }
+
+#ifdef CONFIG_MTD_OTP
+ mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
+ mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
+ mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
+ mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
+ mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
+ mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
+#endif
+
+ /* This function has the potential to distort the reality
+ a bit and therefore should be called last. */
+ if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
+ goto setup_err;
+
+ __module_get(THIS_MODULE);
+ register_reboot_notifier(&mtd->reboot_notifier);
+ return mtd;
+
+ setup_err:
+ if (mtd->eraseregions)
+ for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
+ for (j=0; j<cfi->numchips; j++)
+ kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
+ kfree(mtd->eraseregions);
+ kfree(mtd);
+ kfree(cfi->cmdset_priv);
+ return NULL;
+}
+
+static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
+ struct cfi_private **pcfi)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = *pcfi;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+
+ /*
+ * Probing of multi-partition flash chips.
+ *
+ * To support multiple partitions when available, we simply arrange
+ * for each of them to have their own flchip structure even if they
+ * are on the same physical chip. This means completely recreating
+ * a new cfi_private structure right here, which is a blatant code
+ * layering violation, but this is still the least intrusive
+ * arrangement at this point. This can be rearranged in the future
+ * if someone feels motivated enough. --nico
+ */
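+ /* FeatureSupport bit 9 advertises "simultaneous operations",
+ i.e. hardware partitioning, in extended query 1.3 and later. */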
+ if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
+ && extp->FeatureSupport & (1 << 9)) {
+ int offs = 0;
+ struct cfi_private *newcfi;
+ struct flchip *chip;
+ struct flchip_shared *shared;
+ int numregions, numparts, partshift, numvirtchips, i, j;
+
+ /* Protection Register info */
+ if (extp->NumProtectionFields)
+ offs = (extp->NumProtectionFields - 1) *
+ sizeof(struct cfi_intelext_otpinfo);
+
+ /* Burst Read info */
+ offs += extp->extra[offs+1]+2;
+
+ /* Number of partition regions */
+ numregions = extp->extra[offs];
+ offs += 1;
+
+ /* skip the sizeof(partregion) field in CFI 1.4 */
+ if (extp->MinorVersion >= '4')
+ offs += 2;
+
+ /* Number of hardware partitions */
+ numparts = 0;
+ for (i = 0; i < numregions; i++) {
+ struct cfi_intelext_regioninfo *rinfo;
+ rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
+ numparts += rinfo->NumIdentPartitions;
+ offs += sizeof(*rinfo)
+ + (rinfo->NumBlockTypes - 1) *
+ sizeof(struct cfi_intelext_blockinfo);
+ }
+
+ if (!numparts)
+ numparts = 1;
+
+ /* Programming Region info */
+ if (extp->MinorVersion >= '4') {
+ struct cfi_intelext_programming_regioninfo *prinfo;
+ prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
+ mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
+ mtd->flags &= ~MTD_BIT_WRITEABLE;
+ printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
+ map->name, mtd->writesize,
+ cfi->interleave * prinfo->ControlValid,
+ cfi->interleave * prinfo->ControlInvalid);
+ }
+
+ /*
+ * All functions below currently rely on all chips having
+ * the same geometry so we'll just assume that all hardware
+ * partitions are of the same size too.
+ */
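+ /* chipshift is log2 of one (interleaved) chip's address space;
+ splitting it into numparts partitions (a power of two, hence
+ __ffs == log2) removes that many address bits. */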
+ partshift = cfi->chipshift - __ffs(numparts);
+
+ if ((1 << partshift) < mtd->erasesize) {
+ printk(KERN_ERR
+ "%s: bad number of hw partitions (%d)\n",
+ __func__, numparts);
+ return -EINVAL;
+ }
+
+ numvirtchips = cfi->numchips * numparts;
+ newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips),
+ GFP_KERNEL);
+ if (!newcfi)
+ return -ENOMEM;
+ shared = kmalloc_array(cfi->numchips,
+ sizeof(struct flchip_shared),
+ GFP_KERNEL);
+ if (!shared) {
+ kfree(newcfi);
+ return -ENOMEM;
+ }
+ memcpy(newcfi, cfi, sizeof(struct cfi_private));
+ newcfi->numchips = numvirtchips;
+ newcfi->chipshift = partshift;
+
+ chip = &newcfi->chips[0];
+ for (i = 0; i < cfi->numchips; i++) {
+ shared[i].writing = shared[i].erasing = NULL;
+ mutex_init(&shared[i].lock);
+ for (j = 0; j < numparts; j++) {
+ *chip = cfi->chips[i];
+ chip->start += j << partshift;
+ chip->priv = &shared[i];
+ /* those should be reset too since
+ they create memory references. */
+ init_waitqueue_head(&chip->wq);
+ mutex_init(&chip->mutex);
+ chip++;
+ }
+ }
+
+ printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
+ "--> %d partitions of %d KiB\n",
+ map->name, cfi->numchips, cfi->interleave,
+ newcfi->numchips, 1<<(newcfi->chipshift-10));
+
+ map->fldrv_priv = newcfi;
+ *pcfi = newcfi;
+ kfree(cfi);
+ }
+
+ return 0;
+}
+
+/*
+ * *********** CHIP ACCESS FUNCTIONS ***********
+ */
+static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+ unsigned long timeo = jiffies + HZ;
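+ /* 0x80 is SR.7, the Write State Machine "ready" bit; 0x01 is the
+ partition-status bit used below to tolerate operations that are
+ in progress in other partitions. */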
+
+ /* Prevent setting state FL_SYNCING for chip in suspended state. */
+ if (mode == FL_SYNCING && chip->oldstate != FL_READY)
+ goto sleep;
+
+ switch (chip->state) {
+
+ case FL_STATUS:
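+ /* Poll the status word once; if the chip is still busy, drop
+ the lock for 1us and make the caller retry by returning
+ -EAGAIN. */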
+ for (;;) {
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* At this point we're fine with write operations
+ in other partitions as they don't conflict. */
+ if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
+ break;
+
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ /* Someone else might have been playing with it. */
+ return -EAGAIN;
+ }
+ fallthrough;
+ case FL_READY:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ return 0;
+
+ case FL_ERASING:
+ if (!cfip ||
+ !(cfip->FeatureSupport & 2) ||
+ !(mode == FL_READY || mode == FL_POINT ||
+ (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
+ goto sleep;
+
+ /* Do not allow suspend if reading/writing the erase-block (EB) address */
+ if ((adr & chip->in_progress_block_mask) ==
+ chip->in_progress_block_addr)
+ goto sleep;
+
+ /* Do not suspend small erase blocks: buggy Micron chips */
+ if (cfi_is_micron_28F00AP30(cfi, chip) &&
+ (chip->in_progress_block_mask == ~(0x8000-1)))
+ goto sleep;
+
+ /* Erase suspend */
+ map_write(map, CMD(0xB0), chip->in_progress_block_addr);
+
+ /* If the flash has finished erasing, then 'erase suspend'
+ * appears to make some (28F320) flash devices switch to
+ * 'read' mode. Make sure that we switch to 'read status'
+ * mode so we get the right data. --rmk
+ */
+ map_write(map, CMD(0x70), chip->in_progress_block_addr);
+ chip->oldstate = FL_ERASING;
+ chip->state = FL_ERASE_SUSPENDING;
+ chip->erase_suspended = 1;
+ for (;;) {
+ status = map_read(map, chip->in_progress_block_addr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ /* Urgh. Resume and pretend we weren't here.
+ * Make sure we're in 'read status' mode if it had finished */
+ put_chip(map, chip, adr);
+ printk(KERN_ERR "%s: Chip not ready after erase "
+ "suspended: status = 0x%lx\n", map->name, status.x[0]);
+ return -EIO;
+ }
+
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
+ So we can just loop here. */
+ }
+ chip->state = FL_STATUS;
+ return 0;
+
+ case FL_XIP_WHILE_ERASING:
+ if (mode != FL_READY && mode != FL_POINT &&
+ (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
+ goto sleep;
+ chip->oldstate = chip->state;
+ chip->state = FL_READY;
+ return 0;
+
+ case FL_SHUTDOWN:
+ /* The machine is rebooting now, so no one can access the chip anymore */
+ return -EIO;
+ case FL_POINT:
+ /* Only if there's no operation suspended... */
+ if (mode == FL_READY && chip->oldstate == FL_READY)
+ return 0;
+ fallthrough;
+ default:
+ sleep:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ return -EAGAIN;
+ }
+}
+
+static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
+{
+ int ret;
+ DECLARE_WAITQUEUE(wait, current);
+
+ retry:
+ if (chip->priv &&
+ (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
+ || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
+ /*
+ * OK. We have possibility for contention on the write/erase
+ * operations which are global to the real chip and not per
+ * partition. So let's fight it over in the partition which
+ * currently has authority on the operation.
+ *
+ * The rules are as follows:
+ *
+ * - any write operation must own shared->writing.
+ *
+ * - any erase operation must own _both_ shared->writing and
+ * shared->erasing.
+ *
+ * - contention arbitration is handled in the owner's context.
+ *
+ * The 'shared' struct can be read and/or written only when
+ * its lock is taken.
+ */
+ struct flchip_shared *shared = chip->priv;
+ struct flchip *contender;
+ mutex_lock(&shared->lock);
+ contender = shared->writing;
+ if (contender && contender != chip) {
+ /*
+ * The engine to perform desired operation on this
+ * partition is already in use by someone else.
+ * Let's fight over it in the context of the chip
+ * currently using it. If it is possible to suspend,
+ * that other partition will do just that, otherwise
+ * it'll happily send us to sleep. In any case, when
+ * get_chip returns success we're clear to go ahead.
+ */
+ ret = mutex_trylock(&contender->mutex);
+ mutex_unlock(&shared->lock);
+ if (!ret)
+ goto retry;
+ mutex_unlock(&chip->mutex);
+ ret = chip_ready(map, contender, contender->start, mode);
+ mutex_lock(&chip->mutex);
+
+ if (ret == -EAGAIN) {
+ mutex_unlock(&contender->mutex);
+ goto retry;
+ }
+ if (ret) {
+ mutex_unlock(&contender->mutex);
+ return ret;
+ }
+ mutex_lock(&shared->lock);
+
+ /* We should not own chip if it is already
+ * in FL_SYNCING state. Put contender and retry. */
+ if (chip->state == FL_SYNCING) {
+ put_chip(map, contender, contender->start);
+ mutex_unlock(&contender->mutex);
+ goto retry;
+ }
+ mutex_unlock(&contender->mutex);
+ }
+
+ /* Check if we already have suspended erase
+ * on this chip. Sleep. */
+ if (mode == FL_ERASING && shared->erasing
+ && shared->erasing->oldstate == FL_ERASING) {
+ mutex_unlock(&shared->lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ goto retry;
+ }
+
+ /* We now own it */
+ shared->writing = chip;
+ if (mode == FL_ERASING)
+ shared->erasing = chip;
+ mutex_unlock(&shared->lock);
+ }
+ ret = chip_ready(map, chip, adr, mode);
+ if (ret == -EAGAIN)
+ goto retry;
+
+ return ret;
+}
+
+static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ if (chip->priv) {
+ struct flchip_shared *shared = chip->priv;
+ mutex_lock(&shared->lock);
+ if (shared->writing == chip && chip->oldstate == FL_READY) {
+ /* We own the ability to write, but we're done */
+ shared->writing = shared->erasing;
+ if (shared->writing && shared->writing != chip) {
+ /* give back ownership to who we loaned it from */
+ struct flchip *loaner = shared->writing;
+ mutex_lock(&loaner->mutex);
+ mutex_unlock(&shared->lock);
+ mutex_unlock(&chip->mutex);
+ put_chip(map, loaner, loaner->start);
+ mutex_lock(&chip->mutex);
+ mutex_unlock(&loaner->mutex);
+ wake_up(&chip->wq);
+ return;
+ }
+ shared->erasing = NULL;
+ shared->writing = NULL;
+ } else if (shared->erasing == chip && shared->writing != chip) {
+ /*
+ * We own the ability to erase without the ability
+ * to write, which means the erase was suspended
+ * and some other partition is currently writing.
+ * Don't let the switch below mess things up since
+ * we don't have ownership to resume anything.
+ */
+ mutex_unlock(&shared->lock);
+ wake_up(&chip->wq);
+ return;
+ }
+ mutex_unlock(&shared->lock);
+ }
+
+ switch(chip->oldstate) {
+ case FL_ERASING:
+ /* What if one interleaved chip has finished and the
+ other hasn't? The old code would leave the finished
+ one in READY mode. That's bad, and caused -EROFS
+ errors to be returned from do_erase_oneblock because
+ that's the only bit it checked for at the time.
+ As the state machine appears to explicitly allow
+ sending the 0x70 (Read Status) command to an erasing
+ chip and expecting it to be ignored, that's what we
+ do. */
+ map_write(map, CMD(0xd0), chip->in_progress_block_addr);
+ map_write(map, CMD(0x70), chip->in_progress_block_addr);
+ chip->oldstate = FL_READY;
+ chip->state = FL_ERASING;
+ break;
+
+ case FL_XIP_WHILE_ERASING:
+ chip->state = chip->oldstate;
+ chip->oldstate = FL_READY;
+ break;
+
+ case FL_READY:
+ case FL_STATUS:
+ case FL_JEDEC_QUERY:
+ break;
+ default:
+ printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
+ }
+ wake_up(&chip->wq);
+}
+
+#ifdef CONFIG_MTD_XIP
+
+/*
+ * No interrupt whatsoever can be serviced while the flash isn't in array
+ * mode. This is ensured by the xip_disable() and xip_enable() functions
+ * enclosing any code path where the flash is known not to be in array mode.
+ * Within an XIP-disabled code path, only functions marked with __xipram
+ * may be called and nothing else (it's a good idea to inspect the generated
+ * assembly to make sure inline functions were actually inlined and that gcc
+ * didn't emit calls to its own support functions). Configuring MTD CFI
+ * support for a single buswidth and a single interleave is also recommended.
+ */
+
+static void xip_disable(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ /* TODO: chips with no XIP use should ignore and return */
+ (void) map_read(map, adr); /* ensure mmu mapping is up to date */
+ local_irq_disable();
+}
+
+static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ if (chip->state != FL_POINT && chip->state != FL_READY) {
+ map_write(map, CMD(0xff), adr);
+ chip->state = FL_READY;
+ }
+ (void) map_read(map, adr);
+ xip_iprefetch();
+ local_irq_enable();
+}
+
+/*
+ * When a delay is required for the flash operation to complete, the
+ * xip_wait_for_operation() function polls for both the given timeout
+ * and pending (but still masked) hardware interrupts. Whenever an
+ * interrupt is pending, the flash erase or write operation is suspended,
+ * array mode is restored and interrupts are unmasked. Task scheduling
+ * might also happen at that point. The CPU eventually returns from the
+ * interrupt or the call to schedule() and the suspended flash operation
+ * is resumed for the remainder of the delay period.
+ *
+ * Warning: this function _will_ fool interrupt latency tracing tools.
+ */
+
+static int __xipram xip_wait_for_operation(
+ struct map_info *map, struct flchip *chip,
+ unsigned long adr, unsigned int chip_op_time_max)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+ map_word status, OK = CMD(0x80);
+ unsigned long usec, suspended, start, done;
+ flstate_t oldstate, newstate;
+
+ start = xip_currtime();
+ usec = chip_op_time_max;
+ if (usec == 0)
+ usec = 500000;
+ done = 0;
+
+ do {
+ cpu_relax();
+ if (xip_irqpending() && cfip &&
+ ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
+ (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
+ (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
+ /*
+ * Let's suspend the erase or write operation when
+ * supported. Note that we currently don't try to
+ * suspend interleaved chips if there is already
+ * another operation suspended (imagine what happens
+ * when one chip was already done with the current
+ * operation while another chip suspended it, then
+ * we resume the whole thing at once). Yes, it
+ * can happen!
+ */
+ usec -= done;
+ map_write(map, CMD(0xb0), adr);
+ map_write(map, CMD(0x70), adr);
+ suspended = xip_currtime();
+ do {
+ if (xip_elapsed_since(suspended) > 100000) {
+ /*
+ * The chip doesn't want to suspend
+ * after waiting for 100 msecs.
+ * This is a critical error but there
+ * is not much we can do here.
+ */
+ return -EIO;
+ }
+ status = map_read(map, adr);
+ } while (!map_word_andequal(map, status, OK, OK));
+
+ /* Suspend succeeded */
+ oldstate = chip->state;
+ if (oldstate == FL_ERASING) {
+ if (!map_word_bitsset(map, status, CMD(0x40)))
+ break;
+ newstate = FL_XIP_WHILE_ERASING;
+ chip->erase_suspended = 1;
+ } else {
+ if (!map_word_bitsset(map, status, CMD(0x04)))
+ break;
+ newstate = FL_XIP_WHILE_WRITING;
+ chip->write_suspended = 1;
+ }
+ chip->state = newstate;
+ map_write(map, CMD(0xff), adr);
+ (void) map_read(map, adr);
+ xip_iprefetch();
+ local_irq_enable();
+ mutex_unlock(&chip->mutex);
+ xip_iprefetch();
+ cond_resched();
+
+ /*
+ * We're back. However someone else might have
+ * decided to go write to the chip if we are in
+ * a suspended erase state. If so let's wait
+ * until it's done.
+ */
+ mutex_lock(&chip->mutex);
+ while (chip->state != newstate) {
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ }
+ /* Disallow XIP again */
+ local_irq_disable();
+
+ /* Resume the write or erase operation */
+ map_write(map, CMD(0xd0), adr);
+ map_write(map, CMD(0x70), adr);
+ chip->state = oldstate;
+ start = xip_currtime();
+ } else if (usec >= 1000000/HZ) {
+ /*
+ * Try to save on CPU power when waiting delay
+ * is at least a system timer tick period.
+ * No need to be extremely accurate here.
+ */
+ xip_cpu_idle();
+ }
+ status = map_read(map, adr);
+ done = xip_elapsed_since(start);
+ } while (!map_word_andequal(map, status, OK, OK)
+ && done < usec);
+
+ return (done >= usec) ? -ETIME : 0;
+}
+
+/*
+ * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
+ * the flash is actively programming or erasing since we have to poll for
+ * the operation to complete anyway. We can't do that in a generic way with
+ * a XIP setup so do it before the actual flash operation in this case
+ * and stub it out from INVAL_CACHE_AND_WAIT.
+ */
+#define XIP_INVAL_CACHED_RANGE(map, from, size) \
+ INVALIDATE_CACHED_RANGE(map, from, size)
+
+#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
+ xip_wait_for_operation(map, chip, cmd_adr, usec_max)
+
+#else
+
+#define xip_disable(map, chip, adr)
+#define xip_enable(map, chip, adr)
+#define XIP_INVAL_CACHED_RANGE(x...)
+#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
+
+static int inval_cache_and_wait_for_operation(
+ struct map_info *map, struct flchip *chip,
+ unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
+ unsigned int chip_op_time, unsigned int chip_op_time_max)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK = CMD(0x80);
+ int chip_state = chip->state;
+ unsigned int timeo, sleep_time, reset_timeo;
+
+ mutex_unlock(&chip->mutex);
+ if (inval_len)
+ INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
+ mutex_lock(&chip->mutex);
+
+ timeo = chip_op_time_max;
+ if (!timeo)
+ timeo = 500000;
+ reset_timeo = timeo;
+ sleep_time = chip_op_time / 2;
+
+ for (;;) {
+ if (chip->state != chip_state) {
+ /* Someone's suspended the operation: sleep */
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ continue;
+ }
+
+ status = map_read(map, cmd_adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ if (chip->erase_suspended && chip_state == FL_ERASING) {
+ /* Erase suspend occurred while sleeping: reset the timeout */
+ timeo = reset_timeo;
+ chip->erase_suspended = 0;
+ }
+ if (chip->write_suspended && chip_state == FL_WRITING) {
+ /* Write suspend occurred while sleeping: reset the timeout */
+ timeo = reset_timeo;
+ chip->write_suspended = 0;
+ }
+ if (!timeo) {
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+ return -ETIME;
+ }
+
+ /* OK Still waiting. Drop the lock, wait a while and retry. */
+ mutex_unlock(&chip->mutex);
+ if (sleep_time >= 1000000/HZ) {
+ /*
+ * Half of the normal delay still remaining
+ * can be performed with a sleeping delay instead
+ * of busy waiting.
+ */
+ msleep(sleep_time/1000);
+ timeo -= sleep_time;
+ sleep_time = 1000000/HZ;
+ } else {
+ udelay(1);
+ cond_resched();
+ timeo--;
+ }
+ mutex_lock(&chip->mutex);
+ }
+
+ /* Done and happy. */
+ chip->state = FL_STATUS;
+ return 0;
+}
+
+#endif
+
+#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
+ INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
+
+
+static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
+{
+ unsigned long cmd_addr;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+ adr += chip->start;
+
+ /* Ensure cmd read/writes are aligned. */
+ cmd_addr = adr & ~(map_bankwidth(map)-1);
+
+ mutex_lock(&chip->mutex);
+
+ ret = get_chip(map, chip, cmd_addr, FL_POINT);
+
+ if (!ret) {
+ if (chip->state != FL_POINT && chip->state != FL_READY)
+ map_write(map, CMD(0xff), cmd_addr);
+
+ chip->state = FL_POINT;
+ chip->ref_point_counter++;
+ }
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs, last_end = 0;
+ int chipnum;
+ int ret;
+
+ if (!map->virt)
+ return -EINVAL;
+
+ /* Now lock the chip(s) to POINT state */
+
+ /* ofs: offset within the first chip that the first read should start */
+ chipnum = (from >> cfi->chipshift);
+ ofs = from - (chipnum << cfi->chipshift);
+
+ *virt = map->virt + cfi->chips[chipnum].start + ofs;
+ if (phys)
+ *phys = map->phys + cfi->chips[chipnum].start + ofs;
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= cfi->numchips)
+ break;
+
+ /* We cannot point across chips that are virtually disjoint */
+ if (!last_end)
+ last_end = cfi->chips[chipnum].start;
+ else if (cfi->chips[chipnum].start != last_end)
+ break;
+
+ if ((len + ofs -1) >> cfi->chipshift)
+ thislen = (1<<cfi->chipshift) - ofs;
+ else
+ thislen = len;
+
+ ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
+ if (ret)
+ break;
+
+ *retlen += thislen;
+ len -= thislen;
+
+ ofs = 0;
+ last_end += 1 << cfi->chipshift;
+ chipnum++;
+ }
+ return 0;
+}
+
+static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs;
+ int chipnum, err = 0;
+
+ /* Now unlock the chip(s) POINT state */
+
+ /* ofs: offset within the first chip that the first read should start */
+ chipnum = (from >> cfi->chipshift);
+ ofs = from - (chipnum << cfi->chipshift);
+
+ while (len && !err) {
+ unsigned long thislen;
+ struct flchip *chip;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ /* Only form the chip pointer once chipnum is known to be in range */
+ chip = &cfi->chips[chipnum];
+
+ if ((len + ofs -1) >> cfi->chipshift)
+ thislen = (1<<cfi->chipshift) - ofs;
+ else
+ thislen = len;
+
+ mutex_lock(&chip->mutex);
+ if (chip->state == FL_POINT) {
+ chip->ref_point_counter--;
+ if(chip->ref_point_counter == 0)
+ chip->state = FL_READY;
+ } else {
+ printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
+ err = -EINVAL;
+ }
+
+ put_chip(map, chip, chip->start);
+ mutex_unlock(&chip->mutex);
+
+ len -= thislen;
+ ofs = 0;
+ chipnum++;
+ }
+
+ return err;
+}
+
+static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
+{
+ unsigned long cmd_addr;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+ adr += chip->start;
+
+ /* Ensure cmd read/writes are aligned. */
+ cmd_addr = adr & ~(map_bankwidth(map)-1);
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, cmd_addr, FL_READY);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ if (chip->state != FL_POINT && chip->state != FL_READY) {
+ map_write(map, CMD(0xff), cmd_addr);
+
+ chip->state = FL_READY;
+ }
+
+ map_copy_from(map, buf, adr, len);
+
+ put_chip(map, chip, cmd_addr);
+
+ mutex_unlock(&chip->mutex);
+ return 0;
+}
+
+static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs;
+ int chipnum;
+ int ret = 0;
+
+ /* ofs: offset within the first chip that the first read should start */
+ chipnum = (from >> cfi->chipshift);
+ ofs = from - (chipnum << cfi->chipshift);
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= cfi->numchips)
+ break;
+
+ if ((len + ofs -1) >> cfi->chipshift)
+ thislen = (1<<cfi->chipshift) - ofs;
+ else
+ thislen = len;
+
+ ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
+ if (ret)
+ break;
+
+ *retlen += thislen;
+ len -= thislen;
+ buf += thislen;
+
+ ofs = 0;
+ chipnum++;
+ }
+ return ret;
+}
+
+static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+ unsigned long adr, map_word datum, int mode)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, write_cmd;
+ int ret;
+
+ adr += chip->start;
+
+ switch (mode) {
+ case FL_WRITING:
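+ /* Intel Performance chips take 0x41 for word program,
+ everything else the classic 0x40 command */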
+ write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
+ break;
+ case FL_OTP_WRITE:
+ write_cmd = CMD(0xc0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, mode);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
+ ENABLE_VPP(map);
+ xip_disable(map, chip, adr);
+ map_write(map, write_cmd, adr);
+ map_write(map, datum, adr);
+ chip->state = mode;
+
+ ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+ adr, map_bankwidth(map),
+ chip->word_write_time,
+ chip->word_write_time_max);
+ if (ret) {
+ xip_enable(map, chip, adr);
+ printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
+ goto out;
+ }
+
+ /* check for errors */
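+ /* 0x1a = SR.4 (program error) | SR.3 (VPP low) | SR.1 (block
+ locked); MERGESTATUS() collapses the per-chip lanes of an
+ interleaved status word into one value. */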
+ status = map_read(map, adr);
+ if (map_word_bitsset(map, status, CMD(0x1a))) {
+ unsigned long chipstatus = MERGESTATUS(status);
+
+ /* reset status */
+ map_write(map, CMD(0x50), adr);
+ map_write(map, CMD(0x70), adr);
+ xip_enable(map, chip, adr);
+
+ if (chipstatus & 0x02) {
+ ret = -EROFS;
+ } else if (chipstatus & 0x08) {
+ printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
+ ret = -EIO;
+ } else {
+ printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
+ ret = -EINVAL;
+ }
+
+ goto out;
+ }
+
+ xip_enable(map, chip, adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+
+static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+ int chipnum;
+ unsigned long ofs;
+
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
+
+ /* If it's not bus-aligned, do the first byte write */
+ if (ofs & (map_bankwidth(map)-1)) {
+ unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
+ int gap = ofs - bus_ofs;
+ int n;
+ map_word datum;
+
+ n = min_t(int, len, map_bankwidth(map)-gap);
+ datum = map_word_ff(map);
+ datum = map_word_load_partial(map, datum, buf, gap, n);
+
+ ret = do_write_oneword(map, &cfi->chips[chipnum],
+ bus_ofs, datum, FL_WRITING);
+ if (ret)
+ return ret;
+
+ len -= n;
+ ofs += n;
+ buf += n;
+ (*retlen) += n;
+
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ while(len >= map_bankwidth(map)) {
+ map_word datum = map_word_load(map, buf);
+
+ ret = do_write_oneword(map, &cfi->chips[chipnum],
+ ofs, datum, FL_WRITING);
+ if (ret)
+ return ret;
+
+ ofs += map_bankwidth(map);
+ buf += map_bankwidth(map);
+ (*retlen) += map_bankwidth(map);
+ len -= map_bankwidth(map);
+
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ if (len & (map_bankwidth(map)-1)) {
+ map_word datum;
+
+ datum = map_word_ff(map);
+ datum = map_word_load_partial(map, datum, buf, 0, len);
+
+ ret = do_write_oneword(map, &cfi->chips[chipnum],
+ ofs, datum, FL_WRITING);
+ if (ret)
+ return ret;
+
+ (*retlen) += len;
+ }
+
+ return 0;
+}
+
+
+static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+ unsigned long adr, const struct kvec **pvec,
+ unsigned long *pvec_seek, int len)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, write_cmd, datum;
+ unsigned long cmd_adr;
+ int ret, wbufsize, word_gap, words;
+ const struct kvec *vec;
+ unsigned long vec_seek;
+ unsigned long initial_adr;
+ int initial_len = len;
+
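+ /* MaxBufWriteSize is log2 of one chip's write buffer in bytes;
+ scale by the interleave to get the mapped buffer size. */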
+ wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ adr += chip->start;
+ initial_adr = adr;
+ cmd_adr = adr & ~(wbufsize-1);
+
+ /* Sharp LH28F640BF chips need the first address for the
+ * Page Buffer Program command. See Table 5 of
+ * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
+ if (is_LH28F640BF(cfi))
+ cmd_adr = adr;
+
+ /* Build the interleave-wide buffer-write command word only once */
+ write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, cmd_adr, FL_WRITING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
+ ENABLE_VPP(map);
+ xip_disable(map, chip, cmd_adr);
+
+ /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
+ [...], the device will not accept any more Write to Buffer commands".
+ So we must check here and reset those bits if they're set. Otherwise
+ we're just pissing in the wind */
+ if (chip->state != FL_STATUS) {
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+ }
+ status = map_read(map, cmd_adr);
+ if (map_word_bitsset(map, status, CMD(0x30))) {
+ xip_enable(map, chip, cmd_adr);
+ printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
+ xip_disable(map, chip, cmd_adr);
+ map_write(map, CMD(0x50), cmd_adr);
+ map_write(map, CMD(0x70), cmd_adr);
+ }
+
+ chip->state = FL_WRITING_TO_BUFFER;
+ map_write(map, write_cmd, cmd_adr);
+ ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
+ if (ret) {
+ /* Argh. Not ready for write to buffer */
+ map_word Xstatus = map_read(map, cmd_adr);
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+ status = map_read(map, cmd_adr);
+ map_write(map, CMD(0x50), cmd_adr);
+ map_write(map, CMD(0x70), cmd_adr);
+ xip_enable(map, chip, cmd_adr);
+ printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
+ map->name, Xstatus.x[0], status.x[0]);
+ goto out;
+ }
+
+ /* Figure out the number of words to write */
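+ /* The chip wants the bus-word count minus one. -adr & (bw-1) is
+ the distance to the next bus-word boundary: when adr is aligned
+ we simply program words - 1; otherwise the extra leading word,
+ padded with 0xff, stands in for the missing decrement. */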
+ word_gap = (-adr & (map_bankwidth(map)-1));
+ words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
+ if (!word_gap) {
+ words--;
+ } else {
+ word_gap = map_bankwidth(map) - word_gap;
+ adr -= word_gap;
+ datum = map_word_ff(map);
+ }
+
+ /* Write length of data to come */
+ map_write(map, CMD(words), cmd_adr);
+
+ /* Write data */
+ vec = *pvec;
+ vec_seek = *pvec_seek;
+ do {
+ int n = map_bankwidth(map) - word_gap;
+ if (n > vec->iov_len - vec_seek)
+ n = vec->iov_len - vec_seek;
+ if (n > len)
+ n = len;
+
+ if (!word_gap && len < map_bankwidth(map))
+ datum = map_word_ff(map);
+
+ datum = map_word_load_partial(map, datum,
+ vec->iov_base + vec_seek,
+ word_gap, n);
+
+ len -= n;
+ word_gap += n;
+ if (!len || word_gap == map_bankwidth(map)) {
+ map_write(map, datum, adr);
+ adr += map_bankwidth(map);
+ word_gap = 0;
+ }
+
+ vec_seek += n;
+ if (vec_seek == vec->iov_len) {
+ vec++;
+ vec_seek = 0;
+ }
+ } while (len);
+ *pvec = vec;
+ *pvec_seek = vec_seek;
+
+ /* GO GO GO */
+ map_write(map, CMD(0xd0), cmd_adr);
+ chip->state = FL_WRITING;
+
+ ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
+ initial_adr, initial_len,
+ chip->buffer_write_time,
+ chip->buffer_write_time_max);
+ if (ret) {
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+ xip_enable(map, chip, cmd_adr);
+ printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
+ goto out;
+ }
+
+ /* check for errors */
+ status = map_read(map, cmd_adr);
+ if (map_word_bitsset(map, status, CMD(0x1a))) {
+ unsigned long chipstatus = MERGESTATUS(status);
+
+ /* reset status */
+ map_write(map, CMD(0x50), cmd_adr);
+ map_write(map, CMD(0x70), cmd_adr);
+ xip_enable(map, chip, cmd_adr);
+
+ if (chipstatus & 0x02) {
+ ret = -EROFS;
+ } else if (chipstatus & 0x08) {
+ printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
+ ret = -EIO;
+ } else {
+ printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
+ ret = -EINVAL;
+ }
+
+ goto out;
+ }
+
+ xip_enable(map, chip, cmd_adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, cmd_adr);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ int ret;
+ int chipnum;
+ unsigned long ofs, vec_seek, i;
+ size_t len = 0;
+
+ for (i = 0; i < count; i++)
+ len += vecs[i].iov_len;
+
+ if (!len)
+ return 0;
+
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
+ vec_seek = 0;
+
+ do {
+ /* We must not cross write block boundaries */
+ int size = wbufsize - (ofs & (wbufsize-1));
+
+ if (size > len)
+ size = len;
+ ret = do_write_buffer(map, &cfi->chips[chipnum],
+ ofs, &vecs, &vec_seek, size);
+ if (ret)
+ return ret;
+
+ ofs += size;
+ (*retlen) += size;
+ len -= size;
+
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+
+ /* Be nice and reschedule with the chip in a usable state for other
+ processes. */
+ cond_resched();
+
+ } while (len);
+
+ return 0;
+}
+
+static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ struct kvec vec;
+
+ vec.iov_base = (void *) buf;
+ vec.iov_len = len;
+
+ return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
+}
+
+static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status;
+ int retries = 3;
+ int ret;
+
+ adr += chip->start;
+
+ retry:
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_ERASING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ XIP_INVAL_CACHED_RANGE(map, adr, len);
+ ENABLE_VPP(map);
+ xip_disable(map, chip, adr);
+
+ /* Clear the status register first */
+ map_write(map, CMD(0x50), adr);
+
+ /* Now erase */
+ map_write(map, CMD(0x20), adr);
+ map_write(map, CMD(0xD0), adr);
+ chip->state = FL_ERASING;
+ chip->erase_suspended = 0;
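+ /* Record the block being erased so chip_ready() can refuse to
+ suspend the erase for accesses that fall inside it. */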
+ chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(len - 1);
+
+ ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+ adr, len,
+ chip->erase_time,
+ chip->erase_time_max);
+ if (ret) {
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ xip_enable(map, chip, adr);
+ printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
+ goto out;
+ }
+
+ /* We've broken this before. It doesn't hurt to be safe */
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ status = map_read(map, adr);
+
+ /* check for errors */
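+ /* 0x3a adds SR.5 (erase error) to the write-error mask; SR.5 and
+ SR.4 set together indicate a bad command sequence. */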
+ if (map_word_bitsset(map, status, CMD(0x3a))) {
+ unsigned long chipstatus = MERGESTATUS(status);
+
+ /* Reset the error bits */
+ map_write(map, CMD(0x50), adr);
+ map_write(map, CMD(0x70), adr);
+ xip_enable(map, chip, adr);
+
+ if ((chipstatus & 0x30) == 0x30) {
+ printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
+ ret = -EINVAL;
+ } else if (chipstatus & 0x02) {
+ /* Protection bit set */
+ ret = -EROFS;
+ } else if (chipstatus & 0x8) {
+ /* Voltage */
+ printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
+ ret = -EIO;
+ } else if (chipstatus & 0x20 && retries--) {
+ printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
+ DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+ goto retry;
+ } else {
+ printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
+ ret = -EIO;
+ }
+
+ goto out;
+ }
+
+ xip_enable(map, chip, adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
+ instr->len, NULL);
+}
+
+static void cfi_intelext_sync (struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+ int ret = 0;
+
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, chip->start, FL_SYNCING);
+
+ if (!ret) {
+ chip->oldstate = chip->state;
+ chip->state = FL_SYNCING;
+ /* No need to wake_up() on this state change -
+ * as the whole point is that nobody can do anything
+ * with the chip now anyway.
+ */
+ }
+ mutex_unlock(&chip->mutex);
+ }
+
+ /* Unlock the chips again */
+
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_SYNCING) {
+ chip->state = chip->oldstate;
+ chip->oldstate = FL_READY;
+ wake_up(&chip->wq);
+ }
+ mutex_unlock(&chip->mutex);
+ }
+}
+
+static int __xipram do_getlockstatus_oneblock(struct map_info *map,
+ struct flchip *chip,
+ unsigned long adr,
+ int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int status, ofs_factor = cfi->interleave * cfi->device_type;
+
+ adr += chip->start;
+ xip_disable(map, chip, adr+(2*ofs_factor));
+ map_write(map, CMD(0x90), adr+(2*ofs_factor));
+ chip->state = FL_JEDEC_QUERY;
+ status = cfi_read_query(map, adr+(2*ofs_factor));
+ xip_enable(map, chip, 0);
+ return status;
+}
+
+#ifdef DEBUG_LOCK_BITS
+static int __xipram do_printlockstatus_oneblock(struct map_info *map,
+ struct flchip *chip,
+ unsigned long adr,
+ int len, void *thunk)
+{
+ printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
+ adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
+ return 0;
+}
+#endif
+
+#define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
+#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
+
+static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+ int mdelay;
+ int ret;
+
+ adr += chip->start;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ ENABLE_VPP(map);
+ xip_disable(map, chip, adr);
+
+ map_write(map, CMD(0x60), adr);
+ if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
+ map_write(map, CMD(0x01), adr);
+ chip->state = FL_LOCKING;
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
+ map_write(map, CMD(0xD0), adr);
+ chip->state = FL_UNLOCKING;
+ } else
+ BUG();
+
+ /*
+ * If Instant Individual Block Locking is supported, there is no
+ * need to delay. Otherwise, unlocking may take up to 1.4 seconds
+ * on some Intel flashes, so let's use a maximum of 1.5 seconds
+ * (1500 ms) as the timeout.
+ *
+ * See "Clear Block Lock-Bits Time" on page 40 of the
+ * "3 Volt Intel StrataFlash Memory" 28F128J3, 28F640J3, 28F320J3
+ * manual from February 2003.
+ */
+ mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
+
+ ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
+ if (ret) {
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ xip_enable(map, chip, adr);
+ printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
+ goto out;
+ }
+
+ xip_enable(map, chip, adr);
+ out: DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+#ifdef DEBUG_LOCK_BITS
+ printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
+ __func__, ofs, len);
+ cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
+ ofs, len, NULL);
+#endif
+
+ ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
+ ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
+
+#ifdef DEBUG_LOCK_BITS
+ printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
+ __func__, ret);
+ cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
+ ofs, len, NULL);
+#endif
+
+ return ret;
+}
+
+static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+#ifdef DEBUG_LOCK_BITS
+ printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
+ __func__, ofs, len);
+ cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
+ ofs, len, NULL);
+#endif
+
+ ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
+ ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
+
+#ifdef DEBUG_LOCK_BITS
+ printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
+ __func__, ret);
+ cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
+ ofs, len, NULL);
+#endif
+
+ return ret;
+}
+
+static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
+ ofs, len, NULL) ? 1 : 0;
+}
+
+#ifdef CONFIG_MTD_OTP
+
+typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
+ u_long data_offset, u_char *buf, u_int size,
+ u_long prot_offset, u_int groupno, u_int groupsize);
+
+static int __xipram
+do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
+ u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ /* let's ensure we're not reading back cached data from array mode */
+ INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
+
+ xip_disable(map, chip, chip->start);
+ if (chip->state != FL_JEDEC_QUERY) {
+ map_write(map, CMD(0x90), chip->start);
+ chip->state = FL_JEDEC_QUERY;
+ }
+ map_copy_from(map, buf, chip->start + offset, size);
+ xip_enable(map, chip, chip->start);
+
+ /* then ensure we don't keep OTP data in the cache */
+ INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
+
+ put_chip(map, chip, chip->start);
+ mutex_unlock(&chip->mutex);
+ return 0;
+}
+
+static int
+do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
+ u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
+{
+ int ret;
+
+ while (size) {
+ unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
+ int gap = offset - bus_ofs;
+ int n = min_t(int, size, map_bankwidth(map)-gap);
+ map_word datum = map_word_ff(map);
+
+ datum = map_word_load_partial(map, datum, buf, gap, n);
+ ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
+ if (ret)
+ return ret;
+
+ offset += n;
+ buf += n;
+ size -= n;
+ }
+
+ return 0;
+}
+
+static int
+do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
+ u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word datum;
+
+ /* make sure area matches group boundaries */
+ if (size != grpsz)
+ return -EXDEV;
+
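+ /* Lock the group by programming its bit in the protection lock
+ register from 1 to 0; being OTP, the bit cannot be programmed
+ back to 1. */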
+ datum = map_word_ff(map);
+ datum = map_word_clr(map, datum, CMD(1 << grpno));
+ return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
+}
+
+static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf,
+ otp_op_t action, int user_regs)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+ struct flchip *chip;
+ struct cfi_intelext_otpinfo *otp;
+ u_long devsize, reg_prot_offset, data_offset;
+ u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
+ u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
+ int ret;
+
+ *retlen = 0;
+
+ /* Check that we actually have some OTP registers */
+ if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
+ return -ENODATA;
+
+ /* we need real chips here not virtual ones */
+ devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
+ chip_step = devsize >> cfi->chipshift;
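+ /* After the partition fixup, chipshift describes one partition,
+ so chip_step counts the virtual chips per physical chip. */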
+ chip_num = 0;
+
+ /* Some chips have OTP located in the _top_ partition only.
+ For example: Intel 28F256L18T (T means top-parameter device) */
+ if (cfi->mfr == CFI_MFR_INTEL) {
+ switch (cfi->id) {
+ case 0x880b:
+ case 0x880c:
+ case 0x880d:
+ chip_num = chip_step - 1;
+ }
+ }
+
+ for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
+ chip = &cfi->chips[chip_num];
+ otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
+
+ /* first OTP region */
+ field = 0;
+ reg_prot_offset = extp->ProtRegAddr;
+ reg_fact_groups = 1;
+ reg_fact_size = 1 << extp->FactProtRegSize;
+ reg_user_groups = 1;
+ reg_user_size = 1 << extp->UserProtRegSize;
+
+ while (len > 0) {
+ /* flash geometry fixup */
+ data_offset = reg_prot_offset + 1;
+ data_offset *= cfi->interleave * cfi->device_type;
+ reg_prot_offset *= cfi->interleave * cfi->device_type;
+ reg_fact_size *= cfi->interleave;
+ reg_user_size *= cfi->interleave;
+
+ if (user_regs) {
+ groups = reg_user_groups;
+ groupsize = reg_user_size;
+ /* skip over factory reg area */
+ groupno = reg_fact_groups;
+ data_offset += reg_fact_groups * reg_fact_size;
+ } else {
+ groups = reg_fact_groups;
+ groupsize = reg_fact_size;
+ groupno = 0;
+ }
+
+ while (len > 0 && groups > 0) {
+ if (!action) {
+ /*
+ * Special case: if action is NULL
+ * we fill buf with otp_info records.
+ */
+ struct otp_info *otpinfo;
+ map_word lockword;
+ /* len is a size_t: check before subtracting to avoid wrapping */
+ if (len <= sizeof(struct otp_info))
+ return -ENOSPC;
+ len -= sizeof(struct otp_info);
+ ret = do_otp_read(map, chip,
+ reg_prot_offset,
+ (u_char *)&lockword,
+ map_bankwidth(map),
+ 0, 0, 0);
+ if (ret)
+ return ret;
+ otpinfo = (struct otp_info *)buf;
+ otpinfo->start = from;
+ otpinfo->length = groupsize;
+ otpinfo->locked =
+ !map_word_bitsset(map, lockword,
+ CMD(1 << groupno));
+ from += groupsize;
+ buf += sizeof(*otpinfo);
+ *retlen += sizeof(*otpinfo);
+ } else if (from >= groupsize) {
+ from -= groupsize;
+ data_offset += groupsize;
+ } else {
+ int size = groupsize;
+ data_offset += from;
+ size -= from;
+ from = 0;
+ if (size > len)
+ size = len;
+ ret = action(map, chip, data_offset,
+ buf, size, reg_prot_offset,
+ groupno, groupsize);
+ if (ret < 0)
+ return ret;
+ buf += size;
+ len -= size;
+ *retlen += size;
+ data_offset += size;
+ }
+ groupno++;
+ groups--;
+ }
+
+ /* next OTP region */
+ if (++field == extp->NumProtectionFields)
+ break;
+ reg_prot_offset = otp->ProtRegAddr;
+ reg_fact_groups = otp->FactGroups;
+ reg_fact_size = 1 << otp->FactProtRegSize;
+ reg_user_groups = otp->UserGroups;
+ reg_user_size = 1 << otp->UserProtRegSize;
+ otp++;
+ }
+ }
+
+ return 0;
+}
+
+static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ return cfi_intelext_otp_walk(mtd, from, len, retlen,
+ buf, do_otp_read, 0);
+}
+
+static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ return cfi_intelext_otp_walk(mtd, from, len, retlen,
+ buf, do_otp_read, 1);
+}
+
+static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ const u_char *buf)
+{
+ return cfi_intelext_otp_walk(mtd, from, len, retlen,
+ (u_char *)buf, do_otp_write, 1);
+}
+
+static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
+ loff_t from, size_t len)
+{
+ size_t retlen;
+ return cfi_intelext_otp_walk(mtd, from, len, &retlen,
+ NULL, do_otp_lock, 1);
+}
+
+static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+
+{
+ return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
+ NULL, 0);
+}
+
+static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
+ NULL, 1);
+}
+
+#endif
+
+static void cfi_intelext_save_locks(struct mtd_info *mtd)
+{
+ struct mtd_erase_region_info *region;
+ int block, status, i;
+ unsigned long adr;
+ size_t len;
+
+ for (i = 0; i < mtd->numeraseregions; i++) {
+ region = &mtd->eraseregions[i];
+ if (!region->lockmap)
+ continue;
+
+ for (block = 0; block < region->numblocks; block++){
+ len = region->erasesize;
+ adr = region->offset + block * len;
+
+ status = cfi_varsize_frob(mtd,
+ do_getlockstatus_oneblock, adr, len, NULL);
+ if (status)
+ set_bit(block, region->lockmap);
+ else
+ clear_bit(block, region->lockmap);
+ }
+ }
+}
+
+static int cfi_intelext_suspend(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+ int i;
+ struct flchip *chip;
+ int ret = 0;
+
+ if ((mtd->flags & MTD_POWERUP_LOCK)
+ && extp && (extp->FeatureSupport & (1 << 5)))
+ cfi_intelext_save_locks(mtd);
+
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ switch (chip->state) {
+ case FL_READY:
+ case FL_STATUS:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ if (chip->oldstate == FL_READY) {
+ /* place the chip in a known state before suspend */
+ map_write(map, CMD(0xFF), cfi->chips[i].start);
+ chip->oldstate = chip->state;
+ chip->state = FL_PM_SUSPENDED;
+ /* No need to wake_up() on this state change -
+ * as the whole point is that nobody can do anything
+ * with the chip now anyway.
+ */
+ } else {
+ /* There seems to be an operation pending. We must wait for it. */
+ printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
+ ret = -EAGAIN;
+ }
+ break;
+ default:
+ /* Should we actually wait? Once upon a time these routines weren't
+ allowed to. Or should we return -EAGAIN, because the upper layers
+ ought to have already shut down anything which was using the device
+ anyway? The latter for now. */
+ printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
+ ret = -EAGAIN;
+ break;
+ case FL_PM_SUSPENDED:
+ break;
+ }
+ mutex_unlock(&chip->mutex);
+ }
+
+ /* Unlock the chips again */
+
+ if (ret) {
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_PM_SUSPENDED) {
+ /* No need to force it into a known state here,
+ because we're returning failure, and it didn't
+ get power cycled */
+ chip->state = chip->oldstate;
+ chip->oldstate = FL_READY;
+ wake_up(&chip->wq);
+ }
+ mutex_unlock(&chip->mutex);
+ }
+ }
+
+ return ret;
+}
+
+static void cfi_intelext_restore_locks(struct mtd_info *mtd)
+{
+ struct mtd_erase_region_info *region;
+ int block, i;
+ unsigned long adr;
+ size_t len;
+
+ for (i = 0; i < mtd->numeraseregions; i++) {
+ region = &mtd->eraseregions[i];
+ if (!region->lockmap)
+ continue;
+
+ for_each_clear_bit(block, region->lockmap, region->numblocks) {
+ len = region->erasesize;
+ adr = region->offset + block * len;
+ cfi_intelext_unlock(mtd, adr, len);
+ }
+ }
+}
+
+static void cfi_intelext_resume(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+ int i;
+ struct flchip *chip;
+
+ for (i=0; i<cfi->numchips; i++) {
+
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ /* Go to known state. Chip may have been power cycled */
+ if (chip->state == FL_PM_SUSPENDED) {
+ /* Refresh LH28F640BF Partition Config. Register */
+ fixup_LH28F640BF(mtd);
+ map_write(map, CMD(0xFF), cfi->chips[i].start);
+ chip->oldstate = chip->state = FL_READY;
+ wake_up(&chip->wq);
+ }
+
+ mutex_unlock(&chip->mutex);
+ }
+
+ if ((mtd->flags & MTD_POWERUP_LOCK)
+ && extp && (extp->FeatureSupport & (1 << 5)))
+ cfi_intelext_restore_locks(mtd);
+}
+
+static int cfi_intelext_reset(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i, ret;
+
+ for (i=0; i < cfi->numchips; i++) {
+ struct flchip *chip = &cfi->chips[i];
+
+ /* force the completion of any ongoing operation
+ and switch to array mode so any bootloader in
+ flash is accessible for soft reboot. */
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
+ if (!ret) {
+ map_write(map, CMD(0xff), chip->start);
+ chip->state = FL_SHUTDOWN;
+ put_chip(map, chip, chip->start);
+ }
+ mutex_unlock(&chip->mutex);
+ }
+
+ return 0;
+}
+
+static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
+ void *v)
+{
+ struct mtd_info *mtd;
+
+ mtd = container_of(nb, struct mtd_info, reboot_notifier);
+ cfi_intelext_reset(mtd);
+ return NOTIFY_DONE;
+}
+
+static void cfi_intelext_destroy(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct mtd_erase_region_info *region;
+ int i;
+ cfi_intelext_reset(mtd);
+ unregister_reboot_notifier(&mtd->reboot_notifier);
+ kfree(cfi->cmdset_priv);
+ kfree(cfi->cfiq);
+ kfree(cfi->chips[0].priv);
+ kfree(cfi);
+ for (i = 0; i < mtd->numeraseregions; i++) {
+ region = &mtd->eraseregions[i];
+ kfree(region->lockmap);
+ }
+ kfree(mtd->eraseregions);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
+MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
+MODULE_ALIAS("cfi_cmdset_0003");
+MODULE_ALIAS("cfi_cmdset_0200");
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
new file mode 100644
index 0000000000..df589d9b4d
--- /dev/null
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -0,0 +1,3114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common Flash Interface support:
+ * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
+ *
+ * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
+ * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
+ * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
+ *
+ * 2_by_8 routines added by Simon Munton
+ *
+ * 4_by_16 work by Carolyn J. Smith
+ *
+ * XIP support hooks by Vitaly Wool (based on code for Intel flash
+ * by Nicolas Pitre)
+ *
+ * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
+ *
+ * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
+#include <linux/of.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/xip.h>
+
+#define AMD_BOOTLOC_BUG
+#define FORCE_WORD_WRITE 0
+
+#define MAX_RETRIES 3
+
+#define SST49LF004B 0x0060
+#define SST49LF040B 0x0050
+#define SST49LF008A 0x005a
+#define AT49BV6416 0x00d6
+#define S29GL064N_MN12 0x0c01
+
+/*
+ * Status Register bit description. Used by flash devices that don't
+ * support DQ polling (e.g. HyperFlash)
+ */
+#define CFI_SR_DRB BIT(7)
+#define CFI_SR_ESB BIT(5)
+#define CFI_SR_PSB BIT(4)
+#define CFI_SR_WBASB BIT(3)
+#define CFI_SR_SLSB BIT(1)
+
+enum cfi_quirks {
+ CFI_QUIRK_DQ_TRUE_DATA = BIT(0),
+};
+
+static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+#if !FORCE_WORD_WRITE
+static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+#endif
+static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
+static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
+static void cfi_amdstd_sync (struct mtd_info *);
+static int cfi_amdstd_suspend (struct mtd_info *);
+static void cfi_amdstd_resume (struct mtd_info *);
+static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
+static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
+ size_t *, struct otp_info *);
+static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
+ size_t *, struct otp_info *);
+static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
+ size_t *, u_char *);
+static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
+ size_t *, u_char *);
+static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
+ size_t *, const u_char *);
+static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);
+
+static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf);
+
+static void cfi_amdstd_destroy(struct mtd_info *);
+
+struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
+static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
+
+static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
+static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
+#include "fwh_lock.h"
+
+static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+
+static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+
+static struct mtd_chip_driver cfi_amdstd_chipdrv = {
+ .probe = NULL, /* Not usable directly */
+ .destroy = cfi_amdstd_destroy,
+ .name = "cfi_cmdset_0002",
+ .module = THIS_MODULE
+};
+
+/*
+ * Use the status register to poll for erase/write completion when DQ
+ * polling is not supported.  This is indicated by bits [1:0] of the
+ * SoftwareFeatures field in the CFI Primary Vendor-Specific Extended
+ * Query table, version 1.5: poll the status register only when the
+ * chip advertises status-register polling but not DQ polling.
+ */
+static int cfi_use_status_reg(struct cfi_private *cfi)
+{
+ struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
+ u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
+
+ return extp && extp->MinorVersion >= '5' &&
+ (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
+}
+
+static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status;
+
+ if (!cfi_use_status_reg(cfi))
+ return 0;
+
+ cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ status = map_read(map, adr);
+
+ /* The error bits are invalid while the chip's busy */
+ if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
+ return 0;
+
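+	/* 0x3a = ESB | PSB | WBASB | SLSB, i.e. any of the error bits */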
+ if (map_word_bitsset(map, status, CMD(0x3a))) {
+ unsigned long chipstatus = MERGESTATUS(status);
+
+ if (chipstatus & CFI_SR_ESB)
+ pr_err("%s erase operation failed, status %lx\n",
+ map->name, chipstatus);
+ if (chipstatus & CFI_SR_PSB)
+ pr_err("%s program operation failed, status %lx\n",
+ map->name, chipstatus);
+ if (chipstatus & CFI_SR_WBASB)
+ pr_err("%s buffer program command aborted, status %lx\n",
+ map->name, chipstatus);
+ if (chipstatus & CFI_SR_SLSB)
+ pr_err("%s sector write protected, status %lx\n",
+ map->name, chipstatus);
+
+ /* Erase/Program status bits are set on the operation failure */
+ if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
+ return 1;
+ }
+ return 0;
+}
+
+/* #define DEBUG_CFI_FEATURES */
+
+
+#ifdef DEBUG_CFI_FEATURES
+static void cfi_tell_features(struct cfi_pri_amdstd *extp)
+{
+ const char* erase_suspend[3] = {
+ "Not supported", "Read only", "Read/write"
+ };
+ const char* top_bottom[6] = {
+ "No WP", "8x8KiB sectors at top & bottom, no WP",
+ "Bottom boot", "Top boot",
+ "Uniform, Bottom WP", "Uniform, Top WP"
+ };
+
+ printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
+ printk(" Address sensitive unlock: %s\n",
+ (extp->SiliconRevision & 1) ? "Not required" : "Required");
+
+ if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
+ printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
+ else
+ printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
+
+ if (extp->BlkProt == 0)
+ printk(" Block protection: Not supported\n");
+ else
+ printk(" Block protection: %d sectors per group\n", extp->BlkProt);
+
+
+ printk(" Temporary block unprotect: %s\n",
+ extp->TmpBlkUnprotect ? "Supported" : "Not supported");
+ printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
+ printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
+ printk(" Burst mode: %s\n",
+ extp->BurstMode ? "Supported" : "Not supported");
+ if (extp->PageMode == 0)
+ printk(" Page mode: Not supported\n");
+ else
+ printk(" Page mode: %d word page\n", extp->PageMode << 2);
+
+ printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
+ extp->VppMin >> 4, extp->VppMin & 0xf);
+ printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
+ extp->VppMax >> 4, extp->VppMax & 0xf);
+
+ if (extp->TopBottom < ARRAY_SIZE(top_bottom))
+ printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
+ else
+ printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
+}
+#endif
+
+#ifdef AMD_BOOTLOC_BUG
+/* Wheee. Bring me the head of someone at AMD. */
+static void fixup_amd_bootblock(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
+ __u8 major = extp->MajorVersion;
+ __u8 minor = extp->MinorVersion;
+
+ if (((major << 8) | minor) < 0x3131) {
+ /* CFI version 1.0 => don't trust bootloc */
+
+ pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
+ map->name, cfi->mfr, cfi->id);
+
+ /* AFAICS all 29LV400 with a bottom boot block have a device ID
+ * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
+ * These were badly detected as they have the 0x80 bit set
+ * so treat them as a special case.
+ */
+	if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
+ /* Macronix added CFI to their 2nd generation
+ * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
+ * Fujitsu, Spansion, EON, ESI and older Macronix)
+ * has CFI.
+ *
+ * Therefore also check the manufacturer.
+ * This reduces the risk of false detection due to
+ * the 8-bit device ID.
+ */
+ (cfi->mfr == CFI_MFR_MACRONIX)) {
+ pr_debug("%s: Macronix MX29LV400C with bottom boot block"
+ " detected\n", map->name);
+ extp->TopBottom = 2; /* bottom boot */
+		} else if (cfi->id & 0x80) {
+ printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
+ extp->TopBottom = 3; /* top boot */
+ } else {
+ extp->TopBottom = 2; /* bottom boot */
+ }
+
+ pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
+ " deduced %s from Device ID\n", map->name, major, minor,
+ extp->TopBottom == 2 ? "bottom" : "top");
+ }
+}
+#endif
+
+#if !FORCE_WORD_WRITE
+static void fixup_use_write_buffers(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x2201)
+ return;
+
+ if (cfi->cfiq->BufWriteTimeoutTyp) {
+ pr_debug("Using buffer write method\n");
+ mtd->_write = cfi_amdstd_write_buffers;
+ }
+}
+#endif /* !FORCE_WORD_WRITE */
+
+/* Atmel chips don't use the same PRI format as AMD chips */
+static void fixup_convert_atmel_pri(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
+ struct cfi_pri_atmel atmel_pri;
+
+ memcpy(&atmel_pri, extp, sizeof(atmel_pri));
+ memset((char *)extp + 5, 0, sizeof(*extp) - 5);
+
+ if (atmel_pri.Features & 0x02)
+ extp->EraseSuspend = 2;
+
+ /* Some chips got it backwards... */
+ if (cfi->id == AT49BV6416) {
+ if (atmel_pri.BottomBoot)
+ extp->TopBottom = 3;
+ else
+ extp->TopBottom = 2;
+ } else {
+ if (atmel_pri.BottomBoot)
+ extp->TopBottom = 2;
+ else
+ extp->TopBottom = 3;
+ }
+
+ /* burst write mode not supported */
+ cfi->cfiq->BufWriteTimeoutTyp = 0;
+ cfi->cfiq->BufWriteTimeoutMax = 0;
+}
+
+static void fixup_use_secsi(struct mtd_info *mtd)
+{
+ /* Setup for chips with a secsi area */
+ mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
+ mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
+}
+
+static void fixup_use_erase_chip(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ if ((cfi->cfiq->NumEraseRegions == 1) &&
+ ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
+ mtd->_erase = cfi_amdstd_erase_chip;
+ }
+}
+
+/*
+ * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
+ * locked by default.
+ */
+static void fixup_use_atmel_lock(struct mtd_info *mtd)
+{
+ mtd->_lock = cfi_atmel_lock;
+ mtd->_unlock = cfi_atmel_unlock;
+ mtd->flags |= MTD_POWERUP_LOCK;
+}
+
+static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /*
+ * These flashes report two separate eraseblock regions based on the
+ * sector_erase-size and block_erase-size, although they both operate on the
+ * same memory. This is not allowed according to CFI, so we just pick the
+ * sector_erase-size.
+ */
+ cfi->cfiq->NumEraseRegions = 1;
+}
+
+static void fixup_sst39vf(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ fixup_old_sst_eraseregion(mtd);
+
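+	/*
+	 * With these offsets the standard unlock sequence becomes
+	 * 0xAA -> 0x5555 followed by 0x55 -> 0x2AAA, the JEDEC-style
+	 * addresses these older SST parts expect instead of the usual
+	 * CFI 0x555/0x2AA.
+	 */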
+ cfi->addr_unlock1 = 0x5555;
+ cfi->addr_unlock2 = 0x2AAA;
+}
+
+static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ fixup_old_sst_eraseregion(mtd);
+
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2AA;
+
+ cfi->sector_erase_cmd = CMD(0x50);
+}
+
+static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ fixup_sst39vf_rev_b(mtd);
+
+ /*
+ * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
+ * it should report a size of 8KBytes (0x0020*256).
+ */
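+	/*
+	 * (EraseRegionInfo packs the erase block count minus one in bits
+	 * [15:0] and the block size in 256-byte units in bits [31:16],
+	 * so 0x002003ff decodes as 0x3ff+1 = 1024 blocks of 0x0020*256
+	 * = 8KiB each.)
+	 */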
+ cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
+ pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
+ mtd->name);
+}
+
+static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
+ cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
+ pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
+ mtd->name);
+ }
+}
+
+static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
+ cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
+ pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
+ mtd->name);
+ }
+}
+
+static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /*
+ * S29NS512P flash uses more than 8bits to report number of sectors,
+ * which is not permitted by CFI.
+ */
+ cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
+ pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
+ mtd->name);
+}
+
+static void fixup_quirks(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ if (cfi->mfr == CFI_MFR_AMD && cfi->id == S29GL064N_MN12)
+ cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA;
+}
+
+/* Used to fix CFI-Tables of chips without Extended Query Tables */
+static struct cfi_fixup cfi_nopri_fixup_table[] = {
+ { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
+ { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
+ { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
+ { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
+ { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
+ { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
+ { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
+ { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
+ { 0, 0, NULL }
+};
+
+static struct cfi_fixup cfi_fixup_table[] = {
+ { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
+#ifdef AMD_BOOTLOC_BUG
+ { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
+ { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
+ { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
+#endif
+ { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
+ { CFI_MFR_AMD, S29GL064N_MN12, fixup_s29gl064n_sectors },
+ { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
+ { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
+ { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
+ { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
+ { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
+ { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
+ { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
+ { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
+#if !FORCE_WORD_WRITE
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
+#endif
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_quirks },
+ { 0, 0, NULL }
+};
+static struct cfi_fixup jedec_fixup_table[] = {
+ { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
+ { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
+ { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
+ { 0, 0, NULL }
+};
+
+static struct cfi_fixup fixup_table[] = {
+	/* The CFI vendor IDs and the JEDEC vendor IDs appear
+	 * to be common, and it looks like the device IDs are
+	 * as well.  This table picks up all the cases where
+	 * we know that to be the case.
+	 */
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
+ { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
+ { 0, 0, NULL }
+};
+
+
+static void cfi_fixup_major_minor(struct cfi_private *cfi,
+ struct cfi_pri_amdstd *extp)
+{
+ if (cfi->mfr == CFI_MFR_SAMSUNG) {
+ if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
+ (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
+ /*
+ * Samsung K8P2815UQB and K8D6x16UxM chips
+ * report major=0 / minor=0.
+ * K8D3x16UxC chips report major=3 / minor=3.
+ */
+ printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
+ " Extended Query version to 1.%c\n",
+ extp->MinorVersion);
+ extp->MajorVersion = '1';
+ }
+ }
+
+ /*
+ * SST 38VF640x chips report major=0xFF / minor=0xFF.
+ */
+ if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
+ extp->MajorVersion = '1';
+ extp->MinorVersion = '0';
+ }
+}
+
+static int is_m29ew(struct cfi_private *cfi)
+{
+ if (cfi->mfr == CFI_MFR_INTEL &&
+ ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
+ (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
+ return 1;
+ return 0;
+}
+
+/*
+ * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
+ * Some revisions of the M29EW suffer from erase suspend hang ups. In
+ * particular, it can occur when the sequence
+ * Erase Confirm -> Suspend -> Program -> Resume
+ * causes a lockup due to internal timing issues. The consequence is that the
+ * erase cannot be resumed without inserting a dummy command after programming
+ * and prior to resuming. [...] The work-around is to issue a dummy write cycle
+ * that writes an F0 command code before the RESUME command.
+ */
+static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
+ unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
+ if (is_m29ew(cfi))
+ map_write(map, CMD(0xF0), adr);
+}
+
+/*
+ * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
+ *
+ * Some revisions of the M29EW (for example, A1 and A2 step revisions)
+ * are affected by a problem that could cause a hang up when an ERASE SUSPEND
+ * command is issued after an ERASE RESUME operation without waiting for a
+ * minimum delay. The result is that once the ERASE seems to be completed
+ * (no bits are toggling), the contents of the Flash memory block on which
+ * the erase was ongoing could be inconsistent with the expected values
+ * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
+ * values), causing a consequent failure of the ERASE operation.
+ * The occurrence of this issue could be high, especially when file system
+ * operations on the Flash are intensive. As a result, it is recommended
+ * that a patch be applied. Intensive file system operations can cause many
+ * calls to the garbage collection routine to free Flash space (also by
+ * erasing physical Flash blocks) and as a result, many consecutive SUSPEND
+ * and RESUME commands can occur. The problem disappears when a delay is
+ * inserted after
+ * the RESUME command by using the udelay() function available in Linux.
+ * The DELAY value must be tuned based on the customer's platform.
+ * The maximum value that fixes the problem in all cases is 500us.
+ * But, in our experience, a delay of 30 µs to 50 µs is sufficient
+ * in most cases.
+ * We have chosen 500µs because this latency is acceptable.
+ */
+static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
+{
+ /*
+ * Resolving the Delay After Resume Issue see Micron TN-13-07
+ * Worst case delay must be 500µs but 30-50µs should be ok as well
+ */
+ if (is_m29ew(cfi))
+ cfi_udelay(500);
+}
+
+struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct device_node __maybe_unused *np = map->device_node;
+ struct mtd_info *mtd;
+ int i;
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ return NULL;
+ mtd->priv = map;
+ mtd->type = MTD_NORFLASH;
+
+ /* Fill in the default mtd operations */
+ mtd->_erase = cfi_amdstd_erase_varsize;
+ mtd->_write = cfi_amdstd_write_words;
+ mtd->_read = cfi_amdstd_read;
+ mtd->_sync = cfi_amdstd_sync;
+ mtd->_suspend = cfi_amdstd_suspend;
+ mtd->_resume = cfi_amdstd_resume;
+ mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
+ mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
+ mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
+ mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
+ mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
+ mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->name = map->name;
+ mtd->writesize = 1;
+ mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+
+ pr_debug("MTD %s(): write buffer size %d\n", __func__,
+ mtd->writebufsize);
+
+ mtd->_panic_write = cfi_amdstd_panic_write;
+ mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
+
+ if (cfi->cfi_mode==CFI_MODE_CFI){
+ unsigned char bootloc;
+ __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
+ struct cfi_pri_amdstd *extp;
+
+ extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
+ if (extp) {
+ /*
+ * It's a real CFI chip, not one for which the probe
+ * routine faked a CFI structure.
+ */
+ cfi_fixup_major_minor(cfi, extp);
+
+ /*
+ * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
+ * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
+ * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
+ * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
+ * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
+ */
+ if (extp->MajorVersion != '1' ||
+ (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
+ printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
+ "version %c.%c (%#02x/%#02x).\n",
+ extp->MajorVersion, extp->MinorVersion,
+ extp->MajorVersion, extp->MinorVersion);
+ kfree(extp);
+ kfree(mtd);
+ return NULL;
+ }
+
+ printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
+ extp->MajorVersion, extp->MinorVersion);
+
+ /* Install our own private info structure */
+ cfi->cmdset_priv = extp;
+
+ /* Apply cfi device specific fixups */
+ cfi_fixup(mtd, cfi_fixup_table);
+
+#ifdef DEBUG_CFI_FEATURES
+ /* Tell the user about it in lots of lovely detail */
+ cfi_tell_features(extp);
+#endif
+
+#ifdef CONFIG_OF
+ if (np && of_property_read_bool(
+ np, "use-advanced-sector-protection")
+ && extp->BlkProtUnprot == 8) {
+ printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
+ mtd->_lock = cfi_ppb_lock;
+ mtd->_unlock = cfi_ppb_unlock;
+ mtd->_is_locked = cfi_ppb_is_locked;
+ }
+#endif
+
+ bootloc = extp->TopBottom;
+ if ((bootloc < 2) || (bootloc > 5)) {
+ printk(KERN_WARNING "%s: CFI contains unrecognised boot "
+ "bank location (%d). Assuming bottom.\n",
+ map->name, bootloc);
+ bootloc = 2;
+ }
+
+ if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
+ printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
+
+ for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
+ int j = (cfi->cfiq->NumEraseRegions-1)-i;
+
+ swap(cfi->cfiq->EraseRegionInfo[i],
+ cfi->cfiq->EraseRegionInfo[j]);
+ }
+ }
+ /* Set the default CFI lock/unlock addresses */
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2aa;
+ }
+ cfi_fixup(mtd, cfi_nopri_fixup_table);
+
+ if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
+ kfree(mtd);
+ return NULL;
+ }
+
+ } /* CFI mode */
+ else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
+ /* Apply jedec specific fixups */
+ cfi_fixup(mtd, jedec_fixup_table);
+ }
+ /* Apply generic fixups */
+ cfi_fixup(mtd, fixup_table);
+
+ for (i=0; i< cfi->numchips; i++) {
+ cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
+ cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
+ cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
+		/*
+		 * First calculate the maximum timeout from the timeout
+		 * fields of struct cfi_ident probed from the chip's CFI
+		 * area, if available.  Enforce a minimum of 2000us in
+		 * case the CFI data is wrong.
+		 */
+ if (cfi->cfiq->BufWriteTimeoutTyp &&
+ cfi->cfiq->BufWriteTimeoutMax)
+ cfi->chips[i].buffer_write_time_max =
+ 1 << (cfi->cfiq->BufWriteTimeoutTyp +
+ cfi->cfiq->BufWriteTimeoutMax);
+ else
+ cfi->chips[i].buffer_write_time_max = 0;
+
+ cfi->chips[i].buffer_write_time_max =
+ max(cfi->chips[i].buffer_write_time_max, 2000);
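+		/*
+		 * For example, BufWriteTimeoutTyp = 9 and
+		 * BufWriteTimeoutMax = 2 give 1 << (9 + 2) = 2048us,
+		 * which already exceeds the 2000us floor and is kept.
+		 */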
+
+ cfi->chips[i].ref_point_counter = 0;
+ init_waitqueue_head(&(cfi->chips[i].wq));
+ }
+
+ map->fldrv = &cfi_amdstd_chipdrv;
+
+ return cfi_amdstd_setup(mtd);
+}
+struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
+struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
+EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
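+/*
+ * Illustrative only (not part of the probe flow in this file): a map
+ * driver normally reaches this command set through the generic CFI
+ * probe, e.g.:
+ *
+ *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
+ *
+ *	if (mtd)
+ *		mtd_device_register(mtd, NULL, 0);
+ *
+ * where cfi_probe() dispatches on the primary vendor command set ID
+ * (0x0002 here) found in the CFI query data; "my_map" is a made-up
+ * example map_info.
+ */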
+
+static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
+ unsigned long offset = 0;
+ int i,j;
+
+ printk(KERN_NOTICE "number of %s chips: %d\n",
+ (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
+ /* Select the correct geometry setup */
+ mtd->size = devsize * cfi->numchips;
+
+ mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
+ mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
+ sizeof(struct mtd_erase_region_info),
+ GFP_KERNEL);
+ if (!mtd->eraseregions)
+ goto setup_err;
+
+ for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
+ unsigned long ernum, ersize;
+ ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
+ ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
+
+ if (mtd->erasesize < ersize) {
+ mtd->erasesize = ersize;
+ }
+ for (j=0; j<cfi->numchips; j++) {
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
+ }
+ offset += (ersize * ernum);
+ }
+ if (offset != devsize) {
+ /* Argh */
+ printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
+ goto setup_err;
+ }
+
+ __module_get(THIS_MODULE);
+ register_reboot_notifier(&mtd->reboot_notifier);
+ return mtd;
+
+ setup_err:
+ kfree(mtd->eraseregions);
+ kfree(mtd);
+ kfree(cfi->cmdset_priv);
+ return NULL;
+}
+
+/*
+ * Return true if the chip is ready and has the correct value.
+ *
+ * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
+ * non-suspended sector) and is indicated by no toggle bits toggling.
+ *
+ * Errors are indicated by bits that keep toggling, or by bits held at
+ * the wrong value.
+ *
+ * Note that anything more complicated than checking if no bits are toggling
+ * (including checking DQ5 for an error status) is tricky to get working
+ * correctly and is therefore not done (particularly with interleaved chips
+ * as each chip must be checked independently of the others).
+ */
+static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
+ unsigned long addr, map_word *expected)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word oldd, curd;
+ int ret;
+
+ if (cfi_use_status_reg(cfi)) {
+ map_word ready = CMD(CFI_SR_DRB);
+ /*
+ * For chips that support status register, check device
+ * ready bit
+ */
+ cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ curd = map_read(map, addr);
+
+ return map_word_andequal(map, curd, ready, ready);
+ }
+
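+	/*
+	 * Two back-to-back reads: while an embedded operation is running,
+	 * a toggle bit (DQ6, plus DQ2 during erase) flips on every read,
+	 * so consecutive reads differ; equal reads mean array mode.
+	 */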
+ oldd = map_read(map, addr);
+ curd = map_read(map, addr);
+
+ ret = map_word_equal(map, oldd, curd);
+
+ if (!ret || !expected)
+ return ret;
+
+ return map_word_equal(map, curd, *expected);
+}
+
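+/*
+ * Like chip_ready(), but chips flagged with CFI_QUIRK_DQ_TRUE_DATA skip
+ * the data-match check and rely on the ready/toggle test alone.
+ */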
+static int __xipram chip_good(struct map_info *map, struct flchip *chip,
+ unsigned long addr, map_word *expected)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word *datum = expected;
+
+ if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA)
+ datum = NULL;
+
+ return chip_ready(map, chip, addr, datum);
+}
+
+static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo;
+ struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
+
+ resettime:
+ timeo = jiffies + HZ;
+ retry:
+ switch (chip->state) {
+
+ case FL_STATUS:
+ for (;;) {
+ if (chip_ready(map, chip, adr, NULL))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
+ return -EIO;
+ }
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ /* Someone else might have been playing with it. */
+ goto retry;
+ }
+ return 0;
+
+ case FL_READY:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ return 0;
+
+ case FL_ERASING:
+ if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
+ !(mode == FL_READY || mode == FL_POINT ||
+ (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
+ goto sleep;
+
+		/* Do not allow suspend if reading/writing the erase block in progress */
+ if ((adr & chip->in_progress_block_mask) ==
+ chip->in_progress_block_addr)
+ goto sleep;
+
+ /* Erase suspend */
+ /* It's harmless to issue the Erase-Suspend and Erase-Resume
+ * commands when the erase algorithm isn't in progress. */
+ map_write(map, CMD(0xB0), chip->in_progress_block_addr);
+ chip->oldstate = FL_ERASING;
+ chip->state = FL_ERASE_SUSPENDING;
+ chip->erase_suspended = 1;
+ for (;;) {
+ if (chip_ready(map, chip, adr, NULL))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ /* Should have suspended the erase by now.
+ * Send an Erase-Resume command as either
+ * there was an error (so leave the erase
+					 * routine to recover from it) or we were trying
+					 * to use the erase-in-progress sector. */
+ put_chip(map, chip, adr);
+ printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
+ return -EIO;
+ }
+
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
+ So we can just loop here. */
+ }
+ chip->state = FL_READY;
+ return 0;
+
+ case FL_XIP_WHILE_ERASING:
+ if (mode != FL_READY && mode != FL_POINT &&
+ (!cfip || !(cfip->EraseSuspend&2)))
+ goto sleep;
+ chip->oldstate = chip->state;
+ chip->state = FL_READY;
+ return 0;
+
+ case FL_SHUTDOWN:
+ /* The machine is rebooting */
+ return -EIO;
+
+ case FL_POINT:
+ /* Only if there's no operation suspended... */
+ if (mode == FL_READY && chip->oldstate == FL_READY)
+ return 0;
+ fallthrough;
+ default:
+ sleep:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ goto resettime;
+ }
+}
+
+
+static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ switch(chip->oldstate) {
+ case FL_ERASING:
+ cfi_fixup_m29ew_erase_suspend(map,
+ chip->in_progress_block_addr);
+ map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
+ cfi_fixup_m29ew_delay_after_resume(cfi);
+ chip->oldstate = FL_READY;
+ chip->state = FL_ERASING;
+ break;
+
+ case FL_XIP_WHILE_ERASING:
+ chip->state = chip->oldstate;
+ chip->oldstate = FL_READY;
+ break;
+
+ case FL_READY:
+ case FL_STATUS:
+ break;
+ default:
+ printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
+ }
+ wake_up(&chip->wq);
+}
+
+#ifdef CONFIG_MTD_XIP
+
+/*
+ * No interrupt whatsoever can be serviced while the flash isn't in array
+ * mode.  This is ensured by the xip_disable() and xip_enable() functions
+ * enclosing any code path where the flash is known not to be in array mode.
+ * Within an XIP-disabled code path, only functions marked with __xipram
+ * may be called and nothing else (it's a good idea to inspect the generated
+ * assembly to make sure inline functions were actually inlined and that gcc
+ * didn't emit calls to its own support functions).  Configuring MTD CFI
+ * support for a single buswidth and a single interleave is also recommended.
+ */
+
+static void xip_disable(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ /* TODO: chips with no XIP use should ignore and return */
+ (void) map_read(map, adr); /* ensure mmu mapping is up to date */
+ local_irq_disable();
+}
+
+static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ if (chip->state != FL_POINT && chip->state != FL_READY) {
+ map_write(map, CMD(0xf0), adr);
+ chip->state = FL_READY;
+ }
+ (void) map_read(map, adr);
+ xip_iprefetch();
+ local_irq_enable();
+}
+
+/*
+ * When a delay is required for the flash operation to complete, the
+ * xip_udelay() function polls for both the given timeout and pending
+ * (but still masked) hardware interrupts. Whenever there is an interrupt
+ * pending then the flash erase operation is suspended, array mode restored
+ * and interrupts unmasked. Task scheduling might also happen at that
+ * point. The CPU eventually returns from the interrupt or the call to
+ * schedule() and the suspended flash operation is resumed for the remainder
+ * of the delay period.
+ *
+ * Warning: this function _will_ fool interrupt latency tracing tools.
+ */
+
+static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int usec)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
+ map_word status, OK = CMD(0x80);
+ unsigned long suspended, start = xip_currtime();
+ flstate_t oldstate;
+
+ do {
+ cpu_relax();
+ if (xip_irqpending() && extp &&
+ ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
+ (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
+ /*
+ * Let's suspend the erase operation when supported.
+ * Note that we currently don't try to suspend
+ * interleaved chips if there is already another
+ * operation suspended (imagine what happens
+ * when one chip was already done with the current
+ * operation while another chip suspended it, then
+ * we resume the whole thing at once). Yes, it
+ * can happen!
+ */
+ map_write(map, CMD(0xb0), adr);
+ usec -= xip_elapsed_since(start);
+ suspended = xip_currtime();
+ do {
+ if (xip_elapsed_since(suspended) > 100000) {
+ /*
+ * The chip doesn't want to suspend
+ * after waiting for 100 msecs.
+ * This is a critical error but there
+ * is not much we can do here.
+ */
+ return;
+ }
+ status = map_read(map, adr);
+ } while (!map_word_andequal(map, status, OK, OK));
+
+ /* Suspend succeeded */
+ oldstate = chip->state;
+ if (!map_word_bitsset(map, status, CMD(0x40)))
+ break;
+ chip->state = FL_XIP_WHILE_ERASING;
+ chip->erase_suspended = 1;
+ map_write(map, CMD(0xf0), adr);
+ (void) map_read(map, adr);
+ xip_iprefetch();
+ local_irq_enable();
+ mutex_unlock(&chip->mutex);
+ xip_iprefetch();
+ cond_resched();
+
+ /*
+ * We're back. However someone else might have
+ * decided to go write to the chip if we are in
+ * a suspended erase state. If so let's wait
+ * until it's done.
+ */
+ mutex_lock(&chip->mutex);
+ while (chip->state != FL_XIP_WHILE_ERASING) {
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ }
+ /* Disallow XIP again */
+ local_irq_disable();
+
+ /* Correct Erase Suspend Hangups for M29EW */
+ cfi_fixup_m29ew_erase_suspend(map, adr);
+ /* Resume the write or erase operation */
+ map_write(map, cfi->sector_erase_cmd, adr);
+ chip->state = oldstate;
+ start = xip_currtime();
+ } else if (usec >= 1000000/HZ) {
+ /*
+ * Try to save on CPU power when waiting delay
+ * is at least a system timer tick period.
+ * No need to be extremely accurate here.
+ */
+ xip_cpu_idle();
+ }
+ status = map_read(map, adr);
+ } while (!map_word_andequal(map, status, OK, OK)
+ && xip_elapsed_since(start) < usec);
+}
+
+#define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
+
+/*
+ * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
+ * the flash is actively programming or erasing since we have to poll for
+ * the operation to complete anyway. We can't do that in a generic way with
+ * a XIP setup so do it before the actual flash operation in this case
+ * and stub it out from INVALIDATE_CACHE_UDELAY.
+ */
+#define XIP_INVAL_CACHED_RANGE(map, from, size) \
+ INVALIDATE_CACHED_RANGE(map, from, size)
+
+#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
+ UDELAY(map, chip, adr, usec)
+
+/*
+ * Extra notes:
+ *
+ * Activating this XIP support changes the way the code works a bit. For
+ * example the code to suspend the current process when concurrent access
+ * happens is never executed because xip_udelay() will always return with the
+ * same chip state as it was entered with.  This is why no special care is
+ * taken about the add_wait_queue() or schedule() calls that appear within
+ * a couple of xip_disable()'d areas of code, like in do_erase_oneblock.
+ * The queueing and scheduling are always happening within xip_udelay().
+ *
+ * Similarly, get_chip() and put_chip() always happen to be executed
+ * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. with the
+ * flash in array mode, so most of the cases they handle are never
+ * exercised and XIP is not disturbed.
+ */
+
+#else
+
+#define xip_disable(map, chip, adr)
+#define xip_enable(map, chip, adr)
+#define XIP_INVAL_CACHED_RANGE(x...)
+
+#define UDELAY(map, chip, adr, usec) \
+do { \
+ mutex_unlock(&chip->mutex); \
+ cfi_udelay(usec); \
+ mutex_lock(&chip->mutex); \
+} while (0)
+
+#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
+do { \
+ mutex_unlock(&chip->mutex); \
+ INVALIDATE_CACHED_RANGE(map, adr, len); \
+ cfi_udelay(usec); \
+ mutex_lock(&chip->mutex); \
+} while (0)
+
+#endif
+
+static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
+{
+ unsigned long cmd_addr;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+ adr += chip->start;
+
+ /* Ensure cmd read/writes are aligned. */
+ cmd_addr = adr & ~(map_bankwidth(map)-1);
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, cmd_addr, FL_READY);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ if (chip->state != FL_POINT && chip->state != FL_READY) {
+ map_write(map, CMD(0xf0), cmd_addr);
+ chip->state = FL_READY;
+ }
+
+ map_copy_from(map, buf, adr, len);
+
+ put_chip(map, chip, cmd_addr);
+
+ mutex_unlock(&chip->mutex);
+ return 0;
+}
+
+
+static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs;
+ int chipnum;
+ int ret = 0;
+
+ /* ofs: offset within the first chip that the first read should start */
+ chipnum = (from >> cfi->chipshift);
+ ofs = from - (chipnum << cfi->chipshift);
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= cfi->numchips)
+ break;
+
+ if ((len + ofs -1) >> cfi->chipshift)
+ thislen = (1<<cfi->chipshift) - ofs;
+ else
+ thislen = len;
+
+ ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
+ if (ret)
+ break;
+
+ *retlen += thislen;
+ len -= thislen;
+ buf += thislen;
+
+ ofs = 0;
+ chipnum++;
+ }
+ return ret;
+}
+
+typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
+ loff_t adr, size_t len, u_char *buf, size_t grouplen);
+
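+/*
+ * Enter the SecSi (OTP) sector region: the usual two unlock cycles are
+ * followed by the 0x88 "Enter SecSi Sector" command; otp_exit() below
+ * backs out again with the 0x90/0x00 exit sequence.
+ */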
+static inline void otp_enter(struct map_info *map, struct flchip *chip,
+ loff_t adr, size_t len)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
+}
+
+static inline void otp_exit(struct map_info *map, struct flchip *chip,
+ loff_t adr, size_t len)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
+}
+
+static inline int do_read_secsi_onechip(struct map_info *map,
+ struct flchip *chip, loff_t adr,
+ size_t len, u_char *buf,
+ size_t grouplen)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ retry:
+ mutex_lock(&chip->mutex);
+
+ if (chip->state != FL_READY){
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+
+ mutex_unlock(&chip->mutex);
+
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+
+ goto retry;
+ }
+
+ adr += chip->start;
+
+ chip->state = FL_READY;
+
+ otp_enter(map, chip, adr, len);
+ map_copy_from(map, buf, adr, len);
+ otp_exit(map, chip, adr, len);
+
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+
+ return 0;
+}
+
+static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs;
+ int chipnum;
+ int ret = 0;
+
+ /* ofs: offset within the first chip that the first read should start */
+ /* 8 secsi bytes per chip */
+ chipnum=from>>3;
+ ofs=from & 7;
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= cfi->numchips)
+ break;
+
+ if ((len + ofs -1) >> 3)
+ thislen = (1<<3) - ofs;
+ else
+ thislen = len;
+
+ ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
+ thislen, buf, 0);
+ if (ret)
+ break;
+
+ *retlen += thislen;
+ len -= thislen;
+ buf += thislen;
+
+ ofs = 0;
+ chipnum++;
+ }
+ return ret;
+}
+
+static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+ unsigned long adr, map_word datum,
+ int mode);
+
+static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
+ size_t len, u_char *buf, size_t grouplen)
+{
+ int ret;
+ while (len) {
+ unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
+ int gap = adr - bus_ofs;
+ int n = min_t(int, len, map_bankwidth(map) - gap);
+ map_word datum = map_word_ff(map);
+
+ if (n != map_bankwidth(map)) {
+ /* partial write of a word, load old contents */
+ otp_enter(map, chip, bus_ofs, map_bankwidth(map));
+ datum = map_read(map, bus_ofs);
+ otp_exit(map, chip, bus_ofs, map_bankwidth(map));
+ }
+
+ datum = map_word_load_partial(map, datum, buf, gap, n);
+ ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
+ if (ret)
+ return ret;
+
+ adr += n;
+ buf += n;
+ len -= n;
+ }
+
+ return 0;
+}
+
+static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
+ size_t len, u_char *buf, size_t grouplen)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ uint8_t lockreg;
+ unsigned long timeo;
+ int ret;
+
+ /* make sure area matches group boundaries */
+ if ((adr != 0) || (len != grouplen))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, chip->start, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+ chip->state = FL_LOCKING;
+
+ /* Enter lock register command */
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ /* read lock register */
+ lockreg = cfi_read_query(map, 0);
+
+	/* clear bit 0 to protect the extended memory block */
+	lockreg &= ~0x01;
+
+	/* write lock register */
+ map_write(map, CMD(0xA0), chip->start);
+ map_write(map, CMD(lockreg), chip->start);
+
+ /* wait for chip to become ready */
+ timeo = jiffies + msecs_to_jiffies(2);
+ for (;;) {
+ if (chip_ready(map, chip, adr, NULL))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ pr_err("Waiting for chip to be ready timed out.\n");
+ ret = -EIO;
+ break;
+ }
+ UDELAY(map, chip, 0, 1);
+ }
+
+ /* exit protection commands */
+ map_write(map, CMD(0x90), chip->start);
+ map_write(map, CMD(0x00), chip->start);
+
+ chip->state = FL_READY;
+ put_chip(map, chip, chip->start);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf,
+ otp_op_t action, int user_regs)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ofs_factor = cfi->interleave * cfi->device_type;
+ unsigned long base;
+ int chipnum;
+ struct flchip *chip;
+ uint8_t otp, lockreg;
+ int ret;
+
+ size_t user_size, factory_size, otpsize;
+ loff_t user_offset, factory_offset, otpoffset;
+ int user_locked = 0, otplocked;
+
+ *retlen = 0;
+
+ for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
+ chip = &cfi->chips[chipnum];
+ factory_size = 0;
+ user_size = 0;
+
+ /* Micron M29EW family */
+ if (is_m29ew(cfi)) {
+ base = chip->start;
+
+ /* check whether secsi area is factory locked
+ or user lockable */
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, base, FL_CFI_QUERY);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+ cfi_qry_mode_on(base, map, cfi);
+ otp = cfi_read_query(map, base + 0x3 * ofs_factor);
+ cfi_qry_mode_off(base, map, cfi);
+ put_chip(map, chip, base);
+ mutex_unlock(&chip->mutex);
+
+ if (otp & 0x80) {
+ /* factory locked */
+ factory_offset = 0;
+ factory_size = 0x100;
+ } else {
+ /* customer lockable */
+ user_offset = 0;
+ user_size = 0x100;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, base, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ /* Enter lock register command */
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
+ chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
+ chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
+ chip->start, map, cfi,
+ cfi->device_type, NULL);
+ /* read lock register */
+ lockreg = cfi_read_query(map, 0);
+ /* exit protection commands */
+ map_write(map, CMD(0x90), chip->start);
+ map_write(map, CMD(0x00), chip->start);
+ put_chip(map, chip, chip->start);
+ mutex_unlock(&chip->mutex);
+
+ user_locked = ((lockreg & 0x01) == 0x00);
+ }
+ }
+
+ otpsize = user_regs ? user_size : factory_size;
+ if (!otpsize)
+ continue;
+ otpoffset = user_regs ? user_offset : factory_offset;
+ otplocked = user_regs ? user_locked : 1;
+
+ if (!action) {
+ /* return otpinfo */
+ struct otp_info *otpinfo;
+ len -= sizeof(*otpinfo);
+ if (len <= 0)
+ return -ENOSPC;
+ otpinfo = (struct otp_info *)buf;
+ otpinfo->start = from;
+ otpinfo->length = otpsize;
+ otpinfo->locked = otplocked;
+ buf += sizeof(*otpinfo);
+ *retlen += sizeof(*otpinfo);
+ from += otpsize;
+ } else if ((from < otpsize) && (len > 0)) {
+ size_t size;
+ size = (len < otpsize - from) ? len : otpsize - from;
+ ret = action(map, chip, otpoffset + from, size, buf,
+ otpsize);
+ if (ret < 0)
+ return ret;
+
+ buf += size;
+ len -= size;
+ *retlen += size;
+ from = 0;
+ } else {
+ from -= otpsize;
+ }
+ }
+ return 0;
+}
+
+static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
+ NULL, 0);
+}
+
+static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
+ NULL, 1);
+}
+
+static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ return cfi_amdstd_otp_walk(mtd, from, len, retlen,
+ buf, do_read_secsi_onechip, 0);
+}
+
+static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ return cfi_amdstd_otp_walk(mtd, from, len, retlen,
+ buf, do_read_secsi_onechip, 1);
+}
+
+static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ const u_char *buf)
+{
+ return cfi_amdstd_otp_walk(mtd, from, len, retlen, (u_char *)buf,
+ do_otp_write, 1);
+}
+
+static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len)
+{
+ size_t retlen;
+ return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
+ do_otp_lock, 1);
+}
+
+static int __xipram do_write_oneword_once(struct map_info *map,
+ struct flchip *chip,
+ unsigned long adr, map_word datum,
+ int mode, struct cfi_private *cfi)
+{
+ unsigned long timeo;
+	/*
+	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
+	 * have a max write time of a few hundred usecs).  Ideally we would
+	 * use the maximum timeout value given by the chip at probe time
+	 * instead, but struct flchip has no field for the maximum timeout,
+	 * only for the typical one, which can be far too short depending
+	 * on the conditions.  The ' + 1' avoids a timeout of 0 jiffies
+	 * when HZ is smaller than 1000.
+	 */
+ unsigned long uWriteTimeout = (HZ / 1000) + 1;
+ int ret = 0;
+
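+	/*
+	 * Standard AMD word-program sequence: the two unlock cycles
+	 * (0xAA @ unlock1, 0x55 @ unlock2) followed by the 0xA0 program
+	 * command, then the datum itself at the target address.
+	 */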
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ map_write(map, datum, adr);
+ chip->state = mode;
+
+ INVALIDATE_CACHE_UDELAY(map, chip,
+ adr, map_bankwidth(map),
+ chip->word_write_time);
+
+ /* See comment above for timeout value. */
+ timeo = jiffies + uWriteTimeout;
+ for (;;) {
+ if (chip->state != mode) {
+ /* Someone's suspended the write. Sleep */
+ DECLARE_WAITQUEUE(wait, current);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ / 2); /* FIXME */
+ mutex_lock(&chip->mutex);
+ continue;
+ }
+
+		/*
+		 * Check "time_after" together with "!chip_good" so that a
+		 * scheduling delay between polls cannot turn an already
+		 * completed write into a spurious timeout.
+		 */
+ if (time_after(jiffies, timeo) &&
+ !chip_good(map, chip, adr, &datum)) {
+ xip_enable(map, chip, adr);
+ printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
+ xip_disable(map, chip, adr);
+ ret = -EIO;
+ break;
+ }
+
+ if (chip_good(map, chip, adr, &datum)) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
+ break;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ UDELAY(map, chip, adr, 1);
+ }
+
+ return ret;
+}
+
+static int __xipram do_write_oneword_start(struct map_info *map,
+ struct flchip *chip,
+ unsigned long adr, int mode)
+{
+ int ret;
+
+ mutex_lock(&chip->mutex);
+
+ ret = get_chip(map, chip, adr, mode);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ if (mode == FL_OTP_WRITE)
+ otp_enter(map, chip, adr, map_bankwidth(map));
+
+ return ret;
+}
+
+static void __xipram do_write_oneword_done(struct map_info *map,
+ struct flchip *chip,
+ unsigned long adr, int mode)
+{
+ if (mode == FL_OTP_WRITE)
+ otp_exit(map, chip, adr, map_bankwidth(map));
+
+ chip->state = FL_READY;
+ DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+
+ mutex_unlock(&chip->mutex);
+}
+
+static int __xipram do_write_oneword_retry(struct map_info *map,
+ struct flchip *chip,
+ unsigned long adr, map_word datum,
+ int mode)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret = 0;
+ map_word oldd;
+ int retry_cnt = 0;
+
+ /*
+ * Check for a NOP for the case when the datum to write is already
+ * present - it saves time and works around buggy chips that corrupt
+ * data at other locations when 0xff is written to a location that
+ * already contains 0xff.
+ */
+ oldd = map_read(map, adr);
+ if (map_word_equal(map, oldd, datum)) {
+ pr_debug("MTD %s(): NOP\n", __func__);
+ return ret;
+ }
+
+ XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
+ ENABLE_VPP(map);
+ xip_disable(map, chip, adr);
+
+ retry:
+ ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
+ if (ret) {
+ /* reset on all failures. */
+ map_write(map, CMD(0xF0), chip->start);
+ /* FIXME - should have reset delay before continuing */
+
+ if (++retry_cnt <= MAX_RETRIES) {
+ ret = 0;
+ goto retry;
+ }
+ }
+ xip_enable(map, chip, adr);
+
+ return ret;
+}
+
+static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+ unsigned long adr, map_word datum,
+ int mode)
+{
+ int ret;
+
+ adr += chip->start;
+
+ pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr,
+ datum.x[0]);
+
+ ret = do_write_oneword_start(map, chip, adr, mode);
+ if (ret)
+ return ret;
+
+ ret = do_write_oneword_retry(map, chip, adr, datum, mode);
+
+ do_write_oneword_done(map, chip, adr, mode);
+
+ return ret;
+}
+
+
+static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+ int chipnum;
+ unsigned long ofs, chipstart;
+ DECLARE_WAITQUEUE(wait, current);
+
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
+ chipstart = cfi->chips[chipnum].start;
+
+	/* If it's not bus-aligned, do the leading partial-word write */
+ if (ofs & (map_bankwidth(map)-1)) {
+ unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
+ int i = ofs - bus_ofs;
+ int n = 0;
+ map_word tmp_buf;
+
+ retry:
+ mutex_lock(&cfi->chips[chipnum].mutex);
+
+ if (cfi->chips[chipnum].state != FL_READY) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&cfi->chips[chipnum].wq, &wait);
+
+ mutex_unlock(&cfi->chips[chipnum].mutex);
+
+ schedule();
+ remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
+ goto retry;
+ }
+
+ /* Load 'tmp_buf' with old contents of flash */
+ tmp_buf = map_read(map, bus_ofs+chipstart);
+
+ mutex_unlock(&cfi->chips[chipnum].mutex);
+
+ /* Number of bytes to copy from buffer */
+ n = min_t(int, len, map_bankwidth(map)-i);
+
+ tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
+
+ ret = do_write_oneword(map, &cfi->chips[chipnum],
+ bus_ofs, tmp_buf, FL_WRITING);
+ if (ret)
+ return ret;
+
+ ofs += n;
+ buf += n;
+ (*retlen) += n;
+ len -= n;
+
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ /* We are now aligned, write as much as possible */
+ while(len >= map_bankwidth(map)) {
+ map_word datum;
+
+ datum = map_word_load(map, buf);
+
+ ret = do_write_oneword(map, &cfi->chips[chipnum],
+ ofs, datum, FL_WRITING);
+ if (ret)
+ return ret;
+
+ ofs += map_bankwidth(map);
+ buf += map_bankwidth(map);
+ (*retlen) += map_bankwidth(map);
+ len -= map_bankwidth(map);
+
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ chipstart = cfi->chips[chipnum].start;
+ }
+ }
+
+ /* Write the trailing bytes if any */
+ if (len & (map_bankwidth(map)-1)) {
+ map_word tmp_buf;
+
+ retry1:
+ mutex_lock(&cfi->chips[chipnum].mutex);
+
+ if (cfi->chips[chipnum].state != FL_READY) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&cfi->chips[chipnum].wq, &wait);
+
+ mutex_unlock(&cfi->chips[chipnum].mutex);
+
+ schedule();
+ remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
+ goto retry1;
+ }
+
+ tmp_buf = map_read(map, ofs + chipstart);
+
+ mutex_unlock(&cfi->chips[chipnum].mutex);
+
+ tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
+
+ ret = do_write_oneword(map, &cfi->chips[chipnum],
+ ofs, tmp_buf, FL_WRITING);
+ if (ret)
+ return ret;
+
+ (*retlen) += len;
+ }
+
+ return 0;
+}
+
+#if !FORCE_WORD_WRITE
+static int __xipram do_write_buffer_wait(struct map_info *map,
+ struct flchip *chip, unsigned long adr,
+ map_word datum)
+{
+ unsigned long timeo;
+ unsigned long u_write_timeout;
+ int ret = 0;
+
+ /*
+ * Timeout is calculated according to CFI data, if available.
+ * See more comments in cfi_cmdset_0002().
+ */
+ u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max);
+ timeo = jiffies + u_write_timeout;
+
+ for (;;) {
+ if (chip->state != FL_WRITING) {
+ /* Someone's suspended the write. Sleep */
+ DECLARE_WAITQUEUE(wait, current);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ / 2); /* FIXME */
+ mutex_lock(&chip->mutex);
+ continue;
+ }
+
+		/*
+		 * Check "time_after" together with "!chip_good" so that a
+		 * scheduling delay between polls cannot turn an already
+		 * completed write into a spurious timeout.
+		 */
+ if (time_after(jiffies, timeo) &&
+ !chip_good(map, chip, adr, &datum)) {
+ pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
+ __func__, adr);
+ ret = -EIO;
+ break;
+ }
+
+ if (chip_good(map, chip, adr, &datum)) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
+ break;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ UDELAY(map, chip, adr, 1);
+ }
+
+ return ret;
+}
+
+static void __xipram do_write_buffer_reset(struct map_info *map,
+ struct flchip *chip,
+ struct cfi_private *cfi)
+{
+ /*
+ * Recovery from write-buffer programming failures requires
+ * the write-to-buffer-reset sequence. Since the last part
+ * of the sequence also works as a normal reset, we can run
+ * the same commands regardless of why we are here.
+ * See e.g.
+ * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
+ */
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ /* FIXME - should have reset delay before continuing */
+}
+
+/*
+ * FIXME: interleaved mode not tested, and probably not supported!
+ */
+static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+ unsigned long adr, const u_char *buf,
+ int len)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+ unsigned long cmd_adr;
+ int z, words;
+ map_word datum;
+
+ adr += chip->start;
+ cmd_adr = adr;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_WRITING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ datum = map_word_load(map, buf);
+
+ pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
+ __func__, adr, datum.x[0]);
+
+ XIP_INVAL_CACHED_RANGE(map, adr, len);
+ ENABLE_VPP(map);
+ xip_disable(map, chip, cmd_adr);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+
+ /* Write Buffer Load */
+ map_write(map, CMD(0x25), cmd_adr);
+
+ chip->state = FL_WRITING_TO_BUFFER;
+
+ /* Write length of data to come */
+ words = len / map_bankwidth(map);
+ map_write(map, CMD(words - 1), cmd_adr);
+ /* Write data */
+ z = 0;
+ while(z < words * map_bankwidth(map)) {
+ datum = map_word_load(map, buf);
+ map_write(map, datum, adr + z);
+
+ z += map_bankwidth(map);
+ buf += map_bankwidth(map);
+ }
+ z -= map_bankwidth(map);
+
+ adr += z;
+
+ /* Write Buffer Program Confirm: GO GO GO */
+ map_write(map, CMD(0x29), cmd_adr);
+ chip->state = FL_WRITING;
+
+ INVALIDATE_CACHE_UDELAY(map, chip,
+ adr, map_bankwidth(map),
+ chip->word_write_time);
+
+ ret = do_write_buffer_wait(map, chip, adr, datum);
+ if (ret)
+ do_write_buffer_reset(map, chip, cfi);
+
+ xip_enable(map, chip, adr);
+
+ chip->state = FL_READY;
+ DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
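+
+/*
+ * For reference, the buffered-write flow issued above is the standard
+ * AMD/Spansion sequence: unlock cycles (0xAA, 0x55), Write Buffer Load
+ * (0x25) at the sector address, the word count minus one, the data
+ * words themselves, then Program Buffer to Flash (0x29). A hypothetical
+ * trace for a two-word write starting at sector address S:
+ *
+ *	0xAA -> unlock1;  0x55 -> unlock2;
+ *	0x25 -> S;  0x01 -> S;			(2 words - 1)
+ *	data[0] -> S;  data[1] -> S + bankwidth;
+ *	0x29 -> S;				(confirm: GO GO GO)
+ */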
+
+
+static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ int ret;
+ int chipnum;
+ unsigned long ofs;
+
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
+
+ /* If it's not bus-aligned, do the first word write */
+ if (ofs & (map_bankwidth(map)-1)) {
+ size_t local_len = (-ofs)&(map_bankwidth(map)-1);
+ if (local_len > len)
+ local_len = len;
+ ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
+ local_len, retlen, buf);
+ if (ret)
+ return ret;
+ ofs += local_len;
+ buf += local_len;
+ len -= local_len;
+
+ if (ofs >> cfi->chipshift) {
+			chipnum++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ /* Write buffer is worth it only if more than one word to write... */
+ while (len >= map_bankwidth(map) * 2) {
+ /* We must not cross write block boundaries */
+ int size = wbufsize - (ofs & (wbufsize-1));
+
+ if (size > len)
+ size = len;
+ if (size % map_bankwidth(map))
+ size -= size % map_bankwidth(map);
+
+ ret = do_write_buffer(map, &cfi->chips[chipnum],
+ ofs, buf, size);
+ if (ret)
+ return ret;
+
+ ofs += size;
+ buf += size;
+ (*retlen) += size;
+ len -= size;
+
+ if (ofs >> cfi->chipshift) {
+			chipnum++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ if (len) {
+ size_t retlen_dregs = 0;
+
+ ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
+ len, &retlen_dregs, buf);
+
+ *retlen += retlen_dregs;
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* !FORCE_WORD_WRITE */
+
+/*
+ * Wait for the flash chip to become ready to write data
+ *
+ * This is only called during the panic_write() path. When panic_write()
+ * is called, the kernel is in the process of a panic, and will soon be
+ * dead. Therefore we don't take any locks, and attempt to get access
+ * to the chip as soon as possible.
+ */
+static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int retries = 10;
+ int i;
+
+ /*
+ * If the driver thinks the chip is idle, and no toggle bits
+ * are changing, then the chip is actually idle for sure.
+ */
+ if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL))
+ return 0;
+
+ /*
+ * Try several times to reset the chip and then wait for it
+ * to become idle. The upper limit of a few milliseconds of
+ * delay isn't a big problem: the kernel is dying anyway. It
+ * is more important to save the messages.
+ */
+ while (retries > 0) {
+ const unsigned long timeo = (HZ / 1000) + 1;
+
+ /* send the reset command */
+ map_write(map, CMD(0xF0), chip->start);
+
+ /* wait for the chip to become ready */
+ for (i = 0; i < jiffies_to_usecs(timeo); i++) {
+ if (chip_ready(map, chip, adr, NULL))
+ return 0;
+
+ udelay(1);
+ }
+
+ retries--;
+ }
+
+ /* the chip never became ready */
+ return -EBUSY;
+}
+
+/*
+ * Write out one word of data to a single flash chip during a kernel panic
+ *
+ * This is only called during the panic_write() path. When panic_write()
+ * is called, the kernel is in the process of a panic, and will soon be
+ * dead. Therefore we don't take any locks, and attempt to get access
+ * to the chip as soon as possible.
+ *
+ * The implementation of this routine is intentionally similar to
+ * do_write_oneword(), in order to ease code maintenance.
+ */
+static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
+ unsigned long adr, map_word datum)
+{
+ const unsigned long uWriteTimeout = (HZ / 1000) + 1;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int retry_cnt = 0;
+ map_word oldd;
+ int ret;
+ int i;
+
+ adr += chip->start;
+
+ ret = cfi_amdstd_panic_wait(map, chip, adr);
+ if (ret)
+ return ret;
+
+ pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
+ __func__, adr, datum.x[0]);
+
+ /*
+ * Check for a NOP for the case when the datum to write is already
+ * present - it saves time and works around buggy chips that corrupt
+ * data at other locations when 0xff is written to a location that
+ * already contains 0xff.
+ */
+ oldd = map_read(map, adr);
+ if (map_word_equal(map, oldd, datum)) {
+ pr_debug("MTD %s(): NOP\n", __func__);
+ goto op_done;
+ }
+
+ ENABLE_VPP(map);
+
+retry:
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ map_write(map, datum, adr);
+
+ for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
+ if (chip_ready(map, chip, adr, NULL))
+ break;
+
+ udelay(1);
+ }
+
+ if (!chip_ready(map, chip, adr, &datum) ||
+ cfi_check_err_status(map, chip, adr)) {
+ /* reset on all failures. */
+ map_write(map, CMD(0xF0), chip->start);
+ /* FIXME - should have reset delay before continuing */
+
+ if (++retry_cnt <= MAX_RETRIES)
+ goto retry;
+
+ ret = -EIO;
+ }
+
+op_done:
+ DISABLE_VPP(map);
+ return ret;
+}
+
+/*
+ * Write out some data during a kernel panic
+ *
+ * This is used by the mtdoops driver to save the dying messages from a
+ * kernel which has panic'd.
+ *
+ * This routine ignores all of the locking used throughout the rest of the
+ * driver, in order to ensure that the data gets written out no matter what
+ * state this driver (and the flash chip itself) was in when the kernel crashed.
+ *
+ * The implementation of this routine is intentionally similar to
+ * cfi_amdstd_write_words(), in order to ease code maintenance.
+ */
+static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs, chipstart;
+ int ret;
+ int chipnum;
+
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
+ chipstart = cfi->chips[chipnum].start;
+
+ /* If it's not bus aligned, do the first byte write */
+ if (ofs & (map_bankwidth(map) - 1)) {
+ unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
+ int i = ofs - bus_ofs;
+ int n = 0;
+ map_word tmp_buf;
+
+ ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
+ if (ret)
+ return ret;
+
+ /* Load 'tmp_buf' with old contents of flash */
+ tmp_buf = map_read(map, bus_ofs + chipstart);
+
+ /* Number of bytes to copy from buffer */
+ n = min_t(int, len, map_bankwidth(map) - i);
+
+ tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
+
+ ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
+ bus_ofs, tmp_buf);
+ if (ret)
+ return ret;
+
+ ofs += n;
+ buf += n;
+ (*retlen) += n;
+ len -= n;
+
+ if (ofs >> cfi->chipshift) {
+ chipnum++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ /* We are now aligned, write as much as possible */
+ while (len >= map_bankwidth(map)) {
+ map_word datum;
+
+ datum = map_word_load(map, buf);
+
+ ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
+ ofs, datum);
+ if (ret)
+ return ret;
+
+ ofs += map_bankwidth(map);
+ buf += map_bankwidth(map);
+ (*retlen) += map_bankwidth(map);
+ len -= map_bankwidth(map);
+
+ if (ofs >> cfi->chipshift) {
+ chipnum++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+
+ chipstart = cfi->chips[chipnum].start;
+ }
+ }
+
+ /* Write the trailing bytes if any */
+ if (len & (map_bankwidth(map) - 1)) {
+ map_word tmp_buf;
+
+ ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
+ if (ret)
+ return ret;
+
+ tmp_buf = map_read(map, ofs + chipstart);
+
+ tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
+
+ ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
+ ofs, tmp_buf);
+ if (ret)
+ return ret;
+
+ (*retlen) += len;
+ }
+
+ return 0;
+}
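+
+/*
+ * A minimal sketch of how this path is reached (assuming the usual
+ * mtdoops flow; mtd_panic_write() is the generic MTD API entry, not
+ * something defined in this file):
+ *
+ *	ret = mtd_panic_write(mtd, to, len, &retlen, buf);
+ *	// dispatches to mtd->_panic_write, i.e. cfi_amdstd_panic_write()
+ *	// when this command set driver is bound to the map
+ */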
+
+
+/*
+ * Handle devices with one erase region that implement only
+ * the chip erase command.
+ */
+static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo = jiffies + HZ;
+ unsigned long int adr;
+ DECLARE_WAITQUEUE(wait, current);
+ int ret;
+ int retry_cnt = 0;
+ map_word datum = map_word_ff(map);
+
+ adr = cfi->addr_unlock1;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_ERASING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ pr_debug("MTD %s(): ERASE 0x%.8lx\n",
+ __func__, chip->start);
+
+ XIP_INVAL_CACHED_RANGE(map, adr, map->size);
+ ENABLE_VPP(map);
+ xip_disable(map, chip, adr);
+
+ retry:
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+
+ chip->state = FL_ERASING;
+ chip->erase_suspended = 0;
+ chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(map->size - 1);
+
+ INVALIDATE_CACHE_UDELAY(map, chip,
+ adr, map->size,
+ chip->erase_time*500);
+
+ timeo = jiffies + (HZ*20);
+
+ for (;;) {
+ if (chip->state != FL_ERASING) {
+ /* Someone's suspended the erase. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ continue;
+ }
+ if (chip->erase_suspended) {
+ /* This erase was suspended and resumed.
+ Adjust the timeout */
+ timeo = jiffies + (HZ*20); /* FIXME */
+ chip->erase_suspended = 0;
+ }
+
+ if (chip_ready(map, chip, adr, &datum)) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
+ break;
+ }
+
+ if (time_after(jiffies, timeo)) {
+ printk(KERN_WARNING "MTD %s(): software timeout\n",
+ __func__);
+ ret = -EIO;
+ break;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ UDELAY(map, chip, adr, 1000000/HZ);
+ }
+ /* Did we succeed? */
+ if (ret) {
+ /* reset on all failures. */
+ map_write(map, CMD(0xF0), chip->start);
+ /* FIXME - should have reset delay before continuing */
+
+ if (++retry_cnt <= MAX_RETRIES) {
+ ret = 0;
+ goto retry;
+ }
+ }
+
+ chip->state = FL_READY;
+ xip_enable(map, chip, adr);
+ DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+
+static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo = jiffies + HZ;
+ DECLARE_WAITQUEUE(wait, current);
+ int ret;
+ int retry_cnt = 0;
+ map_word datum = map_word_ff(map);
+
+ adr += chip->start;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_ERASING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ pr_debug("MTD %s(): ERASE 0x%.8lx\n",
+ __func__, adr);
+
+ XIP_INVAL_CACHED_RANGE(map, adr, len);
+ ENABLE_VPP(map);
+ xip_disable(map, chip, adr);
+
+ retry:
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ map_write(map, cfi->sector_erase_cmd, adr);
+
+ chip->state = FL_ERASING;
+ chip->erase_suspended = 0;
+ chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(len - 1);
+
+ INVALIDATE_CACHE_UDELAY(map, chip,
+ adr, len,
+ chip->erase_time*500);
+
+ timeo = jiffies + (HZ*20);
+
+ for (;;) {
+ if (chip->state != FL_ERASING) {
+ /* Someone's suspended the erase. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ continue;
+ }
+ if (chip->erase_suspended) {
+ /* This erase was suspended and resumed.
+ Adjust the timeout */
+ timeo = jiffies + (HZ*20); /* FIXME */
+ chip->erase_suspended = 0;
+ }
+
+ if (chip_ready(map, chip, adr, &datum)) {
+ if (cfi_check_err_status(map, chip, adr))
+ ret = -EIO;
+ break;
+ }
+
+ if (time_after(jiffies, timeo)) {
+ printk(KERN_WARNING "MTD %s(): software timeout\n",
+ __func__);
+ ret = -EIO;
+ break;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ UDELAY(map, chip, adr, 1000000/HZ);
+ }
+ /* Did we succeed? */
+ if (ret) {
+ /* reset on all failures. */
+ map_write(map, CMD(0xF0), chip->start);
+ /* FIXME - should have reset delay before continuing */
+
+ if (++retry_cnt <= MAX_RETRIES) {
+ ret = 0;
+ goto retry;
+ }
+ }
+
+ chip->state = FL_READY;
+ xip_enable(map, chip, adr);
+ DISABLE_VPP(map);
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
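+
+/*
+ * For reference: the six cycles issued above form the standard AMD
+ * sector erase sequence -- 0xAA, 0x55, 0x80, 0xAA, 0x55, then the
+ * sector erase opcode (cfi->sector_erase_cmd, normally 0x30) written
+ * to the address of the sector itself.
+ */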
+
+
+static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
+ instr->len, NULL);
+}
+
+
+static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ if (instr->addr != 0)
+ return -EINVAL;
+
+ if (instr->len != mtd->size)
+ return -EINVAL;
+
+ return do_erase_chip(map, &cfi->chips[0]);
+}
+
+static int do_atmel_lock(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+ if (ret)
+ goto out_unlock;
+ chip->state = FL_LOCKING;
+
+ pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ map_write(map, CMD(0x40), chip->start + adr);
+
+ chip->state = FL_READY;
+ put_chip(map, chip, adr + chip->start);
+ ret = 0;
+
+out_unlock:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
+ if (ret)
+ goto out_unlock;
+ chip->state = FL_UNLOCKING;
+
+	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ map_write(map, CMD(0x70), adr);
+
+ chip->state = FL_READY;
+ put_chip(map, chip, adr + chip->start);
+ ret = 0;
+
+out_unlock:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
+}
+
+static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
+}
+
+/*
+ * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
+ */
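+
+/*
+ * PPB bits are per-sector protection bits. On these chips they can be
+ * set (locked) individually, but can only be cleared (unlocked) for the
+ * whole device at once, which is why cfi_ppb_unlock() below has to save
+ * and restore the lock state of every sector outside the range being
+ * unlocked.
+ */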
+
+struct ppb_lock {
+ struct flchip *chip;
+ unsigned long adr;
+ int locked;
+};
+
+#define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1)
+#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2)
+#define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3)
+
+static int __maybe_unused do_ppb_xxlock(struct map_info *map,
+ struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo;
+ int ret;
+
+ adr += chip->start;
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ /* PPB entry command */
+ cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
+ chip->state = FL_LOCKING;
+ map_write(map, CMD(0xA0), adr);
+ map_write(map, CMD(0x00), adr);
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
+ /*
+ * Unlocking of one specific sector is not supported, so we
+ * have to unlock all sectors of this device instead
+ */
+ chip->state = FL_UNLOCKING;
+ map_write(map, CMD(0x80), chip->start);
+ map_write(map, CMD(0x30), chip->start);
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
+ chip->state = FL_JEDEC_QUERY;
+		/* PPB readback: 0 -> locked, 1 -> unlocked; invert for caller */
+ ret = !cfi_read_query(map, adr);
+ } else
+ BUG();
+
+	/*
+	 * Wait a while, as unlocking all sectors can take quite a long time
+	 */
+ timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */
+ for (;;) {
+ if (chip_ready(map, chip, adr, NULL))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
+ ret = -EIO;
+ break;
+ }
+
+ UDELAY(map, chip, adr, 1);
+ }
+
+ /* Exit BC commands */
+ map_write(map, CMD(0x90), chip->start);
+ map_write(map, CMD(0x00), chip->start);
+
+ chip->state = FL_READY;
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_LOCK);
+}
+
+static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ struct mtd_erase_region_info *regions = mtd->eraseregions;
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct ppb_lock *sect;
+ unsigned long adr;
+ loff_t offset;
+ uint64_t length;
+ int chipnum;
+ int i;
+ int sectors;
+ int ret;
+ int max_sectors;
+
+ /*
+ * PPB unlocking always unlocks all sectors of the flash chip.
+ * We need to re-lock all previously locked sectors. So let's
+ * first check the locking status of all sectors and save
+ * it for future use.
+ */
+ max_sectors = 0;
+ for (i = 0; i < mtd->numeraseregions; i++)
+ max_sectors += regions[i].numblocks;
+
+ sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
+ if (!sect)
+ return -ENOMEM;
+
+ /*
+ * This code to walk all sectors is a slightly modified version
+ * of the cfi_varsize_frob() code.
+ */
+ i = 0;
+ chipnum = 0;
+ adr = 0;
+ sectors = 0;
+ offset = 0;
+ length = mtd->size;
+
+ while (length) {
+ int size = regions[i].erasesize;
+
+ /*
+ * Only test sectors that shall not be unlocked. The other
+ * sectors shall be unlocked, so let's keep their locking
+ * status at "unlocked" (locked=0) for the final re-locking.
+ */
+ if ((offset < ofs) || (offset >= (ofs + len))) {
+ sect[sectors].chip = &cfi->chips[chipnum];
+ sect[sectors].adr = adr;
+ sect[sectors].locked = do_ppb_xxlock(
+ map, &cfi->chips[chipnum], adr, 0,
+ DO_XXLOCK_ONEBLOCK_GETLOCK);
+ }
+
+ adr += size;
+ offset += size;
+ length -= size;
+
+ if (offset == regions[i].offset + size * regions[i].numblocks)
+ i++;
+
+ if (adr >> cfi->chipshift) {
+ if (offset >= (ofs + len))
+ break;
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ }
+
+ sectors++;
+ if (sectors >= max_sectors) {
+ printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
+ max_sectors);
+ kfree(sect);
+ return -EINVAL;
+ }
+ }
+
+ /* Now unlock the whole chip */
+ ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_UNLOCK);
+ if (ret) {
+ kfree(sect);
+ return ret;
+ }
+
+ /*
+ * PPB unlocking always unlocks all sectors of the flash chip.
+ * We need to re-lock all previously locked sectors.
+ */
+ for (i = 0; i < sectors; i++) {
+ if (sect[i].locked)
+ do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
+ DO_XXLOCK_ONEBLOCK_LOCK);
+ }
+
+ kfree(sect);
+ return ret;
+}
+
+static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
+}
+
+static void cfi_amdstd_sync (struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+ int ret = 0;
+ DECLARE_WAITQUEUE(wait, current);
+
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+ retry:
+ mutex_lock(&chip->mutex);
+
+ switch(chip->state) {
+ case FL_READY:
+ case FL_STATUS:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ chip->oldstate = chip->state;
+ chip->state = FL_SYNCING;
+ /* No need to wake_up() on this state change -
+ * as the whole point is that nobody can do anything
+ * with the chip now anyway.
+ */
+ fallthrough;
+ case FL_SYNCING:
+ mutex_unlock(&chip->mutex);
+ break;
+
+ default:
+ /* Not an idle state */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+
+ mutex_unlock(&chip->mutex);
+
+ schedule();
+
+ remove_wait_queue(&chip->wq, &wait);
+
+ goto retry;
+ }
+ }
+
+ /* Unlock the chips again */
+
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_SYNCING) {
+ chip->state = chip->oldstate;
+ wake_up(&chip->wq);
+ }
+ mutex_unlock(&chip->mutex);
+ }
+}
+
+
+static int cfi_amdstd_suspend(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+ int ret = 0;
+
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ switch(chip->state) {
+ case FL_READY:
+ case FL_STATUS:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ chip->oldstate = chip->state;
+ chip->state = FL_PM_SUSPENDED;
+ /* No need to wake_up() on this state change -
+ * as the whole point is that nobody can do anything
+ * with the chip now anyway.
+ */
+ break;
+ case FL_PM_SUSPENDED:
+ break;
+
+ default:
+ ret = -EAGAIN;
+ break;
+ }
+ mutex_unlock(&chip->mutex);
+ }
+
+ /* Unlock the chips again */
+
+ if (ret) {
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_PM_SUSPENDED) {
+ chip->state = chip->oldstate;
+ wake_up(&chip->wq);
+ }
+ mutex_unlock(&chip->mutex);
+ }
+ }
+
+ return ret;
+}
+
+
+static void cfi_amdstd_resume(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+
+ for (i=0; i<cfi->numchips; i++) {
+
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_PM_SUSPENDED) {
+ chip->state = FL_READY;
+ map_write(map, CMD(0xF0), chip->start);
+ wake_up(&chip->wq);
+ }
+ else
+ printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
+
+ mutex_unlock(&chip->mutex);
+ }
+}
+
+
+/*
+ * Ensure that the flash device is put back into read array mode before
+ * unloading the driver or rebooting. On some systems, rebooting while
+ * the flash is in query/program/erase mode will prevent the CPU from
+ * fetching the bootloader code, requiring a hard reset or power cycle.
+ */
+static int cfi_amdstd_reset(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i, ret;
+ struct flchip *chip;
+
+ for (i = 0; i < cfi->numchips; i++) {
+
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
+ if (!ret) {
+ map_write(map, CMD(0xF0), chip->start);
+ chip->state = FL_SHUTDOWN;
+ put_chip(map, chip, chip->start);
+ }
+
+ mutex_unlock(&chip->mutex);
+ }
+
+ return 0;
+}
+
+
+static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
+ void *v)
+{
+ struct mtd_info *mtd;
+
+ mtd = container_of(nb, struct mtd_info, reboot_notifier);
+ cfi_amdstd_reset(mtd);
+ return NOTIFY_DONE;
+}
+
+
+static void cfi_amdstd_destroy(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ cfi_amdstd_reset(mtd);
+ unregister_reboot_notifier(&mtd->reboot_notifier);
+ kfree(cfi->cmdset_priv);
+ kfree(cfi->cfiq);
+ kfree(cfi);
+ kfree(mtd->eraseregions);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
+MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
+MODULE_ALIAS("cfi_cmdset_0006");
+MODULE_ALIAS("cfi_cmdset_0701");
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
new file mode 100644
index 0000000000..60c7f6f751
--- /dev/null
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -0,0 +1,1402 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common Flash Interface support:
+ * ST Advanced Architecture Command Set (ID 0x0020)
+ *
+ * (C) 2000 Red Hat.
+ *
+ * 10/10/2000 Nicolas Pitre <nico@fluxnic.net>
+ * - completely revamped method functions so they are aware and
+ * independent of the flash geometry (buswidth, interleave, etc.)
+ * - scalability vs code size is completely set at compile-time
+ * (see include/linux/mtd/cfi.h for selection)
+ * - optimized write buffer method
+ * 06/21/2002 Joern Engel <joern@wh.fh-wedel.de> and others
+ * - modified Intel Command Set 0x0001 to support ST Advanced Architecture
+ * (command set 0x0020)
+ * - added a writev function
+ * 07/13/2005 Joern Engel <joern@wh.fh-wedel.de>
+ * - Plugged memory leak in cfi_staa_writev().
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/mtd.h>
+
+
+static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen);
+static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
+static void cfi_staa_sync (struct mtd_info *);
+static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_staa_suspend (struct mtd_info *);
+static void cfi_staa_resume (struct mtd_info *);
+
+static void cfi_staa_destroy(struct mtd_info *);
+
+struct mtd_info *cfi_cmdset_0020(struct map_info *, int);
+
+static struct mtd_info *cfi_staa_setup (struct map_info *);
+
+static struct mtd_chip_driver cfi_staa_chipdrv = {
+ .probe = NULL, /* Not usable directly */
+ .destroy = cfi_staa_destroy,
+ .name = "cfi_cmdset_0020",
+ .module = THIS_MODULE
+};
+
+/* #define DEBUG_LOCK_BITS */
+//#define DEBUG_CFI_FEATURES
+
+#ifdef DEBUG_CFI_FEATURES
+static void cfi_tell_features(struct cfi_pri_intelext *extp)
+{
+ int i;
+ printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
+ printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
+ printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
+ printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
+ printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
+ printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
+ printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
+ printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
+ printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
+ printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
+ for (i=9; i<32; i++) {
+ if (extp->FeatureSupport & (1<<i))
+ printk(" - Unknown Bit %X: supported\n", i);
+ }
+
+ printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
+ printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
+ for (i=1; i<8; i++) {
+ if (extp->SuspendCmdSupport & (1<<i))
+ printk(" - Unknown Bit %X: supported\n", i);
+ }
+
+ printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
+ printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
+ printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
+ for (i=2; i<16; i++) {
+ if (extp->BlkStatusRegMask & (1<<i))
+ printk(" - Unknown Bit %X Active: yes\n",i);
+ }
+
+ printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
+ extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
+ if (extp->VppOptimal)
+ printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
+ extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
+}
+#endif
+
+/* This routine is made available to other mtd code via
+ * inter_module_register. It must only be accessed through
+ * inter_module_get which will bump the use count of this module. The
+ * addresses passed back in cfi are valid as long as the use count of
+ * this module is non-zero, i.e. between inter_module_get and
+ * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
+ */
+struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+
+ if (cfi->cfi_mode) {
+ /*
+ * It's a real CFI chip, not one for which the probe
+ * routine faked a CFI structure. So we read the feature
+ * table from it.
+ */
+ __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
+ struct cfi_pri_intelext *extp;
+
+ extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
+ if (!extp)
+ return NULL;
+
+ if (extp->MajorVersion != '1' ||
+ (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
+ printk(KERN_ERR " Unknown ST Microelectronics"
+ " Extended Query version %c.%c.\n",
+ extp->MajorVersion, extp->MinorVersion);
+ kfree(extp);
+ return NULL;
+ }
+
+ /* Do some byteswapping if necessary */
+ extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
+ extp->BlkStatusRegMask = cfi32_to_cpu(map,
+ extp->BlkStatusRegMask);
+
+#ifdef DEBUG_CFI_FEATURES
+ /* Tell the user about it in lots of lovely detail */
+ cfi_tell_features(extp);
+#endif
+
+ /* Install our own private info structure */
+ cfi->cmdset_priv = extp;
+ }
+
+	for (i = 0; i < cfi->numchips; i++) {
+ cfi->chips[i].word_write_time = 128;
+ cfi->chips[i].buffer_write_time = 128;
+ cfi->chips[i].erase_time = 1024;
+ cfi->chips[i].ref_point_counter = 0;
+ init_waitqueue_head(&(cfi->chips[i].wq));
+ }
+
+ return cfi_staa_setup(map);
+}
+EXPORT_SYMBOL_GPL(cfi_cmdset_0020);
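+
+/*
+ * A minimal sketch of how a map driver reaches this entry point
+ * (assuming the usual CFI probe path; setting up "map" itself is
+ * omitted):
+ *
+ *	struct mtd_info *mtd = do_map_probe("cfi_probe", map);
+ *	// cfi_probe reads the primary command set ID from the CFI query
+ *	// table; for ID 0x0020 the generic probe code calls
+ *	// cfi_cmdset_0020(map, primary) to build the mtd_info.
+ */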
+
+static struct mtd_info *cfi_staa_setup(struct map_info *map)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct mtd_info *mtd;
+ unsigned long offset = 0;
+ int i,j;
+ unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
+
+ if (!mtd) {
+ kfree(cfi->cmdset_priv);
+ return NULL;
+ }
+
+ mtd->priv = map;
+ mtd->type = MTD_NORFLASH;
+ mtd->size = devsize * cfi->numchips;
+
+ mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
+ mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
+ sizeof(struct mtd_erase_region_info),
+ GFP_KERNEL);
+ if (!mtd->eraseregions) {
+ kfree(cfi->cmdset_priv);
+ kfree(mtd);
+ return NULL;
+ }
+
+ for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
+ unsigned long ernum, ersize;
+ ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
+ ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
+
+ if (mtd->erasesize < ersize) {
+ mtd->erasesize = ersize;
+ }
+ for (j=0; j<cfi->numchips; j++) {
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
+ }
+ offset += (ersize * ernum);
+ }
+
+ if (offset != devsize) {
+ /* Argh */
+ printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
+ kfree(mtd->eraseregions);
+ kfree(cfi->cmdset_priv);
+ kfree(mtd);
+ return NULL;
+ }
+
+	for (i = 0; i < mtd->numeraseregions; i++) {
+ printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
+ i, (unsigned long long)mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+ }
+
+ /* Also select the correct geometry setup too */
+ mtd->_erase = cfi_staa_erase_varsize;
+ mtd->_read = cfi_staa_read;
+ mtd->_write = cfi_staa_write_buffers;
+ mtd->_writev = cfi_staa_writev;
+ mtd->_sync = cfi_staa_sync;
+ mtd->_lock = cfi_staa_lock;
+ mtd->_unlock = cfi_staa_unlock;
+ mtd->_suspend = cfi_staa_suspend;
+ mtd->_resume = cfi_staa_resume;
+ mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
+ mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
+ mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ map->fldrv = &cfi_staa_chipdrv;
+ __module_get(THIS_MODULE);
+ mtd->name = map->name;
+ return mtd;
+}
+
+
+static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
+{
+ map_word status, status_OK;
+ unsigned long timeo;
+ DECLARE_WAITQUEUE(wait, current);
+ int suspended = 0;
+ unsigned long cmd_addr;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ adr += chip->start;
+
+ /* Ensure cmd read/writes are aligned. */
+ cmd_addr = adr & ~(map_bankwidth(map)-1);
+
+ /* Let's determine this according to the interleave only once */
+ status_OK = CMD(0x80);
+
+ timeo = jiffies + HZ;
+ retry:
+ mutex_lock(&chip->mutex);
+
+ /* Check that the chip's ready to talk to us.
+ * If it's in FL_ERASING state, suspend it and make it talk now.
+ */
+ switch (chip->state) {
+ case FL_ERASING:
+ if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
+ goto sleep; /* We don't support erase suspend */
+
+ map_write (map, CMD(0xb0), cmd_addr);
+ /* If the flash has finished erasing, then 'erase suspend'
+ * appears to make some (28F320) flash devices switch to
+ * 'read' mode. Make sure that we switch to 'read status'
+ * mode so we get the right data. --rmk
+ */
+ map_write(map, CMD(0x70), cmd_addr);
+ chip->oldstate = FL_ERASING;
+ chip->state = FL_ERASE_SUSPENDING;
+ // printk("Erase suspending at 0x%lx\n", cmd_addr);
+ for (;;) {
+ status = map_read(map, cmd_addr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ /* Urgh */
+ map_write(map, CMD(0xd0), cmd_addr);
+ /* make sure we're in 'read status' mode */
+ map_write(map, CMD(0x70), cmd_addr);
+ chip->state = FL_ERASING;
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+				printk(KERN_ERR "Chip not ready after erase suspend: status = 0x%lx\n",
+				       status.x[0]);
+ return -EIO;
+ }
+
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ }
+
+ suspended = 1;
+ map_write(map, CMD(0xff), cmd_addr);
+ chip->state = FL_READY;
+ break;
+
+#if 0
+ case FL_WRITING:
+ /* Not quite yet */
+#endif
+
+ case FL_READY:
+ break;
+
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ map_write(map, CMD(0x70), cmd_addr);
+ chip->state = FL_STATUS;
+ fallthrough;
+ case FL_STATUS:
+ status = map_read(map, cmd_addr);
+ if (map_word_andequal(map, status, status_OK, status_OK)) {
+ map_write(map, CMD(0xff), cmd_addr);
+ chip->state = FL_READY;
+ break;
+ }
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+ default:
+ sleep:
+ /* Stick ourselves on a wait queue to be woken when
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+ goto retry;
+ }
+
+ map_copy_from(map, buf, adr, len);
+
+ if (suspended) {
+ chip->state = chip->oldstate;
+ /* What if one interleaved chip has finished and the
+ other hasn't? The old code would leave the finished
+ one in READY mode. That's bad, and caused -EROFS
+ errors to be returned from do_erase_oneblock because
+ that's the only bit it checked for at the time.
+ As the state machine appears to explicitly allow
+ sending the 0x70 (Read Status) command to an erasing
+ chip and expecting it to be ignored, that's what we
+ do. */
+ map_write(map, CMD(0xd0), cmd_addr);
+ map_write(map, CMD(0x70), cmd_addr);
+ }
+
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+ return 0;
+}
+
+static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs;
+ int chipnum;
+ int ret = 0;
+
+	/* ofs: offset within the first chip at which the first read should start */
+ chipnum = (from >> cfi->chipshift);
+ ofs = from - (chipnum << cfi->chipshift);
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= cfi->numchips)
+ break;
+
+ if ((len + ofs -1) >> cfi->chipshift)
+ thislen = (1<<cfi->chipshift) - ofs;
+ else
+ thislen = len;
+
+ ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
+ if (ret)
+ break;
+
+ *retlen += thislen;
+ len -= thislen;
+ buf += thislen;
+
+ ofs = 0;
+ chipnum++;
+ }
+ return ret;
+}
+
+static int do_write_buffer(struct map_info *map, struct flchip *chip,
+ unsigned long adr, const u_char *buf, int len)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK;
+ unsigned long cmd_adr, timeo;
+ DECLARE_WAITQUEUE(wait, current);
+ int wbufsize, z;
+
+	/* M58LW064A requires bus alignment for buffer writes -- saw */
+ if (adr & (map_bankwidth(map)-1))
+ return -EINVAL;
+
+ wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ adr += chip->start;
+ cmd_adr = adr & ~(wbufsize-1);
+
+ /* Let's determine this according to the interleave only once */
+ status_OK = CMD(0x80);
+
+ timeo = jiffies + HZ;
+ retry:
+
+#ifdef DEBUG_CFI_FEATURES
+ printk("%s: chip->state[%d]\n", __func__, chip->state);
+#endif
+ mutex_lock(&chip->mutex);
+
+ /* Check that the chip's ready to talk to us.
+ * Later, we can actually think about interrupting it
+ * if it's in FL_ERASING state.
+ * Not just yet, though.
+ */
+ switch (chip->state) {
+ case FL_READY:
+ break;
+
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+#ifdef DEBUG_CFI_FEATURES
+	printk("%s: 1 status[%lx]\n", __func__, map_read(map, cmd_adr).x[0]);
+#endif
+ fallthrough;
+ case FL_STATUS:
+ status = map_read(map, cmd_adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
+ status.x[0], map_read(map, cmd_adr).x[0]);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+ default:
+ /* Stick ourselves on a wait queue to be woken when
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+ goto retry;
+ }
+
+ ENABLE_VPP(map);
+ map_write(map, CMD(0xe8), cmd_adr);
+ chip->state = FL_WRITING_TO_BUFFER;
+
+ z = 0;
+ for (;;) {
+ status = map_read(map, cmd_adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+
+ if (++z > 100) {
+ /* Argh. Not ready for write to buffer */
+ DISABLE_VPP(map);
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
+ return -EIO;
+ }
+ }
+
+ /* Write length of data to come */
+	map_write(map, CMD(len / map_bankwidth(map) - 1), cmd_adr);
+
+ /* Write data */
+ for (z = 0; z < len;
+ z += map_bankwidth(map), buf += map_bankwidth(map)) {
+ map_word d;
+ d = map_word_load(map, buf);
+ map_write(map, d, adr+z);
+ }
+ /* GO GO GO */
+ map_write(map, CMD(0xd0), cmd_adr);
+ chip->state = FL_WRITING;
+
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(chip->buffer_write_time);
+ mutex_lock(&chip->mutex);
+
+ timeo = jiffies + (HZ/2);
+ z = 0;
+ for (;;) {
+ if (chip->state != FL_WRITING) {
+ /* Someone's suspended the write. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ / 2); /* FIXME */
+ mutex_lock(&chip->mutex);
+ continue;
+ }
+
+ status = map_read(map, cmd_adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* OK Still waiting */
+ if (time_after(jiffies, timeo)) {
+ /* clear status */
+ map_write(map, CMD(0x50), cmd_adr);
+ /* put back into read status register mode */
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ DISABLE_VPP(map);
+ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ z++;
+ mutex_lock(&chip->mutex);
+ }
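+	/*
+	 * Simple adaptive tuning of the programming delay: if the write
+	 * completed with no polling iterations the delay was too long, so
+	 * shorten it (never below 1); if more than one iteration was
+	 * needed, lengthen it for the next buffer write.
+	 */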
+ if (!z) {
+ chip->buffer_write_time--;
+ if (!chip->buffer_write_time)
+ chip->buffer_write_time++;
+ }
+ if (z > 1)
+ chip->buffer_write_time++;
+
+ /* Done and happy. */
+ DISABLE_VPP(map);
+ chip->state = FL_STATUS;
+
+ /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
+ if (map_word_bitsset(map, status, CMD(0x3a))) {
+#ifdef DEBUG_CFI_FEATURES
+ printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
+#endif
+ /* clear status */
+ map_write(map, CMD(0x50), cmd_adr);
+ /* put back into read status register mode */
+ map_write(map, CMD(0x70), adr);
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+ return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
+ }
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+
+ return 0;
+}
+
+static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ int ret;
+ int chipnum;
+ unsigned long ofs;
+
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
+
+#ifdef DEBUG_CFI_FEATURES
+ printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
+ printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
+	printk("%s: ofs[%lx] len[%zx]\n", __func__, ofs, len);
+#endif
+
+ /* Write buffer is worth it only if more than one word to write... */
+ while (len > 0) {
+ /* We must not cross write block boundaries */
+ int size = wbufsize - (ofs & (wbufsize-1));
+
+ if (size > len)
+ size = len;
+
+ ret = do_write_buffer(map, &cfi->chips[chipnum],
+ ofs, buf, size);
+ if (ret)
+ return ret;
+
+ ofs += size;
+ buf += size;
+ (*retlen) += size;
+ len -= size;
+
+ if (ofs >> cfi->chipshift) {
+			chipnum++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Writev for ECC-Flashes is a little more complicated. We need to maintain
+ * a small buffer for this.
+ * XXX: If the buffer size is not a power of 2, this will break
+ */
+#define ECCBUF_SIZE (mtd->writesize)
+#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
+#define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
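+/*
+ * Worked example (hypothetical numbers, assuming mtd->writesize == 8):
+ * for a 13-byte vector element, ECCBUF_DIV(13) == 8 bytes can be written
+ * out page-aligned immediately, while ECCBUF_MOD(13) == 5 tail bytes are
+ * carried over in 'buffer' until a later element (or the final flush)
+ * completes the page.
+ */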
+static int
+cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ unsigned long i;
+ size_t totlen = 0, thislen;
+ int ret = 0;
+ size_t buflen = 0;
+ char *buffer;
+
+ if (!ECCBUF_SIZE) {
+ /* We should fall back to a general writev implementation.
+ * Until that is written, just break.
+ */
+ return -EIO;
+ }
+ buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ for (i=0; i<count; i++) {
+ size_t elem_len = vecs[i].iov_len;
+ void *elem_base = vecs[i].iov_base;
+ if (!elem_len) /* FIXME: Might be unnecessary. Check that */
+ continue;
+ if (buflen) { /* cut off head */
+ if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
+ memcpy(buffer+buflen, elem_base, elem_len);
+ buflen += elem_len;
+ continue;
+ }
+ memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
+ ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
+ buffer);
+ totlen += thislen;
+ if (ret || thislen != ECCBUF_SIZE)
+ goto write_error;
+ elem_len -= thislen-buflen;
+ elem_base += thislen-buflen;
+ to += ECCBUF_SIZE;
+ }
+ if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
+ ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
+ &thislen, elem_base);
+ totlen += thislen;
+ if (ret || thislen != ECCBUF_DIV(elem_len))
+ goto write_error;
+ to += thislen;
+ }
+ buflen = ECCBUF_MOD(elem_len); /* cut off tail */
+ if (buflen) {
+ memset(buffer, 0xff, ECCBUF_SIZE);
+ memcpy(buffer, elem_base + thislen, buflen);
+ }
+ }
+ if (buflen) { /* flush last page, even if not full */
+ /* This is sometimes intended behaviour, really */
+ ret = mtd_write(mtd, to, buflen, &thislen, buffer);
+ totlen += thislen;
+		if (ret || thislen != buflen)
+ goto write_error;
+ }
+write_error:
+ if (retlen)
+ *retlen = totlen;
+ kfree(buffer);
+ return ret;
+}
+
+
+static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK;
+ unsigned long timeo;
+ int retries = 3;
+ DECLARE_WAITQUEUE(wait, current);
+ int ret = 0;
+
+ adr += chip->start;
+
+ /* Let's determine this according to the interleave only once */
+ status_OK = CMD(0x80);
+
+ timeo = jiffies + HZ;
+retry:
+ mutex_lock(&chip->mutex);
+
+ /* Check that the chip's ready to talk to us. */
+ switch (chip->state) {
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ case FL_READY:
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ fallthrough;
+ case FL_STATUS:
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+ default:
+ /* Stick ourselves on a wait queue to be woken when
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+ goto retry;
+ }
+
+ ENABLE_VPP(map);
+ /* Clear the status register first */
+ map_write(map, CMD(0x50), adr);
+
+ /* Now erase */
+ map_write(map, CMD(0x20), adr);
+ map_write(map, CMD(0xD0), adr);
+ chip->state = FL_ERASING;
+
+ mutex_unlock(&chip->mutex);
+ msleep(1000);
+ mutex_lock(&chip->mutex);
+
+ /* FIXME. Use a timer to check this, and return immediately. */
+ /* Once the state machine's known to be working I'll do that */
+
+ timeo = jiffies + (HZ*20);
+ for (;;) {
+ if (chip->state != FL_ERASING) {
+ /* Someone's suspended the erase. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ*20); /* FIXME */
+ mutex_lock(&chip->mutex);
+ continue;
+ }
+
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* OK Still waiting */
+ if (time_after(jiffies, timeo)) {
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+ DISABLE_VPP(map);
+ mutex_unlock(&chip->mutex);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ }
+
+ DISABLE_VPP(map);
+ ret = 0;
+
+ /* We've broken this before. It doesn't hurt to be safe */
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ status = map_read(map, adr);
+
+ /* check for lock bit */
+ if (map_word_bitsset(map, status, CMD(0x3a))) {
+ unsigned char chipstatus = status.x[0];
+ if (!map_word_equal(map, status, CMD(chipstatus))) {
+ int i, w;
+ for (w=0; w<map_words(map); w++) {
+ for (i = 0; i<cfi_interleave(cfi); i++) {
+ chipstatus |= status.x[w] >> (cfi->device_type * 8);
+ }
+ }
+ printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
+ status.x[0], chipstatus);
+ }
+ /* Reset the error bits */
+ map_write(map, CMD(0x50), adr);
+ map_write(map, CMD(0x70), adr);
+
+ if ((chipstatus & 0x30) == 0x30) {
+ printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
+ ret = -EIO;
+ } else if (chipstatus & 0x02) {
+ /* Protection bit set */
+ ret = -EROFS;
+ } else if (chipstatus & 0x8) {
+ /* Voltage */
+ printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
+ ret = -EIO;
+ } else if (chipstatus & 0x20) {
+ if (retries--) {
+ printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
+ timeo = jiffies + HZ;
+ chip->state = FL_STATUS;
+ mutex_unlock(&chip->mutex);
+ goto retry;
+ }
+ printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
+ ret = -EIO;
+ }
+ }
+
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int cfi_staa_erase_varsize(struct mtd_info *mtd,
+ struct erase_info *instr)
+{
+	struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long adr, len;
+ int chipnum, ret;
+ int i, first;
+ struct mtd_erase_region_info *regions = mtd->eraseregions;
+
+ /* Check that both start and end of the requested erase are
+ * aligned with the erasesize at the appropriate addresses.
+ */
+
+ i = 0;
+
+ /* Skip all erase regions which are ended before the start of
+ the requested erase. Actually, to save on the calculations,
+ we skip to the first erase region which starts after the
+ start of the requested erase, and then go back one.
+ */
+
+ while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
+ i++;
+ i--;
+
+ /* OK, now i is pointing at the erase region in which this
+ erase request starts. Check the start of the requested
+ erase range is aligned with the erase size which is in
+ effect here.
+ */
+
+ if (instr->addr & (regions[i].erasesize-1))
+ return -EINVAL;
+
+ /* Remember the erase region we start on */
+ first = i;
+
+ /* Next, check that the end of the requested erase is aligned
+ * with the erase region at that address.
+ */
+
+ while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
+ i++;
+
+ /* As before, drop back one to point at the region in which
+ the address actually falls
+ */
+ i--;
+
+ if ((instr->addr + instr->len) & (regions[i].erasesize-1))
+ return -EINVAL;
+
+ chipnum = instr->addr >> cfi->chipshift;
+ adr = instr->addr - (chipnum << cfi->chipshift);
+ len = instr->len;
+
+ i=first;
+
+	while (len) {
+ ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
+
+ if (ret)
+ return ret;
+
+ adr += regions[i].erasesize;
+ len -= regions[i].erasesize;
+
+		if (adr % (1 << cfi->chipshift) ==
+		    (((unsigned long)regions[i].offset +
+		      (regions[i].erasesize * regions[i].numblocks)) %
+		     (1 << cfi->chipshift)))
+ i++;
+
+ if (adr >> cfi->chipshift) {
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void cfi_staa_sync (struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+ int ret = 0;
+ DECLARE_WAITQUEUE(wait, current);
+
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+ retry:
+ mutex_lock(&chip->mutex);
+
+ switch(chip->state) {
+ case FL_READY:
+ case FL_STATUS:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ chip->oldstate = chip->state;
+ chip->state = FL_SYNCING;
+ /* No need to wake_up() on this state change -
+ * as the whole point is that nobody can do anything
+ * with the chip now anyway.
+ */
+ fallthrough;
+ case FL_SYNCING:
+ mutex_unlock(&chip->mutex);
+ break;
+
+ default:
+ /* Not an idle state */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+
+ goto retry;
+ }
+ }
+
+ /* Unlock the chips again */
+
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_SYNCING) {
+ chip->state = chip->oldstate;
+ wake_up(&chip->wq);
+ }
+ mutex_unlock(&chip->mutex);
+ }
+}
+
+static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK;
+ unsigned long timeo = jiffies + HZ;
+ DECLARE_WAITQUEUE(wait, current);
+
+ adr += chip->start;
+
+ /* Let's determine this according to the interleave only once */
+ status_OK = CMD(0x80);
+
+ timeo = jiffies + HZ;
+retry:
+ mutex_lock(&chip->mutex);
+
+ /* Check that the chip's ready to talk to us. */
+ switch (chip->state) {
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ case FL_READY:
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ fallthrough;
+ case FL_STATUS:
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+ default:
+ /* Stick ourselves on a wait queue to be woken when
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+ goto retry;
+ }
+
+ ENABLE_VPP(map);
+ map_write(map, CMD(0x60), adr);
+ map_write(map, CMD(0x01), adr);
+ chip->state = FL_LOCKING;
+
+ mutex_unlock(&chip->mutex);
+ msleep(1000);
+ mutex_lock(&chip->mutex);
+
+ /* FIXME. Use a timer to check this, and return immediately. */
+ /* Once the state machine's known to be working I'll do that */
+
+ timeo = jiffies + (HZ*2);
+ for (;;) {
+
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* OK Still waiting */
+ if (time_after(jiffies, timeo)) {
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+ DISABLE_VPP(map);
+ mutex_unlock(&chip->mutex);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ }
+
+ /* Done and happy. */
+ chip->state = FL_STATUS;
+ DISABLE_VPP(map);
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+ return 0;
+}
+
+static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long adr;
+ int chipnum, ret;
+#ifdef DEBUG_LOCK_BITS
+ int ofs_factor = cfi->interleave * cfi->device_type;
+#endif
+
+ if (ofs & (mtd->erasesize - 1))
+ return -EINVAL;
+
+ if (len & (mtd->erasesize -1))
+ return -EINVAL;
+
+ chipnum = ofs >> cfi->chipshift;
+ adr = ofs - (chipnum << cfi->chipshift);
+
+ while(len) {
+
+#ifdef DEBUG_LOCK_BITS
+ cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
+ cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
+#endif
+
+ ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
+
+#ifdef DEBUG_LOCK_BITS
+ cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
+ cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
+#endif
+
+ if (ret)
+ return ret;
+
+ adr += mtd->erasesize;
+ len -= mtd->erasesize;
+
+ if (adr >> cfi->chipshift) {
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ }
+ }
+ return 0;
+}
+
+static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK;
+ unsigned long timeo = jiffies + HZ;
+ DECLARE_WAITQUEUE(wait, current);
+
+ adr += chip->start;
+
+ /* Let's determine this according to the interleave only once */
+ status_OK = CMD(0x80);
+
+ timeo = jiffies + HZ;
+retry:
+ mutex_lock(&chip->mutex);
+
+ /* Check that the chip's ready to talk to us. */
+ switch (chip->state) {
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ case FL_READY:
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ fallthrough;
+ case FL_STATUS:
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+ default:
+ /* Stick ourselves on a wait queue to be woken when
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+ goto retry;
+ }
+
+ ENABLE_VPP(map);
+ map_write(map, CMD(0x60), adr);
+ map_write(map, CMD(0xD0), adr);
+ chip->state = FL_UNLOCKING;
+
+ mutex_unlock(&chip->mutex);
+ msleep(1000);
+ mutex_lock(&chip->mutex);
+
+ /* FIXME. Use a timer to check this, and return immediately. */
+ /* Once the state machine's known to be working I'll do that */
+
+ timeo = jiffies + (HZ*2);
+ for (;;) {
+
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* OK Still waiting */
+ if (time_after(jiffies, timeo)) {
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+ DISABLE_VPP(map);
+ mutex_unlock(&chip->mutex);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ mutex_lock(&chip->mutex);
+ }
+
+ /* Done and happy. */
+ chip->state = FL_STATUS;
+ DISABLE_VPP(map);
+ wake_up(&chip->wq);
+ mutex_unlock(&chip->mutex);
+ return 0;
+}
+
+static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long adr;
+ int chipnum, ret;
+#ifdef DEBUG_LOCK_BITS
+ int ofs_factor = cfi->interleave * cfi->device_type;
+#endif
+
+ chipnum = ofs >> cfi->chipshift;
+ adr = ofs - (chipnum << cfi->chipshift);
+
+#ifdef DEBUG_LOCK_BITS
+ {
+ unsigned long temp_adr = adr;
+ unsigned long temp_len = len;
+
+ cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ while (temp_len) {
+ printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
+ temp_adr += mtd->erasesize;
+ temp_len -= mtd->erasesize;
+ }
+ cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ }
+#endif
+
+ ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);
+
+#ifdef DEBUG_LOCK_BITS
+ cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
+ cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
+#endif
+
+ return ret;
+}
+
+static int cfi_staa_suspend(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+ int ret = 0;
+
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ switch(chip->state) {
+ case FL_READY:
+ case FL_STATUS:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ chip->oldstate = chip->state;
+ chip->state = FL_PM_SUSPENDED;
+ /* No need to wake_up() on this state change -
+ * as the whole point is that nobody can do anything
+ * with the chip now anyway.
+ */
+ break;
+
+ case FL_PM_SUSPENDED:
+ break;
+
+ default:
+ ret = -EAGAIN;
+ break;
+ }
+ mutex_unlock(&chip->mutex);
+ }
+
+ /* Unlock the chips again */
+
+ if (ret) {
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_PM_SUSPENDED) {
+ /* No need to force it into a known state here,
+ because we're returning failure, and it didn't
+ get power cycled */
+ chip->state = chip->oldstate;
+ wake_up(&chip->wq);
+ }
+ mutex_unlock(&chip->mutex);
+ }
+ }
+
+ return ret;
+}
+
+static void cfi_staa_resume(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+
+ for (i=0; i<cfi->numchips; i++) {
+
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ /* Go to known state. Chip may have been power cycled */
+ if (chip->state == FL_PM_SUSPENDED) {
+ map_write(map, CMD(0xFF), 0);
+ chip->state = FL_READY;
+ wake_up(&chip->wq);
+ }
+
+ mutex_unlock(&chip->mutex);
+ }
+}
+
+static void cfi_staa_destroy(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ kfree(cfi->cmdset_priv);
+ kfree(cfi);
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
new file mode 100644
index 0000000000..a04b617418
--- /dev/null
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ Common Flash Interface probe code.
+ (C) 2000 Red Hat.
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/xip.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/gen_probe.h>
+
+//#define DEBUG_CFI
+
+#ifdef DEBUG_CFI
+static void print_cfi_ident(struct cfi_ident *);
+#endif
+
+static int cfi_probe_chip(struct map_info *map, __u32 base,
+ unsigned long *chip_map, struct cfi_private *cfi);
+static int cfi_chip_setup(struct map_info *map, struct cfi_private *cfi);
+
+struct mtd_info *cfi_probe(struct map_info *map);
+
+#ifdef CONFIG_MTD_XIP
+
+/* only needed for short periods, so this is rather simple */
+#define xip_disable() local_irq_disable()
+
+#define xip_allowed(base, map) \
+do { \
+ (void) map_read(map, base); \
+ xip_iprefetch(); \
+ local_irq_enable(); \
+} while (0)
+
+#define xip_enable(base, map, cfi) \
+do { \
+ cfi_qry_mode_off(base, map, cfi); \
+ xip_allowed(base, map); \
+} while (0)
+
+#define xip_disable_qry(base, map, cfi) \
+do { \
+ xip_disable(); \
+ cfi_qry_mode_on(base, map, cfi); \
+} while (0)
+
+#else
+
+#define xip_disable() do { } while (0)
+#define xip_allowed(base, map) do { } while (0)
+#define xip_enable(base, map, cfi) do { } while (0)
+#define xip_disable_qry(base, map, cfi) do { } while (0)
+
+#endif
+
+/*
+ * This fixup occurs immediately after reading the CFI structure and can affect
+ * the number of chips detected, unlike cfi_fixup, which occurs after an
+ * mtd_info structure has been created for the chip.
+ */
+struct cfi_early_fixup {
+ uint16_t mfr;
+ uint16_t id;
+ void (*fixup)(struct cfi_private *cfi);
+};
+
+static void cfi_early_fixup(struct cfi_private *cfi,
+ const struct cfi_early_fixup *fixups)
+{
+ const struct cfi_early_fixup *f;
+
+ for (f = fixups; f->fixup; f++) {
+ if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
+ ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
+ f->fixup(cfi);
+ }
+ }
+}
+
+/* Check for QRY.
+ in: interleave, type, mode
+ ret: 1 if a new chip was detected at this base, 0 otherwise
+ (including when it turns out to be an alias of a known chip)
+ */
+
+static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
+ unsigned long *chip_map, struct cfi_private *cfi)
+{
+ int i;
+
+ if ((base + 0) >= map->size) {
+ printk(KERN_NOTICE
+ "Probe at base[0x00](0x%08lx) past the end of the map(0x%08lx)\n",
+ (unsigned long)base, map->size - 1);
+ return 0;
+ }
+ if ((base + 0xff) >= map->size) {
+ printk(KERN_NOTICE
+ "Probe at base[0x55](0x%08lx) past the end of the map(0x%08lx)\n",
+ (unsigned long)base + 0x55, map->size - 1);
+ return 0;
+ }
+
+ xip_disable();
+ if (!cfi_qry_mode_on(base, map, cfi)) {
+ xip_enable(base, map, cfi);
+ return 0;
+ }
+
+ if (!cfi->numchips) {
+ /* This is the first time we're called. Set up the CFI
+ stuff accordingly and return */
+ return cfi_chip_setup(map, cfi);
+ }
+
+ /* Check each previous chip to see if it's an alias */
+ for (i=0; i < (base >> cfi->chipshift); i++) {
+ unsigned long start;
+ if(!test_bit(i, chip_map)) {
+ /* Skip location; no valid chip at this address */
+ continue;
+ }
+ start = i << cfi->chipshift;
+ /* This chip should be in read mode if it's one
+ we've already touched. */
+ if (cfi_qry_present(map, start, cfi)) {
+ /* Eep. This chip also had the QRY marker.
+ * Is it an alias for the new one? */
+ cfi_qry_mode_off(start, map, cfi);
+
+ /* If the QRY marker goes away, it's an alias */
+ if (!cfi_qry_present(map, start, cfi)) {
+ xip_allowed(base, map);
+ printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
+ map->name, base, start);
+ return 0;
+ }
+ /* Yes, it's actually got QRY for data. Most
+ * unfortunate. Stick the new chip in read mode
+ * too and if it's the same, assume it's an alias. */
+ /* FIXME: Use other modes to do a proper check */
+ cfi_qry_mode_off(base, map, cfi);
+
+ if (cfi_qry_present(map, base, cfi)) {
+ xip_allowed(base, map);
+ printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
+ map->name, base, start);
+ return 0;
+ }
+ }
+ }
+
+ /* OK, if we got to here, then none of the previous chips appear to
+ be aliases for the current one. */
+ set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
+ cfi->numchips++;
+
+ /* Put it back into Read Mode */
+ cfi_qry_mode_off(base, map, cfi);
+ xip_allowed(base, map);
+
+ printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
+ map->name, cfi->interleave, cfi->device_type*8, base,
+ map->bankwidth*8);
+
+ return 1;
+}
+
+static void fixup_s70gl02gs_chips(struct cfi_private *cfi)
+{
+ /*
+ * S70GL02GS flash reports a single 256 MiB chip, but is really made up
+ * of two 128 MiB chips with 1024 sectors each.
+ */
+ cfi->cfiq->DevSize = 27;
+ cfi->cfiq->EraseRegionInfo[0] = 0x20003ff;
+ pr_warn("Bad S70GL02GS CFI data; adjust to detect 2 chips\n");
+}
+
+static const struct cfi_early_fixup cfi_early_fixup_table[] = {
+ { CFI_MFR_AMD, 0x4801, fixup_s70gl02gs_chips },
+ { },
+};
+
+static int __xipram cfi_chip_setup(struct map_info *map,
+ struct cfi_private *cfi)
+{
+ int ofs_factor = cfi->interleave*cfi->device_type;
+ __u32 base = 0;
+ int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
+ int i;
+ int addr_unlock1 = 0x555, addr_unlock2 = 0x2AA;
+
+ xip_enable(base, map, cfi);
+#ifdef DEBUG_CFI
+ printk("Number of erase regions: %d\n", num_erase_regions);
+#endif
+ if (!num_erase_regions)
+ return 0;
+
+ cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
+ if (!cfi->cfiq)
+ return 0;
+
+ memset(cfi->cfiq, 0, sizeof(struct cfi_ident));
+
+ cfi->cfi_mode = CFI_MODE_CFI;
+
+ cfi->sector_erase_cmd = CMD(0x30);
+
+ /* Read the CFI info structure */
+ xip_disable_qry(base, map, cfi);
+ for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
+ ((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
+
+ /* Do any necessary byteswapping */
+ cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID);
+
+ cfi->cfiq->P_ADR = le16_to_cpu(cfi->cfiq->P_ADR);
+ cfi->cfiq->A_ID = le16_to_cpu(cfi->cfiq->A_ID);
+ cfi->cfiq->A_ADR = le16_to_cpu(cfi->cfiq->A_ADR);
+ cfi->cfiq->InterfaceDesc = le16_to_cpu(cfi->cfiq->InterfaceDesc);
+ cfi->cfiq->MaxBufWriteSize = le16_to_cpu(cfi->cfiq->MaxBufWriteSize);
+
+#ifdef DEBUG_CFI
+ /* Dump the information therein */
+ print_cfi_ident(cfi->cfiq);
+#endif
+
+ for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
+ cfi->cfiq->EraseRegionInfo[i] = le32_to_cpu(cfi->cfiq->EraseRegionInfo[i]);
+
+#ifdef DEBUG_CFI
+ printk(" Erase Region #%d: BlockSize 0x%4.4X bytes, %d blocks\n",
+ i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff,
+ (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1);
+#endif
+ }
+
+ if (cfi->cfiq->P_ID == P_ID_SST_OLD) {
+ addr_unlock1 = 0x5555;
+ addr_unlock2 = 0x2AAA;
+ }
+
+ /*
+ * Note we put the device back into Read Mode BEFORE going into Auto
+ * Select Mode, as some devices support nesting of modes, others
+ * don't. This way should always work.
+ * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
+ * so should be treated as nops or illegal (and so put the device
+ * back into Read Mode, which is a nop in this case).
+ */
+ cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xaa, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, addr_unlock2, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi->mfr = cfi_read_query16(map, base);
+ cfi->id = cfi_read_query16(map, base + ofs_factor);
+
+ /* Get AMD/Spansion extended JEDEC ID */
+ if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
+ cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
+ cfi_read_query(map, base + 0xf * ofs_factor);
+
+ /* Put it back into Read Mode */
+ cfi_qry_mode_off(base, map, cfi);
+ xip_allowed(base, map);
+
+ cfi_early_fixup(cfi, cfi_early_fixup_table);
+
+ printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank. Manufacturer ID %#08x Chip ID %#08x\n",
+ map->name, cfi->interleave, cfi->device_type*8, base,
+ map->bankwidth*8, cfi->mfr, cfi->id);
+
+ return 1;
+}
+
+#ifdef DEBUG_CFI
+static char *vendorname(__u16 vendor)
+{
+ switch (vendor) {
+ case P_ID_NONE:
+ return "None";
+
+ case P_ID_INTEL_EXT:
+ return "Intel/Sharp Extended";
+
+ case P_ID_AMD_STD:
+ return "AMD/Fujitsu Standard";
+
+ case P_ID_INTEL_STD:
+ return "Intel/Sharp Standard";
+
+ case P_ID_AMD_EXT:
+ return "AMD/Fujitsu Extended";
+
+ case P_ID_WINBOND:
+ return "Winbond Standard";
+
+ case P_ID_ST_ADV:
+ return "ST Advanced";
+
+ case P_ID_MITSUBISHI_STD:
+ return "Mitsubishi Standard";
+
+ case P_ID_MITSUBISHI_EXT:
+ return "Mitsubishi Extended";
+
+ case P_ID_SST_PAGE:
+ return "SST Page Write";
+
+ case P_ID_SST_OLD:
+ return "SST 39VF160x/39VF320x";
+
+ case P_ID_INTEL_PERFORMANCE:
+ return "Intel Performance Code";
+
+ case P_ID_INTEL_DATA:
+ return "Intel Data";
+
+ case P_ID_RESERVED:
+ return "Not Allowed / Reserved for Future Use";
+
+ default:
+ return "Unknown";
+ }
+}
+
+
+static void print_cfi_ident(struct cfi_ident *cfip)
+{
+#if 0
+ if (cfip->qry[0] != 'Q' || cfip->qry[1] != 'R' || cfip->qry[2] != 'Y') {
+ printk("Invalid CFI ident structure.\n");
+ return;
+ }
+#endif
+ printk("Primary Vendor Command Set: %4.4X (%s)\n", cfip->P_ID, vendorname(cfip->P_ID));
+ if (cfip->P_ADR)
+ printk("Primary Algorithm Table at %4.4X\n", cfip->P_ADR);
+ else
+ printk("No Primary Algorithm Table\n");
+
+ printk("Alternative Vendor Command Set: %4.4X (%s)\n", cfip->A_ID, vendorname(cfip->A_ID));
+ if (cfip->A_ADR)
+ printk("Alternate Algorithm Table at %4.4X\n", cfip->A_ADR);
+ else
+ printk("No Alternate Algorithm Table\n");
+
+
+ printk("Vcc Minimum: %2d.%d V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf);
+ printk("Vcc Maximum: %2d.%d V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf);
+ if (cfip->VppMin) {
+ printk("Vpp Minimum: %2d.%d V\n", cfip->VppMin >> 4, cfip->VppMin & 0xf);
+ printk("Vpp Maximum: %2d.%d V\n", cfip->VppMax >> 4, cfip->VppMax & 0xf);
+ }
+ else
+ printk("No Vpp line\n");
+
+ printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp);
+ printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp));
+
+ if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) {
+ printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp);
+ printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp));
+ }
+ else
+ printk("Full buffer write not supported\n");
+
+ printk("Typical block erase timeout: %d ms\n", 1<<cfip->BlockEraseTimeoutTyp);
+ printk("Maximum block erase timeout: %d ms\n", (1<<cfip->BlockEraseTimeoutMax) * (1<<cfip->BlockEraseTimeoutTyp));
+ if (cfip->ChipEraseTimeoutTyp || cfip->ChipEraseTimeoutMax) {
+ printk("Typical chip erase timeout: %d ms\n", 1<<cfip->ChipEraseTimeoutTyp);
+ printk("Maximum chip erase timeout: %d ms\n", (1<<cfip->ChipEraseTimeoutMax) * (1<<cfip->ChipEraseTimeoutTyp));
+ }
+ else
+ printk("Chip erase not supported\n");
+
+ printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20));
+ printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc);
+ switch(cfip->InterfaceDesc) {
+ case CFI_INTERFACE_X8_ASYNC:
+ printk(" - x8-only asynchronous interface\n");
+ break;
+
+ case CFI_INTERFACE_X16_ASYNC:
+ printk(" - x16-only asynchronous interface\n");
+ break;
+
+ case CFI_INTERFACE_X8_BY_X16_ASYNC:
+ printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n");
+ break;
+
+ case CFI_INTERFACE_X32_ASYNC:
+ printk(" - x32-only asynchronous interface\n");
+ break;
+
+ case CFI_INTERFACE_X16_BY_X32_ASYNC:
+ printk(" - supports x16 and x32 via Word# with asynchronous interface\n");
+ break;
+
+ case CFI_INTERFACE_NOT_ALLOWED:
+ printk(" - Not Allowed / Reserved\n");
+ break;
+
+ default:
+ printk(" - Unknown\n");
+ break;
+ }
+
+ printk("Max. bytes in buffer write: 0x%x\n", 1<< cfip->MaxBufWriteSize);
+ printk("Number of Erase Block Regions: %d\n", cfip->NumEraseRegions);
+
+}
+#endif /* DEBUG_CFI */
+
+static struct chip_probe cfi_chip_probe = {
+ .name = "CFI",
+ .probe_chip = cfi_probe_chip
+};
+
+struct mtd_info *cfi_probe(struct map_info *map)
+{
+ /*
+ * Just use the generic probe stuff to call our CFI-specific
+ * chip_probe routine in all the possible permutations, etc.
+ */
+ return mtd_do_chip_probe(map, &cfi_chip_probe);
+}
+
+static struct mtd_chip_driver cfi_chipdrv = {
+ .probe = cfi_probe,
+ .name = "cfi_probe",
+ .module = THIS_MODULE
+};
+
+static int __init cfi_probe_init(void)
+{
+ register_mtd_chip_driver(&cfi_chipdrv);
+ return 0;
+}
+
+static void __exit cfi_probe_exit(void)
+{
+ unregister_mtd_chip_driver(&cfi_chipdrv);
+}
+
+module_init(cfi_probe_init);
+module_exit(cfi_probe_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
+MODULE_DESCRIPTION("Probe code for CFI-compliant flash chips");
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
new file mode 100644
index 0000000000..140c69a67e
--- /dev/null
+++ b/drivers/mtd/chips/cfi_util.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common Flash Interface support:
+ * Generic utility functions not dependent on command set
+ *
+ * Copyright (C) 2002 Red Hat
+ * Copyright (C) 2003 STMicroelectronics Limited
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/xip.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+
+void cfi_udelay(int us)
+{
+ if (us >= 1000) {
+ msleep(DIV_ROUND_UP(us, 1000));
+ } else {
+ udelay(us);
+ cond_resched();
+ }
+}
+EXPORT_SYMBOL(cfi_udelay);
+
+/*
+ * Returns the command address according to the given geometry.
+ */
+uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
+ struct map_info *map, struct cfi_private *cfi)
+{
+ unsigned bankwidth = map_bankwidth(map);
+ unsigned interleave = cfi_interleave(cfi);
+ unsigned type = cfi->device_type;
+ uint32_t addr;
+
+ addr = (cmd_ofs * type) * interleave;
+
+ /* Modify the unlock address if we are in compatibility mode.
+ * For 16bit devices on 8 bit busses
+ * and 32bit devices on 16 bit busses
+ * set the low bit of the alternating bit sequence of the address.
+ */
+ if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
+ addr |= (type >> 1)*interleave;
+
+ return addr;
+}
+EXPORT_SYMBOL(cfi_build_cmd_addr);
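+
+/*
+ * Worked example (illustrative only): an x16 device (type = 2) running
+ * in x8 compatibility mode on an 8-bit bus (bankwidth = 1, interleave = 1).
+ * For cmd_ofs 0x555: addr = 0x555 * 2 * 1 = 0xAAA; the low byte is 0x55,
+ * so no adjustment is made. For cmd_ofs 0x2AA: addr = 0x2AA * 2 * 1 =
+ * 0x554; the low byte is 0xAA and type * interleave (2) exceeds the
+ * bankwidth (1), so the low alternating bit is set: 0x554 | 1 = 0x555.
+ * The familiar 0x555/0x2AA unlock pair thus becomes 0xAAA/0x555.
+ */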
+
+/*
+ * Transforms the CFI command for the given geometry (bus width & interleave).
+ * It looks too long to be inline, but in the common case it should almost all
+ * get optimised away.
+ */
+map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
+{
+ map_word val = { {0} };
+ int wordwidth, words_per_bus, chip_mode, chips_per_word;
+ unsigned long onecmd;
+ int i;
+
+ /* We do it this way to give the compiler a fighting chance
+ of optimising away all the crap for 'bankwidth' larger than
+ an unsigned long, in the common case where that support is
+ disabled */
+ if (map_bankwidth_is_large(map)) {
+ wordwidth = sizeof(unsigned long);
+ words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
+ } else {
+ wordwidth = map_bankwidth(map);
+ words_per_bus = 1;
+ }
+
+ chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
+ chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
+
+ /* First, determine what the bit-pattern should be for a single
+ device, according to chip mode and endianness... */
+ switch (chip_mode) {
+ default: BUG();
+ case 1:
+ onecmd = cmd;
+ break;
+ case 2:
+ onecmd = cpu_to_cfi16(map, cmd);
+ break;
+ case 4:
+ onecmd = cpu_to_cfi32(map, cmd);
+ break;
+ }
+
+ /* Now replicate it across the size of an unsigned long, or
+ just to the bus width as appropriate */
+ switch (chips_per_word) {
+ default: BUG();
+#if BITS_PER_LONG >= 64
+ case 8:
+ onecmd |= (onecmd << (chip_mode * 32));
+ fallthrough;
+#endif
+ case 4:
+ onecmd |= (onecmd << (chip_mode * 16));
+ fallthrough;
+ case 2:
+ onecmd |= (onecmd << (chip_mode * 8));
+ fallthrough;
+ case 1:
+ ;
+ }
+
+ /* And finally, for the multi-word case, replicate it
+ in all words in the structure */
+ for (i=0; i < words_per_bus; i++) {
+ val.x[i] = onecmd;
+ }
+
+ return val;
+}
+EXPORT_SYMBOL(cfi_build_cmd);
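+
+/*
+ * Worked example (illustrative only): two interleaved x16 chips on a
+ * 32-bit bus (bankwidth = 4, interleave = 2, device_type = 2) on a
+ * 32-bit host. chip_mode = 4 / 2 = 2, so a command byte 0x98 is first
+ * widened to the 16-bit pattern 0x0098 (modulo CFI endianness);
+ * chips_per_word = 4 * 2 / 4 = 2, so it is then replicated once via
+ * onecmd |= onecmd << 16, giving the bus word 0x00980098 - one copy
+ * of the command for each interleaved chip.
+ */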
+
+unsigned long cfi_merge_status(map_word val, struct map_info *map,
+ struct cfi_private *cfi)
+{
+ int wordwidth, words_per_bus, chip_mode, chips_per_word;
+ unsigned long onestat, res = 0;
+ int i;
+
+ /* We do it this way to give the compiler a fighting chance
+ of optimising away all the crap for 'bankwidth' larger than
+ an unsigned long, in the common case where that support is
+ disabled */
+ if (map_bankwidth_is_large(map)) {
+ wordwidth = sizeof(unsigned long);
+ words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
+ } else {
+ wordwidth = map_bankwidth(map);
+ words_per_bus = 1;
+ }
+
+ chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
+ chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
+
+ onestat = val.x[0];
+ /* Or all status words together */
+ for (i=1; i < words_per_bus; i++) {
+ onestat |= val.x[i];
+ }
+
+ res = onestat;
+ switch(chips_per_word) {
+ default: BUG();
+#if BITS_PER_LONG >= 64
+ case 8:
+ res |= (onestat >> (chip_mode * 32));
+ fallthrough;
+#endif
+ case 4:
+ res |= (onestat >> (chip_mode * 16));
+ fallthrough;
+ case 2:
+ res |= (onestat >> (chip_mode * 8));
+ fallthrough;
+ case 1:
+ ;
+ }
+
+ /* Last, determine what the bit-pattern should be for a single
+ device, according to chip mode and endianness... */
+ switch (chip_mode) {
+ case 1:
+ break;
+ case 2:
+ res = cfi16_to_cpu(map, res);
+ break;
+ case 4:
+ res = cfi32_to_cpu(map, res);
+ break;
+ default: BUG();
+ }
+ return res;
+}
+EXPORT_SYMBOL(cfi_merge_status);
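+
+/*
+ * Illustrative example for the same geometry as above (two interleaved
+ * x16 chips, chip_mode = 2, chips_per_word = 2): given a raw read of
+ * 0x00800080, "res |= onestat >> 16" folds the upper chip's status down
+ * onto the lower one, so the low 16 bits carry the OR of both chips'
+ * status bits; cfi16_to_cpu() then fixes the byte order, yielding 0x80.
+ * An error bit set on either chip therefore survives the merge.
+ */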
+
+/*
+ * Sends a CFI command to a bank of flash for the given geometry.
+ *
+ * Returns the offset in flash where the command was written.
+ * If prev_val is non-null, it will be set to the value at the command address,
+ * before the command was written.
+ */
+uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
+ struct map_info *map, struct cfi_private *cfi,
+ int type, map_word *prev_val)
+{
+ map_word val;
+ uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
+ val = cfi_build_cmd(cmd, map, cfi);
+
+ if (prev_val)
+ *prev_val = map_read(map, addr);
+
+ map_write(map, val, addr);
+
+ return addr - base;
+}
+EXPORT_SYMBOL(cfi_send_gen_cmd);
+
+int __xipram cfi_qry_present(struct map_info *map, __u32 base,
+ struct cfi_private *cfi)
+{
+ int osf = cfi->interleave * cfi->device_type; /* scale factor */
+ map_word val[3];
+ map_word qry[3];
+
+ qry[0] = cfi_build_cmd('Q', map, cfi);
+ qry[1] = cfi_build_cmd('R', map, cfi);
+ qry[2] = cfi_build_cmd('Y', map, cfi);
+
+ val[0] = map_read(map, base + osf*0x10);
+ val[1] = map_read(map, base + osf*0x11);
+ val[2] = map_read(map, base + osf*0x12);
+
+ if (!map_word_equal(map, qry[0], val[0]))
+ return 0;
+
+ if (!map_word_equal(map, qry[1], val[1]))
+ return 0;
+
+ if (!map_word_equal(map, qry[2], val[2]))
+ return 0;
+
+ return 1; /* "QRY" found */
+}
+EXPORT_SYMBOL_GPL(cfi_qry_present);
+
+int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
+ struct cfi_private *cfi)
+{
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
+ if (cfi_qry_present(map, base, cfi))
+ return 1;
+ /* QRY not found; probably we are dealing with some odd CFI chips */
+ /* Some revisions of some old Intel chips? */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
+ if (cfi_qry_present(map, base, cfi))
+ return 1;
+ /* ST M29DW chips */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
+ if (cfi_qry_present(map, base, cfi))
+ return 1;
+ /* some old SST chips, e.g. 39VF160x/39VF320x */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
+ if (cfi_qry_present(map, base, cfi))
+ return 1;
+ /* SST 39VF640xB */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
+ if (cfi_qry_present(map, base, cfi))
+ return 1;
+ /* QRY not found */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cfi_qry_mode_on);
+
+void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
+ struct cfi_private *cfi)
+{
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
+ /* M29W128G flashes require an additional reset command
+ when exiting QRY mode */
+ if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E))
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+}
+EXPORT_SYMBOL_GPL(cfi_qry_mode_off);
+
+struct cfi_extquery *
+__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ __u32 base = 0; // cfi->chips[0].start;
+ int ofs_factor = cfi->interleave * cfi->device_type;
+ int i;
+ struct cfi_extquery *extp = NULL;
+
+ if (!adr)
+ goto out;
+
+ printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);
+
+ extp = kmalloc(size, GFP_KERNEL);
+ if (!extp)
+ goto out;
+
+#ifdef CONFIG_MTD_XIP
+ local_irq_disable();
+#endif
+
+ /* Switch it into Query Mode */
+ cfi_qry_mode_on(base, map, cfi);
+ /* Read in the Extended Query Table */
+ for (i=0; i<size; i++) {
+ ((unsigned char *)extp)[i] =
+ cfi_read_query(map, base+((adr+i)*ofs_factor));
+ }
+
+ /* Make sure it returns to read mode */
+ cfi_qry_mode_off(base, map, cfi);
+
+#ifdef CONFIG_MTD_XIP
+ (void) map_read(map, base);
+ xip_iprefetch();
+ local_irq_enable();
+#endif
+
+ out:
+ return extp;
+}
+
+EXPORT_SYMBOL(cfi_read_pri);
+
+void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_fixup *f;
+
+ for (f=fixups; f->fixup; f++) {
+ if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
+ ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
+ f->fixup(mtd);
+ }
+ }
+}
+
+EXPORT_SYMBOL(cfi_fixup);
+
+int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
+ loff_t ofs, size_t len, void *thunk)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long adr;
+ int chipnum, ret = 0;
+ int i, first;
+ struct mtd_erase_region_info *regions = mtd->eraseregions;
+
+ /* Check that both start and end of the requested erase are
+ * aligned with the erasesize at the appropriate addresses.
+ */
+
+ i = 0;
+
+ /* Skip all erase regions which are ended before the start of
+ the requested erase. Actually, to save on the calculations,
+ we skip to the first erase region which starts after the
+ start of the requested erase, and then go back one.
+ */
+
+ while (i < mtd->numeraseregions && ofs >= regions[i].offset)
+ i++;
+ i--;
+
+ /* OK, now i is pointing at the erase region in which this
+ erase request starts. Check the start of the requested
+ erase range is aligned with the erase size which is in
+ effect here.
+ */
+
+ if (ofs & (regions[i].erasesize-1))
+ return -EINVAL;
+
+ /* Remember the erase region we start on */
+ first = i;
+
+ /* Next, check that the end of the requested erase is aligned
+ * with the erase region at that address.
+ */
+
+ while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
+ i++;
+
+ /* As before, drop back one to point at the region in which
+ the address actually falls
+ */
+ i--;
+
+ if ((ofs + len) & (regions[i].erasesize-1))
+ return -EINVAL;
+
+ chipnum = ofs >> cfi->chipshift;
+ adr = ofs - (chipnum << cfi->chipshift);
+
+ i=first;
+
+ while(len) {
+ int size = regions[i].erasesize;
+
+ ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
+
+ if (ret)
+ return ret;
+
+ adr += size;
+ ofs += size;
+ len -= size;
+
+ if (ofs == regions[i].offset + size * regions[i].numblocks)
+ i++;
+
+ if (adr >> cfi->chipshift) {
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(cfi_varsize_frob);
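+
+/*
+ * Illustrative walk-through: for a boot-block part with two erase
+ * regions - four 16KiB blocks at offset 0, then 64KiB blocks from
+ * offset 0x10000 - a frob with ofs = 0x8000, len = 0x18000 checks
+ * 0x8000 against region 0's 16KiB alignment and 0x20000 against
+ * region 1's 64KiB alignment, then invokes the callback with sizes
+ * 0x4000, 0x4000 and 0x10000, bumping the region index as the
+ * address crosses the region boundary at 0x10000.
+ */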
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/chips/chipreg.c b/drivers/mtd/chips/chipreg.c
new file mode 100644
index 0000000000..a05e103682
--- /dev/null
+++ b/drivers/mtd/chips/chipreg.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Registration for chip drivers
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+
+static DEFINE_SPINLOCK(chip_drvs_lock);
+static LIST_HEAD(chip_drvs_list);
+
+void register_mtd_chip_driver(struct mtd_chip_driver *drv)
+{
+ spin_lock(&chip_drvs_lock);
+ list_add(&drv->list, &chip_drvs_list);
+ spin_unlock(&chip_drvs_lock);
+}
+
+void unregister_mtd_chip_driver(struct mtd_chip_driver *drv)
+{
+ spin_lock(&chip_drvs_lock);
+ list_del(&drv->list);
+ spin_unlock(&chip_drvs_lock);
+}
+
+static struct mtd_chip_driver *get_mtd_chip_driver (const char *name)
+{
+ struct mtd_chip_driver *ret = NULL, *this;
+
+ spin_lock(&chip_drvs_lock);
+
+ list_for_each_entry(this, &chip_drvs_list, list) {
+ if (!strcmp(this->name, name)) {
+ ret = this;
+ break;
+ }
+ }
+ if (ret && !try_module_get(ret->module))
+ ret = NULL;
+
+ spin_unlock(&chip_drvs_lock);
+
+ return ret;
+}
+
+ /* Hide all the horrid details, like some silly person taking
+ get_module_symbol() away from us, from the caller. */
+
+struct mtd_info *do_map_probe(const char *name, struct map_info *map)
+{
+ struct mtd_chip_driver *drv;
+ struct mtd_info *ret;
+
+ drv = get_mtd_chip_driver(name);
+
+ if (!drv && !request_module("%s", name))
+ drv = get_mtd_chip_driver(name);
+
+ if (!drv)
+ return NULL;
+
+ ret = drv->probe(map);
+
+ /* We decrease the use count here. It may have been a
+ probe-only module, which is no longer required from this
+ point, having given us a handle on (and increased the use
+ count of) the actual driver code.
+ */
+ module_put(drv->module);
+
+ return ret;
+}
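+
+/*
+ * Typical use from a map driver (illustrative sketch; physmap and
+ * friends do essentially this):
+ *
+ * mtd = do_map_probe("cfi_probe", map);
+ * if (!mtd)
+ * mtd = do_map_probe("jedec_probe", map);
+ * if (mtd)
+ * mtd_device_register(mtd, NULL, 0);
+ */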
+/*
+ * Destroy an MTD device which was created for a map device.
+ * Make sure the MTD device is already unregistered before calling this
+ */
+void map_destroy(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+
+ if (map->fldrv->destroy)
+ map->fldrv->destroy(mtd);
+
+ module_put(map->fldrv->module);
+
+ kfree(mtd);
+}
+
+EXPORT_SYMBOL(register_mtd_chip_driver);
+EXPORT_SYMBOL(unregister_mtd_chip_driver);
+EXPORT_SYMBOL(do_map_probe);
+EXPORT_SYMBOL(map_destroy);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Core routines for registering and invoking MTD chip drivers");
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
new file mode 100644
index 0000000000..53e6b2d593
--- /dev/null
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef FWH_LOCK_H
+#define FWH_LOCK_H
+
+
+enum fwh_lock_state {
+ FWH_UNLOCKED = 0,
+ FWH_DENY_WRITE = 1,
+ FWH_IMMUTABLE = 2,
+ FWH_DENY_READ = 4,
+};
+
+struct fwh_xxlock_thunk {
+ enum fwh_lock_state val;
+ flstate_t state;
+};
+
+
+#define FWH_XXLOCK_ONEBLOCK_LOCK ((struct fwh_xxlock_thunk){ FWH_DENY_WRITE, FL_LOCKING})
+#define FWH_XXLOCK_ONEBLOCK_UNLOCK ((struct fwh_xxlock_thunk){ FWH_UNLOCKED, FL_UNLOCKING})
+
+/*
+ * This locking/unlock is specific to firmware hub parts. Only one
+ * is known that supports the Intel command set. Firmware
+ * hub parts cannot be interleaved as they are on the LPC bus
+ * so this code has not been tested with interleaved chips,
+ * and will likely fail in that context.
+ */
+static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct fwh_xxlock_thunk *xxlt = (struct fwh_xxlock_thunk *)thunk;
+ int ret;
+
+ /* Refuse the operation if we cannot look behind the chip */
+ if (chip->start < 0x400000) {
+ pr_debug( "MTD %s(): chip->start: %lx wanted >= 0x400000\n",
+ __func__, chip->start );
+ return -EIO;
+ }
+ /*
+ * lock block registers:
+ * - on 64k boundaries, and
+ * - bit 1 set high
+ * - block lock registers are 4MiB lower - overflow subtract (danger)
+ *
+ * The address manipulation is first done on the logical address
+ * which is 0 at the start of the chip, and then the offset of
+ * the individual chip is added to it. In any other order, a weird
+ * map offset could cause problems.
+ */
+ adr = (adr & ~0xffffUL) | 0x2;
+ adr += chip->start - 0x400000;
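+ /*
+ * Illustrative example: a block at logical address 0x12345 on a
+ * chip mapped at 0xffc00000 becomes (0x10000 | 0x2) + 0xffc00000
+ * - 0x400000 = 0xff810002, i.e. the block's lock register in the
+ * window 4MiB below the chip.
+ */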
+
+ /*
+ * This is easy because these are writes to registers and not writes
+ * to flash memory - that means that we don't have to check status
+ * and timeout.
+ */
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ chip->oldstate = chip->state;
+ chip->state = xxlt->state;
+ map_write(map, CMD(xxlt->val), adr);
+
+ /* Done and happy. */
+ chip->state = chip->oldstate;
+ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+ return 0;
+}
+
+
+static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len,
+ (void *)&FWH_XXLOCK_ONEBLOCK_LOCK);
+
+ return ret;
+}
+
+
+static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len,
+ (void *)&FWH_XXLOCK_ONEBLOCK_UNLOCK);
+
+ return ret;
+}
+
+static void fixup_use_fwh_lock(struct mtd_info *mtd)
+{
+ printk(KERN_NOTICE "using fwh lock/unlock method\n");
+ /* Setup for the chips with the fwh lock method */
+ mtd->_lock = fwh_lock_varsize;
+ mtd->_unlock = fwh_unlock_varsize;
+}
+#endif /* FWH_LOCK_H */
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
new file mode 100644
index 0000000000..9e53fcd760
--- /dev/null
+++ b/drivers/mtd/chips/gen_probe.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Routines common to all CFI-type probes.
+ * (C) 2001-2003 Red Hat, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/gen_probe.h>
+
+static struct mtd_info *check_cmd_set(struct map_info *, int);
+static struct cfi_private *genprobe_ident_chips(struct map_info *map,
+ struct chip_probe *cp);
+static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
+ struct cfi_private *cfi);
+
+struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
+{
+ struct mtd_info *mtd;
+ struct cfi_private *cfi;
+
+ /* First probe the map to see if we have CFI stuff there. */
+ cfi = genprobe_ident_chips(map, cp);
+
+ if (!cfi)
+ return NULL;
+
+ map->fldrv_priv = cfi;
+ /* OK we liked it. Now find a driver for the command set it talks */
+
+ mtd = check_cmd_set(map, 1); /* First the primary cmdset */
+ if (!mtd)
+ mtd = check_cmd_set(map, 0); /* Then the secondary */
+
+ if (mtd) {
+ if (mtd->size > map->size) {
+ printk(KERN_WARNING "Reducing visibility of %ldKiB chip to %ldKiB\n",
+ (unsigned long)mtd->size >> 10,
+ (unsigned long)map->size >> 10);
+ mtd->size = map->size;
+ }
+ return mtd;
+ }
+
+ printk(KERN_WARNING"gen_probe: No supported Vendor Command Set found\n");
+
+ kfree(cfi->cfiq);
+ kfree(cfi);
+ map->fldrv_priv = NULL;
+ return NULL;
+}
+EXPORT_SYMBOL(mtd_do_chip_probe);
+
+
+static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chip_probe *cp)
+{
+ struct cfi_private cfi;
+ struct cfi_private *retcfi;
+ unsigned long *chip_map;
+ int max_chips;
+ int i, j;
+
+ memset(&cfi, 0, sizeof(cfi));
+
+ /* Call the probetype-specific code with all permutations of
+ interleave and device type, etc. */
+ if (!genprobe_new_chip(map, cp, &cfi)) {
+ /* The probe didn't like it */
+ pr_debug("%s: Found no %s device at location zero\n",
+ cp->name, map->name);
+ return NULL;
+ }
+
+#if 0 /* Let the CFI probe routine do this sanity check. The Intel and AMD
+ probe routines won't ever return a broken CFI structure anyway,
+ because they make them up themselves.
+ */
+ if (cfi.cfiq->NumEraseRegions == 0) {
+ printk(KERN_WARNING "Number of erase regions is zero\n");
+ kfree(cfi.cfiq);
+ return NULL;
+ }
+#endif
+ cfi.chipshift = cfi.cfiq->DevSize;
+
+ if (cfi_interleave_is_1(&cfi)) {
+ ;
+ } else if (cfi_interleave_is_2(&cfi)) {
+ cfi.chipshift++;
+ } else if (cfi_interleave_is_4(&cfi)) {
+ cfi.chipshift += 2;
+ } else if (cfi_interleave_is_8(&cfi)) {
+ cfi.chipshift += 3;
+ } else {
+ BUG();
+ }
+
+ cfi.numchips = 1;
+
+ /*
+ * Allocate memory for bitmap of valid chips.
+ * Align bitmap storage size to full byte.
+ */
+ max_chips = map->size >> cfi.chipshift;
+ if (!max_chips) {
+ printk(KERN_WARNING "NOR chip too large to fit in mapping. Attempting to cope...\n");
+ max_chips = 1;
+ }
+
+ chip_map = bitmap_zalloc(max_chips, GFP_KERNEL);
+ if (!chip_map) {
+ kfree(cfi.cfiq);
+ return NULL;
+ }
+
+ set_bit(0, chip_map); /* Mark first chip valid */
+
+ /*
+ * Now probe for other chips, checking sensibly for aliases while
+ * we're at it. The new_chip probe above should have left the first
+ * chip in read mode.
+ */
+
+ for (i = 1; i < max_chips; i++) {
+ cp->probe_chip(map, i << cfi.chipshift, chip_map, &cfi);
+ }
+
+ /*
+ * Now allocate the space for the structures we need to return to
+ * our caller, and copy the appropriate data into them.
+ */
+
+ retcfi = kmalloc(struct_size(retcfi, chips, cfi.numchips), GFP_KERNEL);
+
+ if (!retcfi) {
+ kfree(cfi.cfiq);
+ bitmap_free(chip_map);
+ return NULL;
+ }
+
+ memcpy(retcfi, &cfi, sizeof(cfi));
+ memset(&retcfi->chips[0], 0, sizeof(struct flchip) * cfi.numchips);
+
+ for (i = 0, j = 0; (j < cfi.numchips) && (i < max_chips); i++) {
+ if(test_bit(i, chip_map)) {
+ struct flchip *pchip = &retcfi->chips[j++];
+
+ pchip->start = (i << cfi.chipshift);
+ pchip->state = FL_READY;
+ init_waitqueue_head(&pchip->wq);
+ mutex_init(&pchip->mutex);
+ }
+ }
+
+ bitmap_free(chip_map);
+ return retcfi;
+}
+
+
+static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
+ struct cfi_private *cfi)
+{
+ int min_chips = (map_bankwidth(map)/4 ?: 1); /* Chips are at most 4 bytes wide... */
+ int max_chips = map_bankwidth(map); /* ...and at least 1 byte wide */
+ int nr_chips, type;
+
+ for (nr_chips = max_chips; nr_chips >= min_chips; nr_chips >>= 1) {
+
+ if (!cfi_interleave_supported(nr_chips))
+ continue;
+
+ cfi->interleave = nr_chips;
+
+ /* Minimum device size. Don't look for one 8-bit device
+ in a 16-bit bus, etc. */
+ type = map_bankwidth(map) / nr_chips;
+
+ for (; type <= CFI_DEVICETYPE_X32; type<<=1) {
+ cfi->device_type = type;
+
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+ }
+ }
+ return 0;
+}
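+
+/*
+ * Illustrative example: on a 4-byte-wide bank the loop above tries, in
+ * order, four interleaved chips (native x8, x16, x32), then two (x16,
+ * x32), then a single x32 chip. A native type wider than the per-chip
+ * share of the bus simply means the device is expected to run in a
+ * narrower compatibility mode; the first permutation accepted by the
+ * chip-specific probe wins.
+ */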
+
+typedef struct mtd_info *cfi_cmdset_fn_t(struct map_info *, int);
+
+extern cfi_cmdset_fn_t cfi_cmdset_0001;
+extern cfi_cmdset_fn_t cfi_cmdset_0002;
+extern cfi_cmdset_fn_t cfi_cmdset_0020;
+
+static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
+ int primary)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
+#ifdef CONFIG_MODULES
+ cfi_cmdset_fn_t *probe_function;
+ char *probename;
+
+ probename = kasprintf(GFP_KERNEL, "cfi_cmdset_%4.4X", type);
+ if (!probename)
+ return NULL;
+
+ probe_function = __symbol_get(probename);
+ if (!probe_function) {
+ request_module("cfi_cmdset_%4.4X", type);
+ probe_function = __symbol_get(probename);
+ }
+ kfree(probename);
+
+ if (probe_function) {
+ struct mtd_info *mtd;
+
+ mtd = (*probe_function)(map, primary);
+ /* If it was happy, it'll have increased its own use count */
+ symbol_put_addr(probe_function);
+ return mtd;
+ }
+#endif
+ printk(KERN_NOTICE "Support for command set %04X not present\n", type);
+
+ return NULL;
+}
+
+static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
+
+ if (type == P_ID_NONE || type == P_ID_RESERVED)
+ return NULL;
+
+ switch(type){
+ /* We need these for the !CONFIG_MODULES case,
+ because symbol_get() doesn't work there */
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ case P_ID_INTEL_EXT:
+ case P_ID_INTEL_STD:
+ case P_ID_INTEL_PERFORMANCE:
+ return cfi_cmdset_0001(map, primary);
+#endif
+#ifdef CONFIG_MTD_CFI_AMDSTD
+ case P_ID_AMD_STD:
+ case P_ID_SST_OLD:
+ case P_ID_WINBOND:
+ return cfi_cmdset_0002(map, primary);
+#endif
+#ifdef CONFIG_MTD_CFI_STAA
+ case P_ID_ST_ADV:
+ return cfi_cmdset_0020(map, primary);
+#endif
+ default:
+ return cfi_cmdset_unknown(map, primary);
+ }
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Helper routines for flash chip probe code");
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
new file mode 100644
index 0000000000..23c32fe584
--- /dev/null
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -0,0 +1,2316 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ Common Flash Interface probe code.
+ (C) 2000 Red Hat.
+ See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5)
+ for the standard this probe goes back to.
+
+ Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/gen_probe.h>
+
+/* AMD */
+#define AM29DL800BB 0x22CB
+#define AM29DL800BT 0x224A
+
+#define AM29F800BB 0x2258
+#define AM29F800BT 0x22D6
+#define AM29LV400BB 0x22BA
+#define AM29LV400BT 0x22B9
+#define AM29LV800BB 0x225B
+#define AM29LV800BT 0x22DA
+#define AM29LV160DT 0x22C4
+#define AM29LV160DB 0x2249
+#define AM29F017D 0x003D
+#define AM29F016D 0x00AD
+#define AM29F080 0x00D5
+#define AM29F040 0x00A4
+#define AM29LV040B 0x004F
+#define AM29F032B 0x0041
+#define AM29F002T 0x00B0
+#define AM29SL800DB 0x226B
+#define AM29SL800DT 0x22EA
+
+/* Atmel */
+#define AT49BV512 0x0003
+#define AT29LV512 0x003d
+#define AT49BV16X 0x00C0
+#define AT49BV16XT 0x00C2
+#define AT49BV32X 0x00C8
+#define AT49BV32XT 0x00C9
+
+/* Eon */
+#define EN29LV400AT 0x22B9
+#define EN29LV400AB 0x22BA
+#define EN29SL800BB 0x226B
+#define EN29SL800BT 0x22EA
+
+/* Fujitsu */
+#define MBM29F040C 0x00A4
+#define MBM29F800BA 0x2258
+#define MBM29LV650UE 0x22D7
+#define MBM29LV320TE 0x22F6
+#define MBM29LV320BE 0x22F9
+#define MBM29LV160TE 0x22C4
+#define MBM29LV160BE 0x2249
+#define MBM29LV800BA 0x225B
+#define MBM29LV800TA 0x22DA
+#define MBM29LV400TC 0x22B9
+#define MBM29LV400BC 0x22BA
+
+/* Hyundai */
+#define HY29F002T 0x00B0
+
+/* Intel */
+#define I28F004B3T 0x00d4
+#define I28F004B3B 0x00d5
+#define I28F400B3T 0x8894
+#define I28F400B3B 0x8895
+#define I28F008S5 0x00a6
+#define I28F016S5 0x00a0
+#define I28F008SA 0x00a2
+#define I28F008B3T 0x00d2
+#define I28F008B3B 0x00d3
+#define I28F800B3T 0x8892
+#define I28F800B3B 0x8893
+#define I28F016S3 0x00aa
+#define I28F016B3T 0x00d0
+#define I28F016B3B 0x00d1
+#define I28F160B3T 0x8890
+#define I28F160B3B 0x8891
+#define I28F320B3T 0x8896
+#define I28F320B3B 0x8897
+#define I28F640B3T 0x8898
+#define I28F640B3B 0x8899
+#define I28F640C3B 0x88CD
+#define I28F160F3T 0x88F3
+#define I28F160F3B 0x88F4
+#define I28F160C3T 0x88C2
+#define I28F160C3B 0x88C3
+#define I82802AB 0x00ad
+#define I82802AC 0x00ac
+
+/* Macronix */
+#define MX29LV040C 0x004F
+#define MX29LV160T 0x22C4
+#define MX29LV160B 0x2249
+#define MX29F040 0x00A4
+#define MX29F016 0x00AD
+#define MX29F002T 0x00B0
+#define MX29F004T 0x0045
+#define MX29F004B 0x0046
+
+/* NEC */
+#define UPD29F064115 0x221C
+
+/* PMC */
+#define PM49FL002 0x006D
+#define PM49FL004 0x006E
+#define PM49FL008 0x006A
+
+/* Sharp */
+#define LH28F640BF 0x00B0
+
+/* ST - www.st.com */
+#define M29F800AB 0x0058
+#define M29W800DT 0x22D7
+#define M29W800DB 0x225B
+#define M29W400DT 0x00EE
+#define M29W400DB 0x00EF
+#define M29W160DT 0x22C4
+#define M29W160DB 0x2249
+#define M29W040B 0x00E3
+#define M50FW040 0x002C
+#define M50FW080 0x002D
+#define M50FW016 0x002E
+#define M50LPW080 0x002F
+#define M50FLW080A 0x0080
+#define M50FLW080B 0x0081
+#define PSD4256G6V 0x00e9
+
+/* SST */
+#define SST29EE020 0x0010
+#define SST29LE020 0x0012
+#define SST29EE512 0x005d
+#define SST29LE512 0x003d
+#define SST39LF800 0x2781
+#define SST39LF160 0x2782
+#define SST39VF1601 0x234b
+#define SST39VF3201 0x235b
+#define SST39WF1601 0x274b
+#define SST39WF1602 0x274a
+#define SST39LF512 0x00D4
+#define SST39LF010 0x00D5
+#define SST39LF020 0x00D6
+#define SST39LF040 0x00D7
+#define SST39SF010A 0x00B5
+#define SST39SF020A 0x00B6
+#define SST39SF040 0x00B7
+#define SST49LF004B 0x0060
+#define SST49LF040B 0x0050
+#define SST49LF008A 0x005a
+#define SST49LF030A 0x001C
+#define SST49LF040A 0x0051
+#define SST49LF080A 0x005B
+#define SST36VF3203 0x7354
+
+/* Toshiba */
+#define TC58FVT160 0x00C2
+#define TC58FVB160 0x0043
+#define TC58FVT321 0x009A
+#define TC58FVB321 0x009C
+#define TC58FVT641 0x0093
+#define TC58FVB641 0x0095
+
+/* Winbond */
+#define W49V002A 0x00b0
+
+
+/*
+ * Unlock address sets for AMD command sets.
+ * Intel command sets use MTD_UADDR_UNNECESSARY.
+ * Each identifier, except MTD_UADDR_UNNECESSARY and
+ * MTD_UADDR_NOT_SUPPORTED, must be defined below in unlock_addrs[].
+ * MTD_UADDR_NOT_SUPPORTED must be 0 so that structure
+ * initialization need not require initializing all of the
+ * unlock addresses for all bit widths.
+ */
+enum uaddr {
+ MTD_UADDR_NOT_SUPPORTED = 0, /* data width not supported */
+ MTD_UADDR_0x0555_0x02AA,
+ MTD_UADDR_0x0555_0x0AAA,
+ MTD_UADDR_0x5555_0x2AAA,
+ MTD_UADDR_0x0AAA_0x0554,
+ MTD_UADDR_0x0AAA_0x0555,
+ MTD_UADDR_0xAAAA_0x5555,
+ MTD_UADDR_DONT_CARE, /* Requires an arbitrary address */
+ MTD_UADDR_UNNECESSARY, /* Does not require any address */
+};
+
+
+struct unlock_addr {
+ uint32_t addr1;
+ uint32_t addr2;
+};
+
+
+/*
+ * I don't like the fact that the first entry in unlock_addrs[]
+ * exists, but is for MTD_UADDR_NOT_SUPPORTED - and, therefore,
+ * should not be used. The problem is that structures with
+ * initializers have extra fields initialized to 0. It is _very_
+ * desirable to have the unlock address entries for unsupported
+ * data widths automatically initialized - that means that
+ * MTD_UADDR_NOT_SUPPORTED must be 0 and the first entry here
+ * must go unused.
+ */
+static const struct unlock_addr unlock_addrs[] = {
+ [MTD_UADDR_NOT_SUPPORTED] = {
+ .addr1 = 0xffff,
+ .addr2 = 0xffff
+ },
+
+ [MTD_UADDR_0x0555_0x02AA] = {
+ .addr1 = 0x0555,
+ .addr2 = 0x02aa
+ },
+
+ [MTD_UADDR_0x0555_0x0AAA] = {
+ .addr1 = 0x0555,
+ .addr2 = 0x0aaa
+ },
+
+ [MTD_UADDR_0x5555_0x2AAA] = {
+ .addr1 = 0x5555,
+ .addr2 = 0x2aaa
+ },
+
+ [MTD_UADDR_0x0AAA_0x0554] = {
+ .addr1 = 0x0AAA,
+ .addr2 = 0x0554
+ },
+
+ [MTD_UADDR_0x0AAA_0x0555] = {
+ .addr1 = 0x0AAA,
+ .addr2 = 0x0555
+ },
+
+ [MTD_UADDR_0xAAAA_0x5555] = {
+ .addr1 = 0xaaaa,
+ .addr2 = 0x5555
+ },
+
+ [MTD_UADDR_DONT_CARE] = {
+ .addr1 = 0x0000, /* Doesn't matter which address */
+ .addr2 = 0x0000 /* is used - must be last entry */
+ },
+
+ [MTD_UADDR_UNNECESSARY] = {
+ .addr1 = 0x0000,
+ .addr2 = 0x0000
+ }
+};
+
+struct amd_flash_info {
+ const char *name;
+ const uint16_t mfr_id;
+ const uint16_t dev_id;
+ const uint8_t dev_size;
+ const uint8_t nr_regions;
+ const uint16_t cmd_set;
+ const uint32_t regions[6];
+ const uint8_t devtypes; /* Bitmask for x8, x16 etc. */
+ const uint8_t uaddr; /* unlock addrs for 8, 16, 32, 64 */
+};
+
+#define ERASEINFO(size, blocks) (((size) << 8) | ((blocks) - 1))
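+/*
+ * e.g. ERASEINFO(0x10000, 64) encodes one region of 64 blocks of 64KiB:
+ * blocks - 1 (63) in the low 16 bits and the block size shifted left by
+ * 8 above it, matching the CFI EraseRegionInfo layout (block size / 256
+ * in bits 16-31).
+ */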
+
+#define SIZE_64KiB 16
+#define SIZE_128KiB 17
+#define SIZE_256KiB 18
+#define SIZE_512KiB 19
+#define SIZE_1MiB 20
+#define SIZE_2MiB 21
+#define SIZE_4MiB 22
+#define SIZE_8MiB 23
+
+
+/*
+ * Please keep this list ordered by manufacturer!
+ * Fortunately, the list isn't searched often and so a
+ * slow, linear search isn't so bad.
+ */
+static const struct amd_flash_info jedec_table[] = {
+ {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F032B,
+ .name = "AMD AM29F032B",
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,64)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29LV160DT,
+ .name = "AMD AM29LV160DT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29LV160DB,
+ .name = "AMD AM29LV160DB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29LV400BB,
+ .name = "AMD AM29LV400BB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,7)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29LV400BT,
+ .name = "AMD AM29LV400BT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,7),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29LV800BB,
+ .name = "AMD AM29LV800BB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+/* add DL */
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29DL800BB,
+ .name = "AMD AM29DL800BB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 6,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,4),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x10000,14)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29DL800BT,
+ .name = "AMD AM29DL800BT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 6,
+ .regions = {
+ ERASEINFO(0x10000,14),
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,4),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F800BB,
+ .name = "AMD AM29F800BB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29LV800BT,
+ .name = "AMD AM29LV800BT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F800BT,
+ .name = "AMD AM29F800BT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F017D,
+ .name = "AMD AM29F017D",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_DONT_CARE,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F016D,
+ .name = "AMD AM29F016D",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F080,
+ .name = "AMD AM29F080",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F040,
+ .name = "AMD AM29F040",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29LV040B,
+ .name = "AMD AM29LV040B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29F002T,
+ .name = "AMD AM29F002T",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,3),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29SL800DT,
+ .name = "AMD AM29SL800DT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = CFI_MFR_AMD,
+ .dev_id = AM29SL800DB,
+ .name = "AMD AM29SL800DB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ATMEL,
+ .dev_id = AT49BV512,
+ .name = "Atmel AT49BV512",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_64KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ATMEL,
+ .dev_id = AT29LV512,
+ .name = "Atmel AT29LV512",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_64KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x80,256),
+ ERASEINFO(0x80,256)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ATMEL,
+ .dev_id = AT49BV16X,
+ .name = "Atmel AT49BV16X",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000,8),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ATMEL,
+ .dev_id = AT49BV16XT,
+ .name = "Atmel AT49BV16XT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x02000,8)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ATMEL,
+ .dev_id = AT49BV32X,
+ .name = "Atmel AT49BV32X",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000,8),
+ ERASEINFO(0x10000,63)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ATMEL,
+ .dev_id = AT49BV32XT,
+ .name = "Atmel AT49BV32XT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000,63),
+ ERASEINFO(0x02000,8)
+ }
+ }, {
+ .mfr_id = CFI_MFR_EON,
+ .dev_id = EN29LV400AT,
+ .name = "Eon EN29LV400AT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,7),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = CFI_MFR_EON,
+ .dev_id = EN29LV400AB,
+ .name = "Eon EN29LV400AB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,7),
+ }
+ }, {
+ .mfr_id = CFI_MFR_EON,
+ .dev_id = EN29SL800BT,
+ .name = "Eon EN29SL800BT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = CFI_MFR_EON,
+ .dev_id = EN29SL800BB,
+ .name = "Eon EN29SL800BB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29F040C,
+ .name = "Fujitsu MBM29F040C",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29F800BA,
+ .name = "Fujitsu MBM29F800BA",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV650UE,
+ .name = "Fujitsu MBM29LV650UE",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_DONT_CARE,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,128)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV320TE,
+ .name = "Fujitsu MBM29LV320TE",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000,63),
+ ERASEINFO(0x02000,8)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV320BE,
+ .name = "Fujitsu MBM29LV320BE",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000,8),
+ ERASEINFO(0x10000,63)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV160TE,
+ .name = "Fujitsu MBM29LV160TE",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV160BE,
+ .name = "Fujitsu MBM29LV160BE",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV800BA,
+ .name = "Fujitsu MBM29LV800BA",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV800TA,
+ .name = "Fujitsu MBM29LV800TA",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV400BC,
+ .name = "Fujitsu MBM29LV400BC",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,7)
+ }
+ }, {
+ .mfr_id = CFI_MFR_FUJITSU,
+ .dev_id = MBM29LV400TC,
+ .name = "Fujitsu MBM29LV400TC",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,7),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_HYUNDAI,
+ .dev_id = HY29F002T,
+ .name = "Hyundai HY29F002T",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,3),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F004B3B,
+ .name = "Intel 28F004B3B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 7),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F004B3T,
+ .name = "Intel 28F004B3T",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 7),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F400B3B,
+ .name = "Intel 28F400B3B",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 7),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F400B3T,
+ .name = "Intel 28F400B3T",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 7),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F008B3B,
+ .name = "Intel 28F008B3B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 15),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F008B3T,
+ .name = "Intel 28F008B3T",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 15),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F008S5,
+ .name = "Intel 28F008S5",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F016S5,
+ .name = "Intel 28F016S5",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F008SA,
+ .name = "Intel 28F008SA",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000, 16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F800B3B,
+ .name = "Intel 28F800B3B",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 15),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F800B3T,
+ .name = "Intel 28F800B3T",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 15),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F016B3B,
+ .name = "Intel 28F016B3B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 31),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F016S3,
+ .name = "Intel I28F016S3",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000, 32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F016B3T,
+ .name = "Intel 28F016B3T",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 31),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F160B3B,
+ .name = "Intel 28F160B3B",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 31),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F160B3T,
+ .name = "Intel 28F160B3T",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 31),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F320B3B,
+ .name = "Intel 28F320B3B",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 63),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F320B3T,
+ .name = "Intel 28F320B3T",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 63),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F640B3B,
+ .name = "Intel 28F640B3B",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 127),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F640B3T,
+ .name = "Intel 28F640B3T",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 127),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I28F640C3B,
+ .name = "Intel 28F640C3B",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 127),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I82802AB,
+ .name = "Intel 82802AB",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_INTEL,
+ .dev_id = I82802AC,
+ .name = "Intel 82802AC",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29LV040C,
+ .name = "Macronix MX29LV040C",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29LV160T,
+ .name = "MXIC MX29LV160T",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_NEC,
+ .dev_id = UPD29F064115,
+ .name = "NEC uPD29F064115",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 3,
+ .regions = {
+ ERASEINFO(0x2000,8),
+ ERASEINFO(0x10000,126),
+ ERASEINFO(0x2000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29LV160B,
+ .name = "MXIC MX29LV160B",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29F040,
+ .name = "Macronix MX29F040",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29F016,
+ .name = "Macronix MX29F016",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29F004T,
+ .name = "Macronix MX29F004T",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,7),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29F004B,
+ .name = "Macronix MX29F004B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,7),
+ }
+ }, {
+ .mfr_id = CFI_MFR_MACRONIX,
+ .dev_id = MX29F002T,
+ .name = "Macronix MX29F002T",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,3),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = CFI_MFR_PMC,
+ .dev_id = PM49FL002,
+ .name = "PMC Pm49FL002",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000, 64)
+ }
+ }, {
+ .mfr_id = CFI_MFR_PMC,
+ .dev_id = PM49FL004,
+ .name = "PMC Pm49FL004",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000, 128)
+ }
+ }, {
+ .mfr_id = CFI_MFR_PMC,
+ .dev_id = PM49FL008,
+ .name = "PMC Pm49FL008",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000, 256)
+ }
+ }, {
+ .mfr_id = CFI_MFR_SHARP,
+ .dev_id = LH28F640BF,
+ .name = "LH28F640BF",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000, 127),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39LF512,
+ .name = "SST 39LF512",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_64KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39LF010,
+ .name = "SST 39LF010",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_128KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST29EE020,
+ .name = "SST 29EE020",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_SST_PAGE,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,64),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST29LE020,
+ .name = "SST 29LE020",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_SST_PAGE,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,64),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39LF020,
+ .name = "SST 39LF020",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,64),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39LF040,
+ .name = "SST 39LF040",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39SF010A,
+ .name = "SST 39SF010A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_128KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39SF020A,
+ .name = "SST 39SF020A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,64),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39SF040,
+ .name = "SST 39SF040",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST49LF040B,
+ .name = "SST 49LF040B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST49LF004B,
+ .name = "SST 49LF004B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST49LF008A,
+ .name = "SST 49LF008A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,256),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST49LF030A,
+ .name = "SST 49LF030A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,96),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST49LF040A,
+ .name = "SST 49LF040A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST49LF080A,
+ .name = "SST 49LF080A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,256),
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST, /* should be CFI */
+ .dev_id = SST39LF160,
+ .name = "SST 39LF160",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST, /* should be CFI */
+ .dev_id = SST39VF1601,
+ .name = "SST 39VF1601",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ /* CFI is broken: reports AMD_STD, but needs custom uaddr */
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39WF1601,
+ .name = "SST 39WF1601",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ /* CFI is broken: reports AMD_STD, but needs custom uaddr */
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39WF1602,
+ .name = "SST 39WF1602",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST, /* should be CFI */
+ .dev_id = SST39VF3201,
+ .name = "SST 39VF3201",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST36VF3203,
+ .name = "SST 36VF3203",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,64),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M29F800AB,
+ .name = "ST M29F800AB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
+ .dev_id = M29W800DT,
+ .name = "ST M29W800DT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
+ .dev_id = M29W800DB,
+ .name = "ST M29W800DB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M29W400DT,
+ .name = "ST M29W400DT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,7),
+ ERASEINFO(0x02000,1),
+ ERASEINFO(0x08000,2),
+ ERASEINFO(0x10000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M29W400DB,
+ .name = "ST M29W400DB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,7)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
+ .dev_id = M29W160DT,
+ .name = "ST M29W160DT",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
+ .dev_id = M29W160DB,
+ .name = "ST M29W160DB",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M29W040B,
+ .name = "ST M29W040B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M50FW040,
+ .name = "ST M50FW040",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M50FW080,
+ .name = "ST M50FW080",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M50FW016,
+ .name = "ST M50FW016",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,32),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M50LPW080,
+ .name = "ST M50LPW080",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ },
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M50FLW080A,
+ .name = "ST M50FLW080A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x1000,16),
+ ERASEINFO(0x10000,13),
+ ERASEINFO(0x1000,16),
+ ERASEINFO(0x1000,16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_ST,
+ .dev_id = M50FLW080B,
+ .name = "ST M50FLW080B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x1000,16),
+ ERASEINFO(0x1000,16),
+ ERASEINFO(0x10000,13),
+ ERASEINFO(0x1000,16),
+ }
+ }, {
+ .mfr_id = 0xff00 | CFI_MFR_ST,
+ .dev_id = 0xff00 | PSD4256G6V,
+ .name = "ST PSD4256G6V",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0x0AAA_0x0554,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
+ .mfr_id = CFI_MFR_TOSHIBA,
+ .dev_id = TC58FVT160,
+ .name = "Toshiba TC58FVT160",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = CFI_MFR_TOSHIBA,
+ .dev_id = TC58FVB160,
+ .name = "Toshiba TC58FVB160",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = CFI_MFR_TOSHIBA,
+ .dev_id = TC58FVB321,
+ .name = "Toshiba TC58FVB321",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000,8),
+ ERASEINFO(0x10000,63)
+ }
+ }, {
+ .mfr_id = CFI_MFR_TOSHIBA,
+ .dev_id = TC58FVT321,
+ .name = "Toshiba TC58FVT321",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000,63),
+ ERASEINFO(0x02000,8)
+ }
+ }, {
+ .mfr_id = CFI_MFR_TOSHIBA,
+ .dev_id = TC58FVB641,
+ .name = "Toshiba TC58FVB641",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000,8),
+ ERASEINFO(0x10000,127)
+ }
+ }, {
+ .mfr_id = CFI_MFR_TOSHIBA,
+ .dev_id = TC58FVT641,
+ .name = "Toshiba TC58FVT641",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x10000,127),
+ ERASEINFO(0x02000,8)
+ }
+ }, {
+ .mfr_id = CFI_MFR_WINBOND,
+ .dev_id = W49V002A,
+ .name = "Winbond W49V002A",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
+ .regions = {
+ ERASEINFO(0x10000, 3),
+ ERASEINFO(0x08000, 1),
+ ERASEINFO(0x02000, 2),
+ ERASEINFO(0x04000, 1),
+ }
+ }
+};
+
+static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
+ struct cfi_private *cfi)
+{
+ map_word result;
+ unsigned long mask;
+ int bank = 0;
+
+ /* According to JEDEC "Standard Manufacturer's Identification Code"
+ * (http://www.jedec.org/download/search/jep106W.pdf)
+ * the first several banks can contain 0x7f instead of the actual ID
+ */
+ do {
+ uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
+ mask = (1 << (cfi->device_type * 8)) - 1;
+ if (ofs >= map->size)
+ return 0;
+ result = map_read(map, base + ofs);
+ bank++;
+ } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);
+
+ return result.x[0] & mask;
+}
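The loop above implements the JEP106 rule that a manufacturer in a later bank is reached by skipping continuation codes. A minimal, self-contained sketch of the same rule over a flat array of bank bytes (values invented for illustration; the real probe reads each bank via map_read() at command-translated offsets):

	#include <stdio.h>

	#define JEP106_CONT 0x7f /* continuation code prefixing later banks */

	/* Return the first bank byte that is not a continuation code, or 0. */
	static unsigned char jep106_mfr(const unsigned char *banks, unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			if (banks[i] != JEP106_CONT)
				return banks[i];
		return 0; /* ran off the table without finding a real ID */
	}

	int main(void)
	{
		const unsigned char banks[] = { 0x7f, 0x7f, 0xc2 }; /* invented */

		printf("mfr id: 0x%02x\n", jep106_mfr(banks, 3));
		return 0;
	}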
+
+static inline u32 jedec_read_id(struct map_info *map, uint32_t base,
+ struct cfi_private *cfi)
+{
+ map_word result;
+ unsigned long mask;
+ u32 ofs = cfi_build_cmd_addr(1, map, cfi);
+ mask = (1 << (cfi->device_type * 8)) - 1;
+ result = map_read(map, base + ofs);
+ return result.x[0] & mask;
+}
+
+static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
+{
+ /* Reset */
+
+ /* After checking the datasheets for SST, MACRONIX and ATMEL
+ * (and, incidentally, the JEDEC spec - 3.5.3.3), the reset
+ * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at
+ * 0x2aaa, then 0xF0 at 0x5555. This does not affect the AMD
+ * chips, as they ignore the unlock writes and don't care what
+ * address the F0 is written to */
+ if (cfi->addr_unlock1) {
+ pr_debug( "reset unlock called %x %x \n",
+ cfi->addr_unlock1,cfi->addr_unlock2);
+ cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
+ }
+
+ cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ /* Some misdesigned Intel chips do not respond to 0xF0 as a reset,
+ * so ensure we're in read mode by sending both the Intel and the
+ * AMD command. Intel uses 0xFF for this; AMD treats 0xFF as a NOP,
+ * so this should be safe.
+ */
+ cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
+ /* FIXME - should have reset delay before continuing */
+}
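For the simplest case - a single x8 chip mapped 1:1 at an ioremap()ed base - the sequence above boils down to four byte writes. A sketch under those assumptions (cfi_send_gen_cmd() additionally handles interleave, bank width and address translation, which this skips):

	#include <linux/io.h>

	static void jedec_reset_x8(void __iomem *base)
	{
		writeb(0xaa, base + 0x5555);	/* unlock cycle 1 */
		writeb(0x55, base + 0x2aaa);	/* unlock cycle 2 */
		writeb(0xf0, base + 0x5555);	/* AMD/JEDEC reset */
		writeb(0xff, base);		/* Intel read mode; a NOP on AMD */
	}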
+
+
+static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int index)
+{
+ int i,num_erase_regions;
+ uint8_t uaddr;
+
+ if (!(jedec_table[index].devtypes & cfi->device_type)) {
+ pr_debug("Rejecting potential %s with incompatible %d-bit device type\n",
+ jedec_table[index].name, 4 * (1<<cfi->device_type));
+ return 0;
+ }
+
+ printk(KERN_INFO "Found: %s\n",jedec_table[index].name);
+
+ num_erase_regions = jedec_table[index].nr_regions;
+
+ cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
+ if (!cfi->cfiq) {
+ return 0;
+ }
+
+ memset(cfi->cfiq, 0, sizeof(struct cfi_ident));
+
+ cfi->cfiq->P_ID = jedec_table[index].cmd_set;
+ cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
+ cfi->cfiq->DevSize = jedec_table[index].dev_size;
+ cfi->cfi_mode = CFI_MODE_JEDEC;
+ cfi->sector_erase_cmd = CMD(0x30);
+
+ for (i = 0; i < num_erase_regions; i++) {
+ cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
+ }
+ cfi->cmdset_priv = NULL;
+
+ /* This may be redundant for some cases, but it doesn't hurt */
+ cfi->mfr = jedec_table[index].mfr_id;
+ cfi->id = jedec_table[index].dev_id;
+
+ uaddr = jedec_table[index].uaddr;
+
+ /* The table has unlock addresses in _bytes_, and we try not to let
+ our brains explode when we see the datasheets talking about address
+ lines numbered from A-1 to A18. The CFI table has unlock addresses
+ in device-words according to the mode the device is connected in */
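+ /*
+ * Worked example: MTD_UADDR_0x0AAA_0x0555 stores byte addresses
+ * 0x0AAA/0x0555; a chip wired in x16 mode has device_type == 2,
+ * so the unlock addresses below become device words 0x0555/0x02AA.
+ */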
+ cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / cfi->device_type;
+ cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / cfi->device_type;
+
+ return 1; /* ok */
+}
+
+
+/*
+ * There is a BIG problem properly identifying the JEDEC device and
+ * guaranteeing the mapped address, unlock addresses, and proper chip ID.
+ * This function attempts to minimize errors. It is doubtful that this
+ * probe will ever be perfect - consequently there should be some module
+ * parameters that could be manually specified to force the chip info.
+ */
+static inline int jedec_match( uint32_t base,
+ struct map_info *map,
+ struct cfi_private *cfi,
+ const struct amd_flash_info *finfo )
+{
+ int rc = 0; /* failure until all tests pass */
+ u32 mfr, id;
+ uint8_t uaddr;
+
+ /*
+ * The IDs must match. For X16 and X32 devices operating in
+ * a lower width (X8 or X16), the device IDs are usually just
+ * the lower byte(s) of the larger device ID for wider mode. If
+ * a part is found that doesn't fit this assumption (the device ID
+ * for the smaller-width mode is completely unrelated to the
+ * full-width mode) then the jedec_table[] will have to be
+ * augmented with the IDs for different widths.
+ */
+ switch (cfi->device_type) {
+ case CFI_DEVICETYPE_X8:
+ mfr = (uint8_t)finfo->mfr_id;
+ id = (uint8_t)finfo->dev_id;
+
+ /* bjd: it seems that if we do this, we can end up
+ * detecting 16bit flashes as an 8bit device, even though
+ * they aren't.
+ */
+ if (finfo->dev_id > 0xff) {
+ pr_debug("%s(): ID is not 8bit\n",
+ __func__);
+ goto match_done;
+ }
+ break;
+ case CFI_DEVICETYPE_X16:
+ mfr = (uint16_t)finfo->mfr_id;
+ id = (uint16_t)finfo->dev_id;
+ break;
+ case CFI_DEVICETYPE_X32:
+ mfr = (uint16_t)finfo->mfr_id;
+ id = (uint32_t)finfo->dev_id;
+ break;
+ default:
+ printk(KERN_WARNING
+ "MTD %s(): Unsupported device type %d\n",
+ __func__, cfi->device_type);
+ goto match_done;
+ }
+ if ( cfi->mfr != mfr || cfi->id != id ) {
+ goto match_done;
+ }
+
+ /* the part size must fit in the memory window */
+ pr_debug("MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
+ __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) );
+ if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) {
+ pr_debug("MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
+ __func__, finfo->mfr_id, finfo->dev_id,
+ 1 << finfo->dev_size );
+ goto match_done;
+ }
+
+ if (! (finfo->devtypes & cfi->device_type))
+ goto match_done;
+
+ uaddr = finfo->uaddr;
+
+ pr_debug("MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
+ __func__, cfi->addr_unlock1, cfi->addr_unlock2 );
+ if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
+ && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 ||
+ unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) {
+ pr_debug("MTD %s(): 0x%.4x 0x%.4x did not match\n",
+ __func__,
+ unlock_addrs[uaddr].addr1,
+ unlock_addrs[uaddr].addr2);
+ goto match_done;
+ }
+
+ /*
+ * Make sure the IDs disappear when the device is taken out of
+ * ID mode. The only case where this check wrongly fails is
+ * when the ID values happen to be stored as data at those same
+ * addresses. For this rare and unfortunate case the chip
+ * cannot be probed correctly.
+ * FIXME - write a driver that takes all of the chip info as
+ * module parameters, doesn't probe but forces a load.
+ */
+ pr_debug("MTD %s(): check ID's disappear when not in ID mode\n",
+ __func__ );
+ jedec_reset( base, map, cfi );
+ mfr = jedec_read_mfr( map, base, cfi );
+ id = jedec_read_id( map, base, cfi );
+ if ( mfr == cfi->mfr && id == cfi->id ) {
+ pr_debug("MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
+ "You might need to manually specify JEDEC parameters.\n",
+ __func__, cfi->mfr, cfi->id );
+ goto match_done;
+ }
+
+ /* all tests passed - mark as success */
+ rc = 1;
+
+ /*
+ * Put the device back in ID mode - only need to do this if we
+ * were truly frobbing a real device.
+ */
+ pr_debug("MTD %s(): return to ID mode\n", __func__ );
+ if (cfi->addr_unlock1) {
+ cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
+ }
+ cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ /* FIXME - should have a delay before continuing */
+
+ match_done:
+ return rc;
+}
+
+
+static int jedec_probe_chip(struct map_info *map, __u32 base,
+ unsigned long *chip_map, struct cfi_private *cfi)
+{
+ int i;
+ enum uaddr uaddr_idx = MTD_UADDR_NOT_SUPPORTED;
+ u32 probe_offset1, probe_offset2;
+
+ retry:
+ if (!cfi->numchips) {
+ uaddr_idx++;
+
+ if (MTD_UADDR_UNNECESSARY == uaddr_idx)
+ return 0;
+
+ cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1 / cfi->device_type;
+ cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2 / cfi->device_type;
+ }
+
+ /* Make certain we aren't probing past the end of the map */
+ if (base >= map->size) {
+ printk(KERN_NOTICE
+ "Probe at base(0x%08x) past the end of the map(0x%08lx)\n",
+ base, map->size - 1);
+ return 0;
+ }
+ /* Ensure the unlock addresses we try stay inside the map */
+ probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, map, cfi);
+ probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, map, cfi);
+ if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
+ ((base + probe_offset2 + map_bankwidth(map)) >= map->size))
+ goto retry;
+
+ /* Reset */
+ jedec_reset(base, map, cfi);
+
+ /* Autoselect Mode */
+ if (cfi->addr_unlock1) {
+ cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
+ }
+ cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ /* FIXME - should have a delay before continuing */
+
+ if (!cfi->numchips) {
+ /* This is the first time we're called. Set up the CFI
+ stuff accordingly and return */
+
+ cfi->mfr = jedec_read_mfr(map, base, cfi);
+ cfi->id = jedec_read_id(map, base, cfi);
+ pr_debug("Search for id:(%02x %02x) interleave(%d) type(%d)\n",
+ cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
+ for (i = 0; i < ARRAY_SIZE(jedec_table); i++) {
+ if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
+ pr_debug("MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
+ __func__, cfi->mfr, cfi->id,
+ cfi->addr_unlock1, cfi->addr_unlock2 );
+ if (!cfi_jedec_setup(map, cfi, i))
+ return 0;
+ goto ok_out;
+ }
+ }
+ goto retry;
+ } else {
+ uint16_t mfr;
+ uint16_t id;
+
+ /* Make sure it is a chip of the same manufacturer and id */
+ mfr = jedec_read_mfr(map, base, cfi);
+ id = jedec_read_id(map, base, cfi);
+
+ if ((mfr != cfi->mfr) || (id != cfi->id)) {
+ printk(KERN_DEBUG "%s: Found different chip or no chip at all (mfr 0x%x, id 0x%x) at 0x%x\n",
+ map->name, mfr, id, base);
+ jedec_reset(base, map, cfi);
+ return 0;
+ }
+ }
+
+ /* Check each previous chip location to see if it's an alias */
+ for (i = 0; i < (base >> cfi->chipshift); i++) {
+ unsigned long start;
+ if (!test_bit(i, chip_map)) {
+ continue; /* Skip location; no valid chip at this address */
+ }
+ start = i << cfi->chipshift;
+ if (jedec_read_mfr(map, start, cfi) == cfi->mfr &&
+ jedec_read_id(map, start, cfi) == cfi->id) {
+ /* Eep. This chip also looks like it's in autoselect mode.
+ Is it an alias for the new one? */
+ jedec_reset(start, map, cfi);
+
+ /* If the device IDs go away, it's an alias */
+ if (jedec_read_mfr(map, base, cfi) != cfi->mfr ||
+ jedec_read_id(map, base, cfi) != cfi->id) {
+ printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
+ map->name, base, start);
+ return 0;
+ }
+
+ /* Yes, it's actually got the device IDs as data. Most
+ * unfortunate. Stick the new chip in read mode
+ * too and if it's the same, assume it's an alias. */
+ /* FIXME: Use other modes to do a proper check */
+ jedec_reset(base, map, cfi);
+ if (jedec_read_mfr(map, base, cfi) == cfi->mfr &&
+ jedec_read_id(map, base, cfi) == cfi->id) {
+ printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
+ map->name, base, start);
+ return 0;
+ }
+ }
+ }
+
+ /* OK, if we got to here, then none of the previous chips appear to
+ be aliases for the current one. */
+ set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
+ cfi->numchips++;
+
+ok_out:
+ /* Put it back into Read Mode */
+ jedec_reset(base, map, cfi);
+
+ printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
+ map->name, cfi_interleave(cfi), cfi->device_type*8, base,
+ map->bankwidth*8);
+
+ return 1;
+}
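chip_map above is an ordinary kernel bitmap with one bit per chip-sized slot in the window; the alias pass only revisits slots whose bit is already set. A sketch of that bookkeeping, with the slot geometry invented for illustration:

	#include <linux/bitmap.h>
	#include <linux/types.h>

	#define CHIPSHIFT 20	/* assume 1 MiB per chip */
	#define MAX_CHIPS 8	/* assume an 8 MiB mapping window */

	static DECLARE_BITMAP(chip_map, MAX_CHIPS);

	static void mark_chip(u32 base)
	{
		set_bit(base >> CHIPSHIFT, chip_map);	/* slot now holds a chip */
	}

	static bool slot_occupied(u32 base)
	{
		return test_bit(base >> CHIPSHIFT, chip_map);
	}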
+
+static struct chip_probe jedec_chip_probe = {
+ .name = "JEDEC",
+ .probe_chip = jedec_probe_chip
+};
+
+static struct mtd_info *jedec_probe(struct map_info *map)
+{
+ /*
+ * Just use the generic probe stuff to call our CFI-specific
+ * chip_probe routine in all the possible permutations, etc.
+ */
+ return mtd_do_chip_probe(map, &jedec_chip_probe);
+}
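Map drivers select this probe by its registered name; a sketch of the typical fallback chain (function name invented; compare the map_absent usage example later in this page):

	#include <linux/mtd/map.h>
	#include <linux/mtd/mtd.h>

	static struct mtd_info *probe_with_fallback(struct map_info *map)
	{
		struct mtd_info *mtd = do_map_probe("jedec_probe", map);

		if (!mtd)
			mtd = do_map_probe("map_rom", map);	/* read-only fallback */
		return mtd;	/* NULL if nothing matched */
	}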
+
+static struct mtd_chip_driver jedec_chipdrv = {
+ .probe = jedec_probe,
+ .name = "jedec_probe",
+ .module = THIS_MODULE
+};
+
+static int __init jedec_probe_init(void)
+{
+ register_mtd_chip_driver(&jedec_chipdrv);
+ return 0;
+}
+
+static void __exit jedec_probe_exit(void)
+{
+ unregister_mtd_chip_driver(&jedec_chipdrv);
+}
+
+module_init(jedec_probe_init);
+module_exit(jedec_probe_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Erwin Authried <eauth@softsys.co.at> et al.");
+MODULE_DESCRIPTION("Probe code for JEDEC-compliant flash chips");
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
new file mode 100644
index 0000000000..fc68557f49
--- /dev/null
+++ b/drivers/mtd/chips/map_absent.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Common code to handle absent "placeholder" devices
+ * Copyright 2001 Resilience Corporation <ebrower@resilience.com>
+ *
+ * This map driver is used to allocate "placeholder" MTD
+ * devices on systems that have socketed/removable media.
+ * Use of this driver as a fallback preserves the expected
+ * registration of MTD device nodes regardless of probe outcome.
+ * A usage example is as follows:
+ *
+ * my_dev[i] = do_map_probe("cfi", &my_map[i]);
+ * if(NULL == my_dev[i]) {
+ * my_dev[i] = do_map_probe("map_absent", &my_map[i]);
+ * }
+ *
+ * Any device 'probed' with this driver will return -ENODEV
+ * upon open.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+
+static int map_absent_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int map_absent_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int map_absent_erase (struct mtd_info *, struct erase_info *);
+static void map_absent_sync (struct mtd_info *);
+static struct mtd_info *map_absent_probe(struct map_info *map);
+static void map_absent_destroy (struct mtd_info *);
+
+
+static struct mtd_chip_driver map_absent_chipdrv = {
+ .probe = map_absent_probe,
+ .destroy = map_absent_destroy,
+ .name = "map_absent",
+ .module = THIS_MODULE
+};
+
+static struct mtd_info *map_absent_probe(struct map_info *map)
+{
+ struct mtd_info *mtd;
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd) {
+ return NULL;
+ }
+
+ map->fldrv = &map_absent_chipdrv;
+ mtd->priv = map;
+ mtd->name = map->name;
+ mtd->type = MTD_ABSENT;
+ mtd->size = map->size;
+ mtd->_erase = map_absent_erase;
+ mtd->_read = map_absent_read;
+ mtd->_write = map_absent_write;
+ mtd->_sync = map_absent_sync;
+ mtd->flags = 0;
+ mtd->erasesize = PAGE_SIZE;
+ mtd->writesize = 1;
+
+ __module_get(THIS_MODULE);
+ return mtd;
+}
+
+
+static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ return -ENODEV;
+}
+
+static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
+{
+ return -ENODEV;
+}
+
+static int map_absent_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return -ENODEV;
+}
+
+static void map_absent_sync(struct mtd_info *mtd)
+{
+ /* nop */
+}
+
+static void map_absent_destroy(struct mtd_info *mtd)
+{
+ /* nop */
+}
+
+static int __init map_absent_init(void)
+{
+ register_mtd_chip_driver(&map_absent_chipdrv);
+ return 0;
+}
+
+static void __exit map_absent_exit(void)
+{
+ unregister_mtd_chip_driver(&map_absent_chipdrv);
+}
+
+module_init(map_absent_init);
+module_exit(map_absent_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Resilience Corporation - Eric Brower <ebrower@resilience.com>");
+MODULE_DESCRIPTION("Placeholder MTD chip driver for 'absent' chips");
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
new file mode 100644
index 0000000000..e8dd649692
--- /dev/null
+++ b/drivers/mtd/chips/map_ram.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common code to handle map devices which are simple RAM
+ * (C) 2000 Red Hat.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+
+
+static int mapram_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int mapram_erase (struct mtd_info *, struct erase_info *);
+static void mapram_nop (struct mtd_info *);
+static struct mtd_info *map_ram_probe(struct map_info *map);
+static int mapram_point (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+
+
+static struct mtd_chip_driver mapram_chipdrv = {
+ .probe = map_ram_probe,
+ .name = "map_ram",
+ .module = THIS_MODULE
+};
+
+static struct mtd_info *map_ram_probe(struct map_info *map)
+{
+ struct mtd_info *mtd;
+
+ /* Check the first byte is RAM */
+#if 0
+ map_write8(map, 0x55, 0);
+ if (map_read8(map, 0) != 0x55)
+ return NULL;
+
+ map_write8(map, 0xAA, 0);
+ if (map_read8(map, 0) != 0xAA)
+ return NULL;
+
+ /* Check the last byte is RAM */
+ map_write8(map, 0x55, map->size-1);
+ if (map_read8(map, map->size-1) != 0x55)
+ return NULL;
+
+ map_write8(map, 0xAA, map->size-1);
+ if (map_read8(map, map->size-1) != 0xAA)
+ return NULL;
+#endif
+ /* OK. It seems to be RAM. */
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ return NULL;
+
+ map->fldrv = &mapram_chipdrv;
+ mtd->priv = map;
+ mtd->name = map->name;
+ mtd->type = MTD_RAM;
+ mtd->size = map->size;
+ mtd->_erase = mapram_erase;
+ mtd->_read = mapram_read;
+ mtd->_write = mapram_write;
+ mtd->_panic_write = mapram_write;
+ mtd->_point = mapram_point;
+ mtd->_sync = mapram_nop;
+ mtd->_unpoint = mapram_unpoint;
+ mtd->flags = MTD_CAP_RAM;
+ mtd->writesize = 1;
+
+ mtd->erasesize = PAGE_SIZE;
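+ /* shrink erasesize until it evenly divides the RAM size */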
+ while (mtd->size & (mtd->erasesize - 1))
+ mtd->erasesize >>= 1;
+
+ __module_get(THIS_MODULE);
+ return mtd;
+}
+
+static int mapram_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ struct map_info *map = mtd->priv;
+
+ if (!map->virt)
+ return -EINVAL;
+ *virt = map->virt + from;
+ if (phys)
+ *phys = map->phys + from;
+ *retlen = len;
+ return 0;
+}
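Callers reach mapram_point() through the mtd_point()/mtd_unpoint() wrappers. A sketch of the usual zero-copy read pattern (helper name invented):

	#include <linux/mtd/mtd.h>
	#include <linux/string.h>

	/* Copy 'len' bytes at 'from' out of an MTD that supports point(). */
	static int read_via_point(struct mtd_info *mtd, loff_t from, size_t len,
				  void *buf)
	{
		size_t retlen;
		void *virt;
		int ret;

		ret = mtd_point(mtd, from, len, &retlen, &virt, NULL);
		if (ret)
			return ret;
		memcpy(buf, virt, retlen);	/* direct window into the RAM */
		return mtd_unpoint(mtd, from, retlen);
	}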
+
+static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
+}
+
+static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+
+ map_copy_from(map, buf, from, len);
+ *retlen = len;
+ return 0;
+}
+
+static int mapram_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+
+ map_copy_to(map, to, buf, len);
+ *retlen = len;
+ return 0;
+}
+
+static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr)
+{
+ /* Yeah, it's inefficient. Who cares? It's faster than a _real_
+ flash erase. */
+ struct map_info *map = mtd->priv;
+ map_word allff;
+ unsigned long i;
+
+ allff = map_word_ff(map);
+ for (i=0; i<instr->len; i += map_bankwidth(map))
+ map_write(map, allff, instr->addr + i);
+ return 0;
+}
+
+static void mapram_nop(struct mtd_info *mtd)
+{
+ /* Nothing to see here */
+}
+
+static int __init map_ram_init(void)
+{
+ register_mtd_chip_driver(&mapram_chipdrv);
+ return 0;
+}
+
+static void __exit map_ram_exit(void)
+{
+ unregister_mtd_chip_driver(&mapram_chipdrv);
+}
+
+module_init(map_ram_init);
+module_exit(map_ram_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("MTD chip driver for RAM chips");
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
new file mode 100644
index 0000000000..0823b15aaa
--- /dev/null
+++ b/drivers/mtd/chips/map_rom.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common code to handle map devices which are simple ROM
+ * (C) 2000 Red Hat.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+
+static int maprom_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static void maprom_nop (struct mtd_info *);
+static struct mtd_info *map_rom_probe(struct map_info *map);
+static int maprom_erase (struct mtd_info *mtd, struct erase_info *info);
+static int maprom_point (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+
+
+static struct mtd_chip_driver maprom_chipdrv = {
+ .probe = map_rom_probe,
+ .name = "map_rom",
+ .module = THIS_MODULE
+};
+
+static unsigned int default_erasesize(struct map_info *map)
+{
+ const __be32 *erase_size = NULL;
+
+ erase_size = of_get_property(map->device_node, "erase-size", NULL);
+
+ return !erase_size ? map->size : be32_to_cpu(*erase_size);
+}
+
+static struct mtd_info *map_rom_probe(struct map_info *map)
+{
+ struct mtd_info *mtd;
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ return NULL;
+
+ map->fldrv = &maprom_chipdrv;
+ mtd->priv = map;
+ mtd->name = map->name;
+ mtd->type = MTD_ROM;
+ mtd->size = map->size;
+ mtd->_point = maprom_point;
+ mtd->_unpoint = maprom_unpoint;
+ mtd->_read = maprom_read;
+ mtd->_write = maprom_write;
+ mtd->_sync = maprom_nop;
+ mtd->_erase = maprom_erase;
+ mtd->flags = MTD_CAP_ROM;
+ mtd->erasesize = default_erasesize(map);
+ mtd->writesize = 1;
+ mtd->writebufsize = 1;
+
+ __module_get(THIS_MODULE);
+ return mtd;
+}
+
+
+static int maprom_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ struct map_info *map = mtd->priv;
+
+ if (!map->virt)
+ return -EINVAL;
+ *virt = map->virt + from;
+ if (phys)
+ *phys = map->phys + from;
+ *retlen = len;
+ return 0;
+}
+
+static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
+}
+
+static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+
+ map_copy_from(map, buf, from, len);
+ *retlen = len;
+ return 0;
+}
+
+static void maprom_nop(struct mtd_info *mtd)
+{
+ /* Nothing to see here */
+}
+
+static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
+{
+ return -EROFS;
+}
+
+static int maprom_erase (struct mtd_info *mtd, struct erase_info *info)
+{
+ /* We do our best 8) */
+ return -EROFS;
+}
+
+static int __init map_rom_init(void)
+{
+ register_mtd_chip_driver(&maprom_chipdrv);
+ return 0;
+}
+
+static void __exit map_rom_exit(void)
+{
+ unregister_mtd_chip_driver(&maprom_chipdrv);
+}
+
+module_init(map_rom_init);
+module_exit(map_rom_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("MTD chip driver for ROM chips");
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
new file mode 100644
index 0000000000..ff2f9e55ef
--- /dev/null
+++ b/drivers/mtd/devices/Kconfig
@@ -0,0 +1,216 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Self-contained MTD device drivers"
+ depends on MTD!=n
+ depends on HAS_IOMEM
+
+config MTD_PMC551
+ tristate "Ramix PMC551 PCI Mezzanine RAM card support"
+ depends on PCI
+ help
+ This provides a MTD device driver for the Ramix PMC551 RAM PCI card
+ from Ramix Inc. <http://www.ramix.com/products/memory/pmc551.html>.
+ These devices come in memory configurations from 32M - 1G. If you
+ have one, you probably want to enable this.
+
+ If this driver is compiled as a module you get the ability to select
+ the size of the aperture window pointing into the device's memory.
+ What this means is that if you have a 1G card, normally the kernel
+ will use a 1G memory map as its view of the device. As a module,
+ you can select a 1M window into the memory and the driver will
+ "slide" the window around the PMC551's memory. This was
+ particularly useful on the 2.2 kernels on PPC architectures as there
+ was limited kernel space to deal with.
+
+config MTD_PMC551_BUGFIX
+ bool "PMC551 256M DRAM Bugfix"
+ depends on MTD_PMC551
+ help
+ Some of Ramix's PMC551 boards with 256M configurations have invalid
+ column and row mux values. This option will fix them, but will
+ break other memory configurations. If unsure say N.
+
+config MTD_PMC551_DEBUG
+ bool "PMC551 Debugging"
+ depends on MTD_PMC551
+ help
+ This option makes the PMC551 more verbose during its operation and
+ is only really useful if you are developing on this driver or
+ suspect a possible hardware or driver bug. If unsure say N.
+
+config MTD_MS02NV
+ tristate "DEC MS02-NV NVRAM module support"
+ depends on MACH_DECSTATION
+ help
+ This is an MTD driver for the DEC's MS02-NV (54-20948-01) battery
+ backed-up NVRAM module. The module was originally meant as an NFS
+ accelerator. Say Y here if you have a DECstation 5000/2x0 or a
+ DECsystem 5900 equipped with such a module.
+
+ If you want to compile this driver as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want),
+ say M here and read <file:Documentation/kbuild/modules.rst>.
+ The module will be called ms02-nv.
+
+config MTD_DATAFLASH
+ tristate "Support for AT45xxx DataFlash"
+ depends on SPI_MASTER
+ help
+ This enables access to AT45xxx DataFlash chips, using SPI.
+ Sometimes DataFlash chips are packaged inside MMC-format
+ cards; at this writing, the MMC stack won't handle those.
+
+config MTD_DATAFLASH_WRITE_VERIFY
+ bool "Verify DataFlash page writes"
+ depends on MTD_DATAFLASH
+ help
+ This adds an extra check when data is written to the flash.
+ It may help if you are verifying chip setup (timings etc) on
+ your board. There is a rare possibility that even though the
+ device thinks the write was successful, a bit could have been
+ flipped accidentally due to device wear or something else.
+
+config MTD_DATAFLASH_OTP
+ bool "DataFlash OTP support (Security Register)"
+ depends on MTD_DATAFLASH
+ help
+ Newer DataFlash chips (revisions C and D) support 128 bytes of
+ one-time-programmable (OTP) data. The first half may be written
+ (once) with up to 64 bytes of data, such as a serial number or
+ other key product data. The second half is programmed with a
+ unique-to-each-chip bit pattern at the factory.
+
+config MTD_MCHP23K256
+ tristate "Microchip 23K256 SRAM"
+ depends on SPI_MASTER
+ help
+ This enables access to Microchip 23K256 SRAM chips, using SPI.
+
+ Set up your spi devices with the right board-specific
+ platform data, or a device tree description if you want to
+ specify device partitioning.
+
+config MTD_MCHP48L640
+ tristate "Microchip 48L640 EERAM"
+ depends on SPI_MASTER
+ help
+ This enables access to Microchip 48L640 EERAM chips, using SPI.
+
+config MTD_SPEAR_SMI
+ tristate "SPEAR MTD NOR Support through SMI controller"
+ depends on PLAT_SPEAR || COMPILE_TEST
+ default y
+ help
+ This enables SNOR support on SPEAR platforms using the SMI controller.
+
+config MTD_SST25L
+ tristate "Support SST25L (non JEDEC) SPI Flash chips"
+ depends on SPI_MASTER
+ help
+ This enables access to the non JEDEC SST25L SPI flash chips, used
+ for program and data storage.
+
+ Set up your spi devices with the right board-specific platform data,
+ if you want to specify device partitioning.
+
+config MTD_BCM47XXSFLASH
+ tristate "Support for serial flash on BCMA bus"
+ depends on BCMA_SFLASH && (MIPS || ARM)
+ help
+ The BCMA bus can have various flash memories attached; they are
+ registered by bcma as platform devices. This enables the driver for
+ serial flash memories.
+
+config MTD_SLRAM
+ tristate "Uncached system RAM"
+ help
+ If your CPU cannot cache all of the physical memory in your machine,
+ you can still use it for storage or swap by using this driver to
+ present it to the system as a Memory Technology Device.
+
+config MTD_PHRAM
+ tristate "Physical system RAM"
+ help
+ This is a re-implementation of the slram driver above.
+
+ Use this driver to access physical memory that the kernel proper
+ doesn't have access to, memory beyond the mem=xxx limit, nvram,
+ memory on the video card, etc...
+
+config MTD_MTDRAM
+ tristate "Test driver using RAM"
+ help
+ This enables a test MTD device driver which uses vmalloc() to
+ provide storage. You probably want to say 'N' unless you're
+ testing stuff.
+
+config MTDRAM_TOTAL_SIZE
+ int "MTDRAM device size in KiB"
+ depends on MTD_MTDRAM
+ default "4096"
+ help
+ This allows you to configure the total size of the MTD device
+ emulated by the MTDRAM driver. If the MTDRAM driver is built
+ as a module, it is also possible to specify this as a parameter when
+ loading the module.
+
+config MTDRAM_ERASE_SIZE
+ int "MTDRAM erase block size in KiB"
+ depends on MTD_MTDRAM
+ default "128"
+ help
+ This allows you to configure the size of the erase blocks in the
+ device emulated by the MTDRAM driver. If the MTDRAM driver is built
+ as a module, it is also possible to specify this as a parameter when
+ loading the module.
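+
+# Example (illustrative): when mtdram is built as a module, both sizes can be
+# given at load time, e.g. "modprobe mtdram total_size=8192 erase_size=256".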
+
+config MTD_BLOCK2MTD
+ tristate "MTD using block device"
+ depends on BLOCK
+ help
+ This driver allows a block device to appear as an MTD. It would
+ generally be used in the following cases:
+
+ Using Compact Flash as an MTD, these usually present themselves to
+ the system as an ATA drive.
+ Testing MTD users (e.g. JFFS2) on large media and media that might
+ be removed during a write (using the floppy drive).
+
+config MTD_POWERNV_FLASH
+ tristate "powernv flash MTD driver"
+ depends on PPC_POWERNV
+ help
+ This provides an MTD device to access flash on powernv OPAL
+ platforms from Linux. This device abstracts away the
+ firmware interface for flash access.
+
+comment "Disk-On-Chip Device Drivers"
+
+config MTD_DOCG3
+ tristate "M-Systems Disk-On-Chip G3"
+ select BCH
+ select BCH_CONST_PARAMS if !MTD_NAND_ECC_SW_BCH
+ select BITREVERSE
+ help
+ This provides an MTD device driver for the M-Systems DiskOnChip
+ G3 devices.
+
+ The driver provides access to G3 DiskOnChip, distributed by
+ M-Systems and now Sandisk. The support is very experimental,
+ and doesn't give access to any write operations.
+
+config MTD_ST_SPI_FSM
+ tristate "ST Microelectronics SPI FSM Serial Flash Controller"
+ depends on ARCH_STI
+ help
+ This provides an MTD device driver for the ST Microelectronics
+ SPI Fast Sequence Mode (FSM) Serial Flash Controller and support
+ for a subset of connected Serial Flash devices.
+
+if MTD_DOCG3
+config BCH_CONST_M
+ default 14
+config BCH_CONST_T
+ default 4
+endif
+
+endmenu
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
new file mode 100644
index 0000000000..d11eb2b8b6
--- /dev/null
+++ b/drivers/mtd/devices/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# linux/drivers/mtd/devices/Makefile
+#
+
+obj-$(CONFIG_MTD_DOCG3) += docg3.o
+obj-$(CONFIG_MTD_SLRAM) += slram.o
+obj-$(CONFIG_MTD_PHRAM) += phram.o
+obj-$(CONFIG_MTD_PMC551) += pmc551.o
+obj-$(CONFIG_MTD_MS02NV) += ms02-nv.o
+obj-$(CONFIG_MTD_MTDRAM) += mtdram.o
+obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
+obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
+obj-$(CONFIG_MTD_MCHP23K256) += mchp23k256.o
+obj-$(CONFIG_MTD_MCHP48L640) += mchp48l640.o
+obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
+obj-$(CONFIG_MTD_SST25L) += sst25l.o
+obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o
+obj-$(CONFIG_MTD_ST_SPI_FSM) += st_spi_fsm.o
+obj-$(CONFIG_MTD_POWERNV_FLASH) += powernv_flash.o
+
+
+CFLAGS_docg3.o += -I$(src)
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
new file mode 100644
index 0000000000..3af50db8b2
--- /dev/null
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/mtd/mtd.h>
+#include <linux/platform_device.h>
+#include <linux/bcma/bcma.h>
+
+#include "bcm47xxsflash.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
+
+static const char * const probes[] = { "bcm47xxpart", NULL };
+
+/**************************************************
+ * Various helpers
+ **************************************************/
+
+static void bcm47xxsflash_cmd(struct bcm47xxsflash *b47s, u32 opcode)
+{
+ int i;
+
+ b47s->cc_write(b47s, BCMA_CC_FLASHCTL, BCMA_CC_FLASHCTL_START | opcode);
+ for (i = 0; i < 1000; i++) {
+ if (!(b47s->cc_read(b47s, BCMA_CC_FLASHCTL) &
+ BCMA_CC_FLASHCTL_BUSY))
+ return;
+ cpu_relax();
+ }
+ pr_err("Control command failed (timeout)!\n");
+}
+
+static int bcm47xxsflash_poll(struct bcm47xxsflash *b47s, int timeout)
+{
+ unsigned long deadline = jiffies + timeout;
+
+ do {
+ switch (b47s->type) {
+ case BCM47XXSFLASH_TYPE_ST:
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_RDSR);
+ if (!(b47s->cc_read(b47s, BCMA_CC_FLASHDATA) &
+ SR_ST_WIP))
+ return 0;
+ break;
+ case BCM47XXSFLASH_TYPE_ATMEL:
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_STATUS);
+ if (b47s->cc_read(b47s, BCMA_CC_FLASHDATA) &
+ SR_AT_READY)
+ return 0;
+ break;
+ }
+
+ cpu_relax();
+ udelay(1);
+ } while (!time_after_eq(jiffies, deadline));
+
+ pr_err("Timeout waiting for flash to be ready!\n");
+
+ return -EBUSY;
+}
+
+/**************************************************
+ * MTD ops
+ **************************************************/
+
+static int bcm47xxsflash_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+
+ switch (b47s->type) {
+ case BCM47XXSFLASH_TYPE_ST:
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_WREN);
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, erase->addr);
+ /* Newer flashes have "sub-sectors" which can be erased
+ * independently with a new command: ST_SSE. The ST_SE command
+ * erases 64KB just as before.
+ */
+ if (b47s->blocksize < (64 * 1024))
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_SSE);
+ else
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_SE);
+ break;
+ case BCM47XXSFLASH_TYPE_ATMEL:
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, erase->addr << 1);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_PAGE_ERASE);
+ break;
+ }
+
+ return bcm47xxsflash_poll(b47s, HZ);
+}
+
+static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ size_t orig_len = len;
+
+ /* Check address range */
+ if ((from + len) > mtd->size)
+ return -EINVAL;
+
+ /* Read as much as possible using fast MMIO window */
+ if (from < BCM47XXSFLASH_WINDOW_SZ) {
+ size_t memcpy_len;
+
+ memcpy_len = min(len, (size_t)(BCM47XXSFLASH_WINDOW_SZ - from));
+ memcpy_fromio(buf, b47s->window + from, memcpy_len);
+ from += memcpy_len;
+ len -= memcpy_len;
+ buf += memcpy_len;
+ }
+
+ /* Use indirect access for content out of the window */
+ for (; len; len--) {
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, from++);
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_READ4B);
+ *buf++ = b47s->cc_read(b47s, BCMA_CC_FLASHDATA);
+ }
+
+ *retlen = orig_len;
+
+ return orig_len;
+}
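+
+/*
+ * Example (illustrative): with the 16 MiB window, a read of 8 bytes at
+ * offset 0x0FFFFFC copies 4 bytes through the fast MMIO window and fetches
+ * the remaining 4 bytes one at a time via OPCODE_ST_READ4B indirect
+ * accesses in the loop above.
+ */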
+
+static int bcm47xxsflash_write_st(struct mtd_info *mtd, u32 offset, size_t len,
+ const u_char *buf)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ int written = 0;
+
+ /* Enable writes */
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_WREN);
+
+ /* Write first byte */
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, offset);
+ b47s->cc_write(b47s, BCMA_CC_FLASHDATA, *buf++);
+
+ /* Program page */
+ if (b47s->bcma_cc->core->id.rev < 20) {
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_PP);
+ return 1; /* 1B written */
+ }
+
+ /* Program page and set CSA (on newer chips we can continue writing) */
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_CSA | OPCODE_ST_PP);
+ offset++;
+ len--;
+ written++;
+
+ while (len > 0) {
+ /* Page boundary, another function call is needed */
+ if ((offset & 0xFF) == 0)
+ break;
+
+ bcm47xxsflash_cmd(b47s, OPCODE_ST_CSA | *buf++);
+ offset++;
+ len--;
+ written++;
+ }
+
+ /* All done, drop CSA & poll */
+ b47s->cc_write(b47s, BCMA_CC_FLASHCTL, 0);
+ udelay(1);
+ if (bcm47xxsflash_poll(b47s, HZ / 10))
+ pr_err("Flash rejected dropping CSA\n");
+
+ return written;
+}
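+
+/*
+ * Example (illustrative) of how the CSA loop above chunks a write on
+ * rev >= 20 chips: a request of 8 bytes at offset 0x1FE programs bytes
+ * 0x1FE and 0x1FF, hits the 256 B page boundary ((offset & 0xFF) == 0) and
+ * returns 2; bcm47xxsflash_write() then calls back with the remaining 6
+ * bytes at offset 0x200. On rev < 20 chips, every call programs exactly 1 B.
+ */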
+
+static int bcm47xxsflash_write_at(struct mtd_info *mtd, u32 offset, size_t len,
+ const u_char *buf)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ u32 mask = b47s->blocksize - 1;
+ u32 page = (offset & ~mask) << 1;
+ u32 byte = offset & mask;
+ int written = 0;
+
+ /* If we don't overwrite whole page, read it to the buffer first */
+ if (byte || (len < b47s->blocksize)) {
+ int err;
+
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, page);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_BUF1_LOAD);
+ /* 250 us for AT45DB321B */
+ err = bcm47xxsflash_poll(b47s, HZ / 1000);
+ if (err) {
+ pr_err("Timeout reading page 0x%X info buffer\n", page);
+ return err;
+ }
+ }
+
+ /* Change buffer content with our data */
+ while (len > 0) {
+ /* Page boundary, another function call is needed */
+ if (byte == b47s->blocksize)
+ break;
+
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, byte++);
+ b47s->cc_write(b47s, BCMA_CC_FLASHDATA, *buf++);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_BUF1_WRITE);
+ len--;
+ written++;
+ }
+
+ /* Program page with the buffer content */
+ b47s->cc_write(b47s, BCMA_CC_FLASHADDR, page);
+ bcm47xxsflash_cmd(b47s, OPCODE_AT_BUF1_PROGRAM);
+
+ return written;
+}
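+
+/*
+ * Example (illustrative) of the address split above: with blocksize = 512,
+ * an offset of 0x300 gives page = (0x300 & ~0x1FF) << 1 = 0x400 and
+ * byte = 0x100, i.e. byte 256 of the second page. The page is loaded into
+ * buffer 1 first only when it is being partially overwritten.
+ */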
+
+static int bcm47xxsflash_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct bcm47xxsflash *b47s = mtd->priv;
+ int written;
+
+ /* Writing functions can return without writing all passed data, for
+ * example when the hardware is too old or when we hit a page boundary.
+ */
+ while (len > 0) {
+ switch (b47s->type) {
+ case BCM47XXSFLASH_TYPE_ST:
+ written = bcm47xxsflash_write_st(mtd, to, len, buf);
+ break;
+ case BCM47XXSFLASH_TYPE_ATMEL:
+ written = bcm47xxsflash_write_at(mtd, to, len, buf);
+ break;
+ default:
+ BUG_ON(1);
+ }
+ if (written < 0) {
+ pr_err("Error writing at offset 0x%llX\n", to);
+ return written;
+ }
+ to += (loff_t)written;
+ len -= written;
+ *retlen += written;
+ buf += written;
+ }
+
+ return 0;
+}
+
+static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s,
+ struct device *dev)
+{
+ struct mtd_info *mtd = &b47s->mtd;
+
+ mtd->priv = b47s;
+ mtd->dev.parent = dev;
+ mtd->name = "bcm47xxsflash";
+
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->size = b47s->size;
+ mtd->erasesize = b47s->blocksize;
+ mtd->writesize = 1;
+ mtd->writebufsize = 1;
+
+ mtd->_erase = bcm47xxsflash_erase;
+ mtd->_read = bcm47xxsflash_read;
+ mtd->_write = bcm47xxsflash_write;
+}
+
+/**************************************************
+ * BCMA
+ **************************************************/
+
+static int bcm47xxsflash_bcma_cc_read(struct bcm47xxsflash *b47s, u16 offset)
+{
+ return bcma_cc_read32(b47s->bcma_cc, offset);
+}
+
+static void bcm47xxsflash_bcma_cc_write(struct bcm47xxsflash *b47s, u16 offset,
+ u32 value)
+{
+ bcma_cc_write32(b47s->bcma_cc, offset, value);
+}
+
+static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcma_sflash *sflash = dev_get_platdata(dev);
+ struct bcm47xxsflash *b47s;
+ struct resource *res;
+ int err;
+
+ b47s = devm_kzalloc(dev, sizeof(*b47s), GFP_KERNEL);
+ if (!b47s)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "invalid resource\n");
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ res->name)) {
+ dev_err(dev, "can't request region for resource %pR\n", res);
+ return -EBUSY;
+ }
+
+ b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash);
+ b47s->cc_read = bcm47xxsflash_bcma_cc_read;
+ b47s->cc_write = bcm47xxsflash_bcma_cc_write;
+
+ /*
+ * On old MIPS devices cache was magically invalidated when needed,
+ * allowing us to use cached access and gain some performance. Trying
+ * the same on ARM based BCM53573 results in flash corruptions, we need
+ * to use uncached access for it.
+ *
+ * It may be arch specific, but right now there is only 1 ARM SoC using
+ * this driver, so let's follow Broadcom's reference code and check
+ * ChipCommon revision.
+ */
+ if (b47s->bcma_cc->core->id.rev == 54)
+ b47s->window = ioremap(res->start, resource_size(res));
+ else
+ b47s->window = ioremap_cache(res->start, resource_size(res));
+ if (!b47s->window) {
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
+ return -ENOMEM;
+ }
+
+ switch (b47s->bcma_cc->capabilities & BCMA_CC_CAP_FLASHT) {
+ case BCMA_CC_FLASHT_STSER:
+ b47s->type = BCM47XXSFLASH_TYPE_ST;
+ break;
+ case BCMA_CC_FLASHT_ATSER:
+ b47s->type = BCM47XXSFLASH_TYPE_ATMEL;
+ break;
+ }
+
+ b47s->blocksize = sflash->blocksize;
+ b47s->numblocks = sflash->numblocks;
+ b47s->size = sflash->size;
+ bcm47xxsflash_fill_mtd(b47s, &pdev->dev);
+
+ platform_set_drvdata(pdev, b47s);
+
+ err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0);
+ if (err) {
+ pr_err("Failed to register MTD device: %d\n", err);
+ iounmap(b47s->window);
+ return err;
+ }
+
+ if (bcm47xxsflash_poll(b47s, HZ / 10))
+ pr_warn("Serial flash busy\n");
+
+ return 0;
+}
+
+static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
+{
+ struct bcm47xxsflash *b47s = platform_get_drvdata(pdev);
+
+ mtd_device_unregister(&b47s->mtd);
+ iounmap(b47s->window);
+
+ return 0;
+}
+
+static struct platform_driver bcma_sflash_driver = {
+ .probe = bcm47xxsflash_bcma_probe,
+ .remove = bcm47xxsflash_bcma_remove,
+ .driver = {
+ .name = "bcma_sflash",
+ },
+};
+
+/**************************************************
+ * Init
+ **************************************************/
+
+module_platform_driver(bcma_sflash_driver);
diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h
new file mode 100644
index 0000000000..fef0d5e42e
--- /dev/null
+++ b/drivers/mtd/devices/bcm47xxsflash.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BCM47XXSFLASH_H
+#define __BCM47XXSFLASH_H
+
+#include <linux/mtd/mtd.h>
+
+#define BCM47XXSFLASH_WINDOW_SZ SZ_16M
+
+/* Used for ST flashes only. */
+#define OPCODE_ST_WREN 0x0006 /* Write Enable */
+#define OPCODE_ST_WRDIS 0x0004 /* Write Disable */
+#define OPCODE_ST_RDSR 0x0105 /* Read Status Register */
+#define OPCODE_ST_WRSR 0x0101 /* Write Status Register */
+#define OPCODE_ST_READ 0x0303 /* Read Data Bytes */
+#define OPCODE_ST_PP 0x0302 /* Page Program */
+#define OPCODE_ST_SE 0x02d8 /* Sector Erase */
+#define OPCODE_ST_BE 0x00c7 /* Bulk Erase */
+#define OPCODE_ST_DP 0x00b9 /* Deep Power-down */
+#define OPCODE_ST_RES 0x03ab /* Read Electronic Signature */
+#define OPCODE_ST_CSA 0x1000 /* Keep chip select asserted */
+#define OPCODE_ST_SSE 0x0220 /* Sub-sector Erase */
+#define OPCODE_ST_READ4B 0x6313 /* Read Data Bytes in 4Byte addressing mode */
+
+/* Used for Atmel flashes only. */
+#define OPCODE_AT_READ 0x07e8
+#define OPCODE_AT_PAGE_READ 0x07d2
+#define OPCODE_AT_STATUS 0x01d7
+#define OPCODE_AT_BUF1_WRITE 0x0384
+#define OPCODE_AT_BUF2_WRITE 0x0387
+#define OPCODE_AT_BUF1_ERASE_PROGRAM 0x0283
+#define OPCODE_AT_BUF2_ERASE_PROGRAM 0x0286
+#define OPCODE_AT_BUF1_PROGRAM 0x0288
+#define OPCODE_AT_BUF2_PROGRAM 0x0289
+#define OPCODE_AT_PAGE_ERASE 0x0281
+#define OPCODE_AT_BLOCK_ERASE 0x0250
+#define OPCODE_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382
+#define OPCODE_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385
+#define OPCODE_AT_BUF1_LOAD 0x0253
+#define OPCODE_AT_BUF2_LOAD 0x0255
+#define OPCODE_AT_BUF1_COMPARE 0x0260
+#define OPCODE_AT_BUF2_COMPARE 0x0261
+#define OPCODE_AT_BUF1_REPROGRAM 0x0258
+#define OPCODE_AT_BUF2_REPROGRAM 0x0259
+
+/* Status register bits for ST flashes */
+#define SR_ST_WIP 0x01 /* Write In Progress */
+#define SR_ST_WEL 0x02 /* Write Enable Latch */
+#define SR_ST_BP_MASK 0x1c /* Block Protect */
+#define SR_ST_BP_SHIFT 2
+#define SR_ST_SRWD 0x80 /* Status Register Write Disable */
+
+/* Status register bits for Atmel flashes */
+#define SR_AT_READY 0x80
+#define SR_AT_MISMATCH 0x40
+#define SR_AT_ID_MASK 0x38
+#define SR_AT_ID_SHIFT 3
+
+struct bcma_drv_cc;
+
+enum bcm47xxsflash_type {
+ BCM47XXSFLASH_TYPE_ATMEL,
+ BCM47XXSFLASH_TYPE_ST,
+};
+
+struct bcm47xxsflash {
+ struct bcma_drv_cc *bcma_cc;
+ int (*cc_read)(struct bcm47xxsflash *b47s, u16 offset);
+ void (*cc_write)(struct bcm47xxsflash *b47s, u16 offset, u32 value);
+
+ enum bcm47xxsflash_type type;
+
+ void __iomem *window;
+
+ u32 blocksize;
+ u16 numblocks;
+ u32 size;
+
+ struct mtd_info mtd;
+};
+
+#endif /* __BCM47XXSFLASH_H */
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
new file mode 100644
index 0000000000..be106dc20f
--- /dev/null
+++ b/drivers/mtd/devices/block2mtd.c
@@ -0,0 +1,527 @@
+/*
+ * block2mtd.c - create an mtd from a block device
+ *
+ * Copyright (C) 2001,2002 Simon Evans <spse@secret.org.uk>
+ * Copyright (C) 2004-2006 Joern Engel <joern@wh.fh-wedel.de>
+ *
+ * Licence: GPL
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+/*
+ * When the first attempt at device initialization fails, we may need to
+ * wait a little bit and retry. This timeout, by default 3 seconds, gives
+ * the device time to start up. Required on BCM2708 and a few other chipsets.
+ */
+#define MTD_DEFAULT_TIMEOUT 3
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/backing-dev.h>
+#include <linux/bio.h>
+#include <linux/pagemap.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mutex.h>
+#include <linux/mount.h>
+#include <linux/slab.h>
+#include <linux/major.h>
+
+/* Maximum number of comma-separated items in the 'block2mtd=' parameter */
+#define BLOCK2MTD_PARAM_MAX_COUNT 3
+
+/* Info for the block device */
+struct block2mtd_dev {
+ struct list_head list;
+ struct block_device *blkdev;
+ struct mtd_info mtd;
+ struct mutex write_mutex;
+};
+
+
+/* Static info about the MTD, used in cleanup_module */
+static LIST_HEAD(blkmtd_device_list);
+
+
+static struct page *page_read(struct address_space *mapping, pgoff_t index)
+{
+ return read_mapping_page(mapping, index, NULL);
+}
+
+/* erase a specified part of the device */
+static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
+{
+ struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
+ struct page *page;
+ pgoff_t index = to >> PAGE_SHIFT; // page index
+ int pages = len >> PAGE_SHIFT;
+ u_long *p;
+ u_long *max;
+
+ while (pages) {
+ page = page_read(mapping, index);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ max = page_address(page) + PAGE_SIZE;
+ for (p=page_address(page); p<max; p++)
+ if (*p != -1UL) {
+ lock_page(page);
+ memset(page_address(page), 0xff, PAGE_SIZE);
+ set_page_dirty(page);
+ unlock_page(page);
+ balance_dirty_pages_ratelimited(mapping);
+ break;
+ }
+
+ put_page(page);
+ pages--;
+ index++;
+ }
+ return 0;
+}
+static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct block2mtd_dev *dev = mtd->priv;
+ size_t from = instr->addr;
+ size_t len = instr->len;
+ int err;
+
+ mutex_lock(&dev->write_mutex);
+ err = _block2mtd_erase(dev, from, len);
+ mutex_unlock(&dev->write_mutex);
+ if (err)
+ pr_err("erase failed err = %d\n", err);
+
+ return err;
+}
+
+
+static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct block2mtd_dev *dev = mtd->priv;
+ struct page *page;
+ pgoff_t index = from >> PAGE_SHIFT;
+ int offset = from & (PAGE_SIZE-1);
+ int cpylen;
+
+ while (len) {
+ if ((offset + len) > PAGE_SIZE)
+ cpylen = PAGE_SIZE - offset; // multiple pages
+ else
+ cpylen = len; // this page
+ len = len - cpylen;
+
+ page = page_read(dev->blkdev->bd_inode->i_mapping, index);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ memcpy(buf, page_address(page) + offset, cpylen);
+ put_page(page);
+
+ if (retlen)
+ *retlen += cpylen;
+ buf += cpylen;
+ offset = 0;
+ index++;
+ }
+ return 0;
+}
+
+
+/* write data to the underlying device */
+static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
+ loff_t to, size_t len, size_t *retlen)
+{
+ struct page *page;
+ struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
+ pgoff_t index = to >> PAGE_SHIFT; // page index
+ int offset = to & ~PAGE_MASK; // page offset
+ int cpylen;
+
+ while (len) {
+ if ((offset+len) > PAGE_SIZE)
+ cpylen = PAGE_SIZE - offset; // multiple pages
+ else
+ cpylen = len; // this page
+ len = len - cpylen;
+
+ page = page_read(mapping, index);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ if (memcmp(page_address(page)+offset, buf, cpylen)) {
+ lock_page(page);
+ memcpy(page_address(page) + offset, buf, cpylen);
+ set_page_dirty(page);
+ unlock_page(page);
+ balance_dirty_pages_ratelimited(mapping);
+ }
+ put_page(page);
+
+ if (retlen)
+ *retlen += cpylen;
+
+ buf += cpylen;
+ offset = 0;
+ index++;
+ }
+ return 0;
+}
+
+
+static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct block2mtd_dev *dev = mtd->priv;
+ int err;
+
+ mutex_lock(&dev->write_mutex);
+ err = _block2mtd_write(dev, buf, to, len, retlen);
+ mutex_unlock(&dev->write_mutex);
+ if (err > 0)
+ err = 0;
+ return err;
+}
+
+
+/* sync the device - wait until the write queue is empty */
+static void block2mtd_sync(struct mtd_info *mtd)
+{
+ struct block2mtd_dev *dev = mtd->priv;
+ sync_blockdev(dev->blkdev);
+ return;
+}
+
+
+static void block2mtd_free_device(struct block2mtd_dev *dev)
+{
+ if (!dev)
+ return;
+
+ kfree(dev->mtd.name);
+
+ if (dev->blkdev) {
+ invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
+ 0, -1);
+ blkdev_put(dev->blkdev, NULL);
+ }
+
+ kfree(dev);
+}
+
+/*
+ * This function is marked __ref because it calls the __init marked
+ * early_lookup_bdev when called from the early boot code.
+ */
+static struct block_device __ref *mdtblock_early_get_bdev(const char *devname,
+ blk_mode_t mode, int timeout, struct block2mtd_dev *dev)
+{
+ struct block_device *bdev = ERR_PTR(-ENODEV);
+#ifndef MODULE
+ int i;
+
+ /*
+ * We can't use early_lookup_bdev from a running system.
+ */
+ if (system_state >= SYSTEM_RUNNING)
+ return bdev;
+
+ /*
+ * We might not have the root device mounted at this point.
+ * Try to resolve the device name by other means.
+ */
+ for (i = 0; i <= timeout; i++) {
+ dev_t devt;
+
+ if (i)
+ /*
+ * Calling wait_for_device_probe in the first loop
+ * was not enough, sleep for a bit in subsequent
+ * go-arounds.
+ */
+ msleep(1000);
+ wait_for_device_probe();
+
+ if (!early_lookup_bdev(devname, &devt)) {
+ bdev = blkdev_get_by_dev(devt, mode, dev, NULL);
+ if (!IS_ERR(bdev))
+ break;
+ }
+ }
+#endif
+ return bdev;
+}
+
+static struct block2mtd_dev *add_device(char *devname, int erase_size,
+ char *label, int timeout)
+{
+ const blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE;
+ struct block_device *bdev;
+ struct block2mtd_dev *dev;
+ char *name;
+
+ if (!devname)
+ return NULL;
+
+ dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ /* Get a handle on the device */
+ bdev = blkdev_get_by_path(devname, mode, dev, NULL);
+ if (IS_ERR(bdev))
+ bdev = mdtblock_early_get_bdev(devname, mode, timeout, dev);
+ if (IS_ERR(bdev)) {
+ pr_err("error: cannot open device %s\n", devname);
+ goto err_free_block2mtd;
+ }
+ dev->blkdev = bdev;
+
+ if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
+ pr_err("attempting to use an MTD device as a block device\n");
+ goto err_free_block2mtd;
+ }
+
+ if ((long)dev->blkdev->bd_inode->i_size % erase_size) {
+ pr_err("erasesize must be a divisor of device size\n");
+ goto err_free_block2mtd;
+ }
+
+ mutex_init(&dev->write_mutex);
+
+ /* Setup the MTD structure */
+ /* make the name contain the block device in */
+ if (!label)
+ name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
+ else
+ name = kstrdup(label, GFP_KERNEL);
+ if (!name)
+ goto err_destroy_mutex;
+
+ dev->mtd.name = name;
+
+ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.erasesize = erase_size;
+ dev->mtd.writesize = 1;
+ dev->mtd.writebufsize = PAGE_SIZE;
+ dev->mtd.type = MTD_RAM;
+ dev->mtd.flags = MTD_CAP_RAM;
+ dev->mtd._erase = block2mtd_erase;
+ dev->mtd._write = block2mtd_write;
+ dev->mtd._sync = block2mtd_sync;
+ dev->mtd._read = block2mtd_read;
+ dev->mtd.priv = dev;
+ dev->mtd.owner = THIS_MODULE;
+
+ if (mtd_device_register(&dev->mtd, NULL, 0)) {
+ /* Device didn't get added, so free the entry */
+ goto err_destroy_mutex;
+ }
+
+ list_add(&dev->list, &blkmtd_device_list);
+ pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
+ dev->mtd.index,
+ label ? label : dev->mtd.name + strlen("block2mtd: "),
+ dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ return dev;
+
+err_destroy_mutex:
+ mutex_destroy(&dev->write_mutex);
+err_free_block2mtd:
+ block2mtd_free_device(dev);
+ return NULL;
+}
+
+
+/* This function works similarly to regular strtoul(). In addition, it
+ * allows some suffixes for a more human-readable number format:
+ * ki, Ki, kiB, KiB - multiply result by 1024
+ * Mi, MiB - multiply result by 1024^2
+ * Gi, GiB - multiply result by 1024^3
+ */
+static int ustrtoul(const char *cp, char **endp, unsigned int base)
+{
+ unsigned long result = simple_strtoul(cp, endp, base);
+ switch (**endp) {
+ case 'G' :
+ result *= 1024;
+ fallthrough;
+ case 'M':
+ result *= 1024;
+ fallthrough;
+ case 'K':
+ case 'k':
+ result *= 1024;
+ /* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
+ if ((*endp)[1] == 'i') {
+ if ((*endp)[2] == 'B')
+ (*endp) += 3;
+ else
+ (*endp) += 2;
+ }
+ }
+ return result;
+}
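+
+/*
+ * Examples (illustrative):
+ *   ustrtoul("4096", &p, 0)  -> 4096
+ *   ustrtoul("64KiB", &p, 0) -> 65536 (p advanced past "KiB")
+ *   ustrtoul("1Mi", &p, 0)   -> 1048576 (p advanced past "Mi")
+ * A bare "64K" is still multiplied, but leaves p pointing at the 'K', so
+ * parse_num() below rejects it; the "i"/"iB" tail is required.
+ */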
+
+
+static int parse_num(size_t *num, const char *token)
+{
+ char *endp;
+ size_t n;
+
+ n = (size_t) ustrtoul(token, &endp, 0);
+ if (*endp)
+ return -EINVAL;
+
+ *num = n;
+ return 0;
+}
+
+
+static inline void kill_final_newline(char *str)
+{
+ char *newline = strrchr(str, '\n');
+ if (newline && !newline[1])
+ *newline = 0;
+}
+
+
+#ifndef MODULE
+static int block2mtd_init_called = 0;
+/* 80 for device, 12 for erase size */
+static char block2mtd_paramline[80 + 12];
+#endif
+
+static int block2mtd_setup2(const char *val)
+{
+ /* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
+ char buf[80 + 12 + 80 + 8];
+ char *str = buf;
+ char *token[BLOCK2MTD_PARAM_MAX_COUNT];
+ char *name;
+ char *label = NULL;
+ size_t erase_size = PAGE_SIZE;
+ unsigned long timeout = MTD_DEFAULT_TIMEOUT;
+ int i, ret;
+
+ if (strnlen(val, sizeof(buf)) >= sizeof(buf)) {
+ pr_err("parameter too long\n");
+ return 0;
+ }
+
+ strcpy(str, val);
+ kill_final_newline(str);
+
+ for (i = 0; i < BLOCK2MTD_PARAM_MAX_COUNT; i++)
+ token[i] = strsep(&str, ",");
+
+ if (str) {
+ pr_err("too many arguments\n");
+ return 0;
+ }
+
+ if (!token[0]) {
+ pr_err("no argument\n");
+ return 0;
+ }
+
+ name = token[0];
+ if (strlen(name) + 1 > 80) {
+ pr_err("device name too long\n");
+ return 0;
+ }
+
+ /* Optional argument when custom label is used */
+ if (token[1] && strlen(token[1])) {
+ ret = parse_num(&erase_size, token[1]);
+ if (ret) {
+ pr_err("illegal erase size\n");
+ return 0;
+ }
+ }
+
+ if (token[2]) {
+ label = token[2];
+ pr_info("Using custom MTD label '%s' for dev %s\n", label, name);
+ }
+
+ add_device(name, erase_size, label, timeout);
+
+ return 0;
+}
+
+
+static int block2mtd_setup(const char *val, const struct kernel_param *kp)
+{
+#ifdef MODULE
+ return block2mtd_setup2(val);
+#else
+ /* If more parameters are later passed in via
+ /sys/module/block2mtd/parameters/block2mtd
+ and block2mtd_init() has already been called,
+ we can parse the argument now. */
+
+ if (block2mtd_init_called)
+ return block2mtd_setup2(val);
+
+ /* During the early boot stage, we only save the parameters
+ here. We must parse them later: if the param is passed
+ on the kernel boot command line, block2mtd_setup() is
+ called so early that it is not possible to resolve
+ the device (even kmalloc() fails). Defer that work to
+ block2mtd_setup2(). */
+
+ strscpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));
+
+ return 0;
+#endif
+}
+
+
+module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,[<erasesize>][,<label>]]\"");
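+
+/*
+ * Usage examples (illustrative, hypothetical device paths):
+ *   block2mtd=/dev/mmcblk0p3,64KiB   (64 KiB erase blocks)
+ *   block2mtd=/dev/loop0,,mylabel    (erase size defaults to PAGE_SIZE,
+ *                                     custom MTD label "mylabel")
+ */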
+
+static int __init block2mtd_init(void)
+{
+ int ret = 0;
+
+#ifndef MODULE
+ if (strlen(block2mtd_paramline))
+ ret = block2mtd_setup2(block2mtd_paramline);
+ block2mtd_init_called = 1;
+#endif
+
+ return ret;
+}
+
+
+static void block2mtd_exit(void)
+{
+ struct list_head *pos, *next;
+
+ /* Remove the MTD devices */
+ list_for_each_safe(pos, next, &blkmtd_device_list) {
+ struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
+ block2mtd_sync(&dev->mtd);
+ mtd_device_unregister(&dev->mtd);
+ mutex_destroy(&dev->write_mutex);
+ pr_info("mtd%d: [%s] removed\n",
+ dev->mtd.index,
+ dev->mtd.name + strlen("block2mtd: "));
+ list_del(&dev->list);
+ block2mtd_free_device(dev);
+ }
+}
+
+late_initcall(block2mtd_init);
+module_exit(block2mtd_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joern Engel <joern@lazybastard.org>");
+MODULE_DESCRIPTION("Emulate an MTD using a block device");
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
new file mode 100644
index 0000000000..22e73dd611
--- /dev/null
+++ b/drivers/mtd/devices/docg3.c
@@ -0,0 +1,2086 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Handles the M-Systems DiskOnChip G3 chip
+ *
+ * Copyright (C) 2011 Robert Jarzmik
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/bitmap.h>
+#include <linux/bitrev.h>
+#include <linux/bch.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define CREATE_TRACE_POINTS
+#include "docg3.h"
+
+/*
+ * This driver handles the DiskOnChip G3 flash memory.
+ *
+ * As no specification is available from M-Systems/Sandisk, this driver lacks
+ * several functions available on the chip, such as:
+ * - IPL write
+ *
+ * The bus data width (8 bits versus 16 bits) is not handled (if_cfg flag), and
+ * the driver assumes a 16-bit data bus.
+ *
+ * DocG3 relies on 2 ECC algorithms, which are handled in hardware:
+ * - a 1-byte Hamming code stored in the OOB for each page
+ * - a 7-byte BCH code stored in the OOB for each page
+ * The BCH ECC is:
+ * - BCH is in GF(2^14)
+ * - BCH is over data of 520 bytes (512 page + 7 page_info bytes
+ * + 1 hamming byte)
+ * - BCH can correct up to 4 bits (t = 4)
+ * - BCH syndromes are calculated in hardware, and checked in hardware as well
+ *
+ */
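+
+/*
+ * Consistency check of the parameters above (illustrative): GF(2^14) means
+ * m = 14, so the BCH ECC is m * t = 14 * 4 = 56 bits = 7 bytes, matching
+ * the 7 OOB ECC bytes, and the 520 covered bytes (4160 bits) fit well
+ * within the 2^14 - 1 = 16383 bit code length.
+ */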
+
+static unsigned int reliable_mode;
+module_param(reliable_mode, uint, 0);
+MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, "
+ "2=reliable) : MLC normal operations are in normal mode");
+
+static int docg3_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ /* byte 7 is Hamming ECC, byte 8-14 are BCH ECC */
+ oobregion->offset = 7;
+ oobregion->length = 8;
+
+ return 0;
+}
+
+static int docg3_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ /* free bytes: byte 0 until byte 6, byte 15 */
+ if (!section) {
+ oobregion->offset = 0;
+ oobregion->length = 7;
+ } else {
+ oobregion->offset = 15;
+ oobregion->length = 1;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops nand_ooblayout_docg3_ops = {
+ .ecc = docg3_ooblayout_ecc,
+ .free = docg3_ooblayout_free,
+};
+
+static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
+{
+ u8 val = readb(docg3->cascade->base + reg);
+
+ trace_docg3_io(0, 8, reg, (int)val);
+ return val;
+}
+
+static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
+{
+ u16 val = readw(docg3->cascade->base + reg);
+
+ trace_docg3_io(0, 16, reg, (int)val);
+ return val;
+}
+
+static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
+{
+ writeb(val, docg3->cascade->base + reg);
+ trace_docg3_io(1, 8, reg, val);
+}
+
+static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
+{
+ writew(val, docg3->cascade->base + reg);
+ trace_docg3_io(1, 16, reg, val);
+}
+
+static inline void doc_flash_command(struct docg3 *docg3, u8 cmd)
+{
+ doc_writeb(docg3, cmd, DOC_FLASHCOMMAND);
+}
+
+static inline void doc_flash_sequence(struct docg3 *docg3, u8 seq)
+{
+ doc_writeb(docg3, seq, DOC_FLASHSEQUENCE);
+}
+
+static inline void doc_flash_address(struct docg3 *docg3, u8 addr)
+{
+ doc_writeb(docg3, addr, DOC_FLASHADDRESS);
+}
+
+static char const * const part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+
+static int doc_register_readb(struct docg3 *docg3, int reg)
+{
+ u8 val;
+
+ doc_writew(docg3, reg, DOC_READADDRESS);
+ val = doc_readb(docg3, reg);
+ doc_vdbg("Read register %04x : %02x\n", reg, val);
+ return val;
+}
+
+static int doc_register_readw(struct docg3 *docg3, int reg)
+{
+ u16 val;
+
+ doc_writew(docg3, reg, DOC_READADDRESS);
+ val = doc_readw(docg3, reg);
+ doc_vdbg("Read register %04x : %04x\n", reg, val);
+ return val;
+}
+
+/**
+ * doc_delay - delay docg3 operations
+ * @docg3: the device
+ * @nbNOPs: the number of NOPs to issue
+ *
+ * As no specification is available, the right timings between chip commands are
+ * unknown. The only available piece of information is the observed NOPs on a
+ * working docg3 chip.
+ * Therefore, doc_delay relies on a busy loop of NOPs, instead of the
+ * scheduler-friendlier msleep() or the blocking mdelay().
+ */
+static void doc_delay(struct docg3 *docg3, int nbNOPs)
+{
+ int i;
+
+ doc_vdbg("NOP x %d\n", nbNOPs);
+ for (i = 0; i < nbNOPs; i++)
+ doc_writeb(docg3, 0, DOC_NOP);
+}
+
+static int is_prot_seq_error(struct docg3 *docg3)
+{
+ int ctrl;
+
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return ctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR);
+}
+
+static int doc_is_ready(struct docg3 *docg3)
+{
+ int ctrl;
+
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return ctrl & DOC_CTRL_FLASHREADY;
+}
+
+static int doc_wait_ready(struct docg3 *docg3)
+{
+ int maxWaitCycles = 100;
+
+ do {
+ doc_delay(docg3, 4);
+ cpu_relax();
+ } while (!doc_is_ready(docg3) && maxWaitCycles--);
+ doc_delay(docg3, 2);
+ if (maxWaitCycles > 0)
+ return 0;
+ else
+ return -EIO;
+}
+
+static int doc_reset_seq(struct docg3 *docg3)
+{
+ int ret;
+
+ doc_writeb(docg3, 0x10, DOC_FLASHCONTROL);
+ doc_flash_sequence(docg3, DOC_SEQ_RESET);
+ doc_flash_command(docg3, DOC_CMD_RESET);
+ doc_delay(docg3, 2);
+ ret = doc_wait_ready(docg3);
+
+ doc_dbg("doc_reset_seq() -> isReady=%s\n", ret ? "false" : "true");
+ return ret;
+}
+
+/**
+ * doc_read_data_area - Read data from data area
+ * @docg3: the device
+ * @buf: the buffer to fill in (might be NULL for dummy reads)
+ * @len: the length to read
+ * @first: first time read, DOC_READADDRESS should be set
+ *
+ * Reads bytes from flash data. Handles the single byte / even bytes reads.
+ */
+static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
+ int first)
+{
+ int i, cdr, len4;
+ u16 data16, *dst16;
+ u8 data8, *dst8;
+
+ doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
+ cdr = len & 0x1;
+ len4 = len - cdr;
+
+ if (first)
+ doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
+ dst16 = buf;
+ for (i = 0; i < len4; i += 2) {
+ data16 = doc_readw(docg3, DOC_IOSPACE_DATA);
+ if (dst16) {
+ *dst16 = data16;
+ dst16++;
+ }
+ }
+
+ if (cdr) {
+ doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
+ DOC_READADDRESS);
+ doc_delay(docg3, 1);
+ dst8 = (u8 *)dst16;
+ for (i = 0; i < cdr; i++) {
+ data8 = doc_readb(docg3, DOC_IOSPACE_DATA);
+ if (dst8) {
+ *dst8 = data8;
+ dst8++;
+ }
+ }
+ }
+}
+
+/**
+ * doc_write_data_area - Write data into data area
+ * @docg3: the device
+ * @buf: the buffer to get input bytes from
+ * @len: the length to write
+ *
+ * Writes bytes into flash data. Handles the single byte / even bytes writes.
+ */
+static void doc_write_data_area(struct docg3 *docg3, const void *buf, int len)
+{
+ int i, cdr, len4;
+ u16 *src16;
+ u8 *src8;
+
+ doc_dbg("doc_write_data_area(buf=%p, len=%d)\n", buf, len);
+ cdr = len & 0x3;
+ len4 = len - cdr;
+
+ doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
+ src16 = (u16 *)buf;
+ for (i = 0; i < len4; i += 2) {
+ doc_writew(docg3, *src16, DOC_IOSPACE_DATA);
+ src16++;
+ }
+
+ src8 = (u8 *)src16;
+ for (i = 0; i < cdr; i++) {
+ doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
+ DOC_READADDRESS);
+ doc_writeb(docg3, *src8, DOC_IOSPACE_DATA);
+ src8++;
+ }
+}
+
+/**
+ * doc_set_reliable_mode - Sets the flash to normal or reliable data mode
+ * @docg3: the device
+ *
+ * The reliable data mode is a bit slower than the fast mode, but fewer errors
+ * occur. Entering the reliable mode cannot be done without entering the fast
+ * mode first.
+ *
+ * In reliable mode, pages 2*n and 2*n+1 are clones. Writing to page 0 of blocks
+ * (4,5) makes the hardware also write to page 1 of blocks (4,5). Reading
+ * from page 0 of blocks (4,5) or from page 1 of blocks (4,5) gives the same
+ * result, which is a logical AND between bytes from page 0 and page 1 (which is
+ * consistent with the fact that writing to a page is _clearing_ bits of that
+ * page).
+ */
+static void doc_set_reliable_mode(struct docg3 *docg3)
+{
+ static char *strmode[] = { "normal", "fast", "reliable", "invalid" };
+
+ doc_dbg("doc_set_reliable_mode(%s)\n", strmode[docg3->reliable]);
+ switch (docg3->reliable) {
+ case 0:
+ break;
+ case 1:
+ doc_flash_sequence(docg3, DOC_SEQ_SET_FASTMODE);
+ doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+ break;
+ case 2:
+ doc_flash_sequence(docg3, DOC_SEQ_SET_RELIABLEMODE);
+ doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+ doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
+ break;
+ default:
+ doc_err("doc_set_reliable_mode(): invalid mode\n");
+ break;
+ }
+ doc_delay(docg3, 2);
+}
+
+/**
+ * doc_set_asic_mode - Set the ASIC mode
+ * @docg3: the device
+ * @mode: the mode
+ *
+ * The ASIC can work in 3 modes:
+ * - RESET: all registers are zeroed
+ * - NORMAL: receives and handles commands
+ * - POWERDOWN: minimal power use, flash parts shut off
+ */
+static void doc_set_asic_mode(struct docg3 *docg3, u8 mode)
+{
+ int i;
+
+ for (i = 0; i < 12; i++)
+ doc_readb(docg3, DOC_IOSPACE_IPL);
+
+ mode |= DOC_ASICMODE_MDWREN;
+ doc_dbg("doc_set_asic_mode(%02x)\n", mode);
+ doc_writeb(docg3, mode, DOC_ASICMODE);
+ doc_writeb(docg3, ~mode, DOC_ASICMODECONFIRM);
+ doc_delay(docg3, 1);
+}
+
+/**
+ * doc_set_device_id - Sets the device id for cascaded G3 chips
+ * @docg3: the device
+ * @id: the chip to select (amongst 0, 1, 2, 3)
+ *
+ * There can be 4 cascaded G3 chips. This function selects the one which
+ * should be the active one.
+ */
+static void doc_set_device_id(struct docg3 *docg3, int id)
+{
+ u8 ctrl;
+
+ doc_dbg("doc_set_device_id(%d)\n", id);
+ doc_writeb(docg3, id, DOC_DEVICESELECT);
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+
+ ctrl &= ~DOC_CTRL_VIOLATION;
+ ctrl |= DOC_CTRL_CE;
+ doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
+}
+
+/**
+ * doc_set_extra_page_mode - Change flash page layout
+ * @docg3: the device
+ *
+ * Normally, the flash page is split into the data (512 bytes) and the out of
+ * band data (16 bytes). For each, 4 more bytes can be accessed, where the wear
+ * leveling counters are stored. To access this last area of 4 bytes, a special
+ * mode must be input to the flash ASIC.
+ *
+ * Returns 0 if no error occurred, -EIO otherwise.
+ */
+static int doc_set_extra_page_mode(struct docg3 *docg3)
+{
+ int fctrl;
+
+ doc_dbg("doc_set_extra_page_mode()\n");
+ doc_flash_sequence(docg3, DOC_SEQ_PAGE_SIZE_532);
+ doc_flash_command(docg3, DOC_CMD_PAGE_SIZE_532);
+ doc_delay(docg3, 2);
+
+ fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ if (fctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR))
+ return -EIO;
+ else
+ return 0;
+}
+
+/**
+ * doc_setup_addr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ */
+static void doc_setup_addr_sector(struct docg3 *docg3, int sector)
+{
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 1);
+}
+
+/**
+ * doc_setup_writeaddr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ * @ofs: the offset in the page, between 0 and (512 + 16 + 512)
+ */
+static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs)
+{
+ ofs = ofs >> 2;
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, ofs & 0xff);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 1);
+}
+
+/**
+ * doc_read_seek - Set both flash planes to the specified block, page for reading
+ * @docg3: the device
+ * @block0: the first plane block index
+ * @block1: the second plane block index
+ * @page: the page index within the block
+ * @wear: if true, read will occur on the 4 extra bytes of the wear area
+ * @ofs: offset in page to read
+ *
+ * Programs the flash even and odd planes to the specific block and page.
+ * Alternatively, programs the flash to the wear area of the specified page.
+ */
+static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page,
+ int wear, int ofs)
+{
+ int sector, ret = 0;
+
+ doc_dbg("doc_seek(blocks=(%d,%d), page=%d, ofs=%d, wear=%d)\n",
+ block0, block1, page, ofs, wear);
+
+ if (!wear && (ofs < 2 * DOC_LAYOUT_PAGE_SIZE)) {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
+ doc_delay(docg3, 2);
+ } else {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
+ doc_delay(docg3, 2);
+ }
+
+ doc_set_reliable_mode(docg3);
+ if (wear)
+ ret = doc_set_extra_page_mode(docg3);
+ if (ret)
+ goto out;
+
+ doc_flash_sequence(docg3, DOC_SEQ_READ);
+ sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+
+ sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ doc_delay(docg3, 1);
+
+out:
+ return ret;
+}
+
+/**
+ * doc_write_seek - Set both flash planes to the specified block, page for writing
+ * @docg3: the device
+ * @block0: the first plane block index
+ * @block1: the second plane block index
+ * @page: the page index within the block
+ * @ofs: offset in page to write
+ *
+ * Programs the flash even and odd planes to the specific block and page.
+ * Alternatively, programs the flash to the wear area of the specified page.
+ */
+static int doc_write_seek(struct docg3 *docg3, int block0, int block1, int page,
+ int ofs)
+{
+ int ret = 0, sector;
+
+ doc_dbg("doc_write_seek(blocks=(%d,%d), page=%d, ofs=%d)\n",
+ block0, block1, page, ofs);
+
+ doc_set_reliable_mode(docg3);
+
+ if (ofs < 2 * DOC_LAYOUT_PAGE_SIZE) {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
+ doc_delay(docg3, 2);
+ } else {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
+ doc_delay(docg3, 2);
+ }
+
+ doc_flash_sequence(docg3, DOC_SEQ_PAGE_SETUP);
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+
+ sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_setup_writeaddr_sector(docg3, sector, ofs);
+
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE3);
+ doc_delay(docg3, 2);
+ ret = doc_wait_ready(docg3);
+ if (ret)
+ goto out;
+
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+ sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_setup_writeaddr_sector(docg3, sector, ofs);
+ doc_delay(docg3, 1);
+
+out:
+ return ret;
+}
+
+
+/**
+ * doc_read_page_ecc_init - Initialize hardware ECC engine
+ * @docg3: the device
+ * @len: the number of bytes covered by the ECC (BCH covered)
+ *
+ * This function initializes the hardware ECC engine to compute the Hamming
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
+ *
+ * Return 0 if succeeded, -EIO on error
+ */
+static int doc_read_page_ecc_init(struct docg3 *docg3, int len)
+{
+ doc_writew(docg3, DOC_ECCCONF0_READ_MODE
+ | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
+ | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
+ DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+ doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return doc_wait_ready(docg3);
+}
+
+/**
+ * doc_write_page_ecc_init - Initialize hardware BCH ECC engine
+ * @docg3: the device
+ * @len: the number of bytes covered by the ECC (BCH covered)
+ *
+ * This function initializes the hardware ECC engine to compute the Hamming
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
+ *
+ * Return 0 if succeeded, -EIO on error
+ */
+static int doc_write_page_ecc_init(struct docg3 *docg3, int len)
+{
+ doc_writew(docg3, DOC_ECCCONF0_WRITE_MODE
+ | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
+ | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
+ DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+ doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return doc_wait_ready(docg3);
+}
+
+/**
+ * doc_ecc_disable - Disable Hamming and BCH ECC hardware calculator
+ * @docg3: the device
+ *
+ * Disables the hardware ECC generator and checker, for unchecked reads (as when
+ * reading OOB only or write status byte).
+ */
+static void doc_ecc_disable(struct docg3 *docg3)
+{
+ doc_writew(docg3, DOC_ECCCONF0_READ_MODE, DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+}
+
+/**
+ * doc_hamming_ecc_init - Initialize hardware Hamming ECC engine
+ * @docg3: the device
+ * @nb_bytes: the number of bytes covered by the ECC (Hamming covered)
+ *
+ * This function programs the ECC hardware to compute the hamming code on the
+ * last provided N bytes to the hardware generator.
+ */
+static void doc_hamming_ecc_init(struct docg3 *docg3, int nb_bytes)
+{
+ u8 ecc_conf1;
+
+ ecc_conf1 = doc_register_readb(docg3, DOC_ECCCONF1);
+ ecc_conf1 &= ~DOC_ECCCONF1_HAMMING_BITS_MASK;
+ ecc_conf1 |= (nb_bytes & DOC_ECCCONF1_HAMMING_BITS_MASK);
+ doc_writeb(docg3, ecc_conf1, DOC_ECCCONF1);
+}
+
+/**
+ * doc_ecc_bch_fix_data - Fix the read data from flash, if need be
+ * @docg3: the device
+ * @buf: the buffer of read data (512 + 7 + 1 bytes)
+ * @hwecc: the hardware calculated ECC.
+ * It's in fact recv_ecc ^ calc_ecc, where recv_ecc was read from OOB
+ * area data, and calc_ecc the ECC calculated by the hardware generator.
+ *
+ * Checks if the received data matches the ECC, and if an error is detected,
+ * tries to fix the bit flips (at most 4) in the buffer buf. As the docg3
+ * understands the (data, ecc, syndromes) in an inverted order in comparison to
+ * the BCH library, the function reverses the order of bits (i.e. bit7 and bit0,
+ * bit6 and bit1, ...) for all ECC data.
+ *
+ * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
+ * algorithm is used to decode this. However the hw operates on page
+ * data in a bit order that is the reverse of that of the bch alg,
+ * requiring that the bits be reversed on the result. Thanks to Ivan
+ * Djelic for his analysis.
+ *
+ * Returns number of fixed bits (0, 1, 2, 3, 4) or -EBADMSG if too many bit
+ * errors were detected and cannot be fixed.
+ */
+static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc)
+{
+ u8 ecc[DOC_ECC_BCH_SIZE];
+ int errorpos[DOC_ECC_BCH_T], i, numerrs;
+
+ for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
+ ecc[i] = bitrev8(hwecc[i]);
+ numerrs = bch_decode(docg3->cascade->bch, NULL,
+ DOC_ECC_BCH_COVERED_BYTES,
+ NULL, ecc, NULL, errorpos);
+ BUG_ON(numerrs == -EINVAL);
+ if (numerrs < 0)
+ goto out;
+
+ for (i = 0; i < numerrs; i++)
+ errorpos[i] = (errorpos[i] & ~7) | (7 - (errorpos[i] & 7));
+ for (i = 0; i < numerrs; i++)
+ if (errorpos[i] < DOC_ECC_BCH_COVERED_BYTES*8)
+ /* error is located in data, correct it */
+ change_bit(errorpos[i], buf);
+out:
+ doc_dbg("doc_ecc_bch_fix_data: flipped %d bits\n", numerrs);
+ return numerrs;
+}
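+
+/*
+ * Example (illustrative) of the bit-order fixup above: a bch_decode()
+ * error position of 5 (bit 5 of byte 0) maps to (5 & ~7) | (7 - 5) = 2,
+ * i.e. bit 2 of the same byte, since the hardware feeds each byte to the
+ * BCH machinery in the reverse bit order of the BCH library.
+ */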
+
+
+/**
+ * doc_read_page_prepare - Prepares reading data from a flash page
+ * @docg3: the device
+ * @block0: the first plane block index on flash memory
+ * @block1: the second plane block index on flash memory
+ * @page: the page index in the block
+ * @offset: the offset in the page (must be a multiple of 4)
+ *
+ * Prepares the page to be read in the flash memory :
+ * - tell ASIC to map the flash pages
+ * - tell ASIC to be in read mode
+ *
+ * After a call to this method, a call to doc_read_page_finish is mandatory,
+ * to end the read cycle of the flash.
+ *
+ * Read data from a flash page. The length to be read must be between 0 and
+ * (page_size + oob_size + wear_size), i.e. 532, and a multiple of 4 (because
+ * reading the extra bytes is not implemented).
+ *
+ * As pages are grouped by 2 (in 2 planes), reading from a page must be done
+ * in two steps:
+ * - one read of 512 bytes at offset 0
+ * - one read of 512 bytes at offset 512 + 16
+ *
+ * Returns 0 if successful, -EIO if a read error occurred.
+ */
+static int doc_read_page_prepare(struct docg3 *docg3, int block0, int block1,
+ int page, int offset)
+{
+ int wear_area = 0, ret = 0;
+
+ doc_dbg("doc_read_page_prepare(blocks=(%d,%d), page=%d, ofsInPage=%d)\n",
+ block0, block1, page, offset);
+ if (offset >= DOC_LAYOUT_WEAR_OFFSET)
+ wear_area = 1;
+ if (!wear_area && offset > (DOC_LAYOUT_PAGE_OOB_SIZE * 2))
+ return -EINVAL;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ goto err;
+
+ /* Program the flash address block and page */
+ ret = doc_read_seek(docg3, block0, block1, page, wear_area, offset);
+ if (ret)
+ goto err;
+
+ doc_flash_command(docg3, DOC_CMD_READ_ALL_PLANES);
+ doc_delay(docg3, 2);
+ doc_wait_ready(docg3);
+
+ doc_flash_command(docg3, DOC_CMD_SET_ADDR_READ);
+ doc_delay(docg3, 1);
+ if (offset >= DOC_LAYOUT_PAGE_SIZE * 2)
+ offset -= 2 * DOC_LAYOUT_PAGE_SIZE;
+ doc_flash_address(docg3, offset >> 2);
+ doc_delay(docg3, 1);
+ doc_wait_ready(docg3);
+
+ doc_flash_command(docg3, DOC_CMD_READ_FLASH);
+
+ return 0;
+err:
+ doc_writeb(docg3, 0, DOC_DATAEND);
+ doc_delay(docg3, 2);
+ return -EIO;
+}
+
+/**
+ * doc_read_page_getbytes - Reads bytes from a prepared page
+ * @docg3: the device
+ * @len: the number of bytes to be read (must be a multiple of 4)
+ * @buf: the buffer to be filled in (or NULL to discard the bytes)
+ * @first: 1 if first time read, DOC_READADDRESS should be set
+ * @last_odd: 1 if last read ended up on an odd byte
+ *
+ * Reads bytes from a prepared page. There is a subtlety here: if the last read
+ * ended up on an odd offset in the 1024-byte double page, i.e. between the 2
+ * planes, the first byte must be read separately. If a word (16 bit) read were
+ * used, the read would return the byte of plane 2 as both the low *and* the
+ * high byte, which would corrupt the read.
+ *
+ */
+static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
+ int first, int last_odd)
+{
+ if (last_odd && len > 0) {
+ doc_read_data_area(docg3, buf, 1, first);
+ doc_read_data_area(docg3, buf ? buf + 1 : buf, len - 1, 0);
+ } else {
+ doc_read_data_area(docg3, buf, len, first);
+ }
+ doc_delay(docg3, 2);
+ return len;
+}
+
+/**
+ * doc_write_page_putbytes - Writes bytes into a prepared page
+ * @docg3: the device
+ * @len: the number of bytes to be written
+ * @buf: the buffer of input bytes
+ *
+ */
+static void doc_write_page_putbytes(struct docg3 *docg3, int len,
+ const u_char *buf)
+{
+ doc_write_data_area(docg3, buf, len);
+ doc_delay(docg3, 2);
+}
+
+/**
+ * doc_get_bch_hw_ecc - Get hardware calculated BCH ECC
+ * @docg3: the device
+ * @hwecc: the 7-byte array where the hardware ECC will be stored
+ */
+static void doc_get_bch_hw_ecc(struct docg3 *docg3, u8 *hwecc)
+{
+ int i;
+
+ for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
+ hwecc[i] = doc_register_readb(docg3, DOC_BCH_HW_ECC(i));
+}
+
+/**
+ * doc_page_finish - Ends reading/writing of a flash page
+ * @docg3: the device
+ */
+static void doc_page_finish(struct docg3 *docg3)
+{
+ doc_writeb(docg3, 0, DOC_DATAEND);
+ doc_delay(docg3, 2);
+}
+
+/**
+ * doc_read_page_finish - Ends reading of a flash page
+ * @docg3: the device
+ *
+ * As a side effect, resets the chip selector to 0. This ensures that after each
+ * read operation, floor 0 is selected. Therefore, if the system halts, the
+ * next reboot will boot from floor 0, where the IPL is.
+ */
+static void doc_read_page_finish(struct docg3 *docg3)
+{
+ doc_page_finish(docg3);
+ doc_set_device_id(docg3, 0);
+}
+
+/**
+ * calc_block_sector - Calculate blocks, pages and ofs.
+ *
+ * @from: offset in flash
+ * @block0: first plane block index calculated
+ * @block1: second plane block index calculated
+ * @page: page calculated
+ * @ofs: offset in page
+ * @reliable: 0 if docg3 in normal mode, 1 if docg3 in fast mode, 2 if docg3 in
+ * reliable mode.
+ *
+ * The calculation is based on the reliable/normal mode. In normal mode, the 64
+ * pages of a block are available. In reliable mode, as pages 2*n and 2*n+1 are
+ * clones, only 32 pages per block are available.
+ */
+static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
+ int *ofs, int reliable)
+{
+ uint sector, pages_biblock;
+
+ pages_biblock = DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES;
+ if (reliable == 1 || reliable == 2)
+ pages_biblock /= 2;
+
+ sector = from / DOC_LAYOUT_PAGE_SIZE;
+ *block0 = sector / pages_biblock * DOC_LAYOUT_NBPLANES;
+ *block1 = *block0 + 1;
+ *page = sector % pages_biblock;
+ *page /= DOC_LAYOUT_NBPLANES;
+ if (reliable == 1 || reliable == 2)
+ *page *= 2;
+ if (sector % 2)
+ *ofs = DOC_LAYOUT_PAGE_OOB_SIZE;
+ else
+ *ofs = 0;
+}
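+
+/*
+ * Editorial worked example: with 512-byte pages, 64 pages per block and 2
+ * planes, an offset of 66560 bytes (sector 130) maps to:
+ * - normal mode (128 pages per block pair): blocks (2,3), page 1, ofs 0
+ * - reliable mode (64 pages per block pair): blocks (4,5), page 2, ofs 0
+ */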
+
+/**
+ * doc_read_oob - Read out of band bytes from flash
+ * @mtd: the device
+ * @from: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @ops: the mtd oob structure
+ *
+ * Reads flash memory OOB area of pages.
+ *
+ * Returns 0 or the maximum number of corrected bitflips if the read was
+ * successful, or -EIO, -EINVAL, -EBADMSG if an error occurred
+ */
+static int doc_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int block0, block1, page, ret, skip, ofs = 0;
+ u8 *oobbuf = ops->oobbuf;
+ u8 *buf = ops->datbuf;
+ size_t len, ooblen, nbdata, nboob;
+ u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
+ struct mtd_ecc_stats old_stats;
+ int max_bitflips = 0;
+
+ if (buf)
+ len = ops->len;
+ else
+ len = 0;
+ if (oobbuf)
+ ooblen = ops->ooblen;
+ else
+ ooblen = 0;
+
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+ oobbuf += ops->ooboffs;
+
+ doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+ from, ops->mode, buf, len, oobbuf, ooblen);
+ if (ooblen % DOC_LAYOUT_OOB_SIZE)
+ return -EINVAL;
+
+ ops->oobretlen = 0;
+ ops->retlen = 0;
+ ret = 0;
+ skip = from % DOC_LAYOUT_PAGE_SIZE;
+ mutex_lock(&docg3->cascade->lock);
+ old_stats = mtd->ecc_stats;
+ while (ret >= 0 && (len > 0 || ooblen > 0)) {
+ calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
+ nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE);
+ ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
+ if (ret < 0)
+ goto out;
+ ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
+ if (ret < 0)
+ goto err_in_read;
+ ret = doc_read_page_getbytes(docg3, skip, NULL, 1, 0);
+ if (ret < skip)
+ goto err_in_read;
+ ret = doc_read_page_getbytes(docg3, nbdata, buf, 0, skip % 2);
+ if (ret < nbdata)
+ goto err_in_read;
+ doc_read_page_getbytes(docg3,
+ DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
+ NULL, 0, (skip + nbdata) % 2);
+ ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0, 0);
+ if (ret < nboob)
+ goto err_in_read;
+ doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
+ NULL, 0, nboob % 2);
+
+ doc_get_bch_hw_ecc(docg3, hwecc);
+ eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
+
+ if (nboob >= DOC_LAYOUT_OOB_SIZE) {
+ doc_dbg("OOB - INFO: %*phC\n", 7, oobbuf);
+ doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]);
+ doc_dbg("OOB - BCH_ECC: %*phC\n", 7, oobbuf + 8);
+ doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]);
+ }
+ doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1);
+ doc_dbg("ECC HW_ECC: %*phC\n", 7, hwecc);
+
+ ret = -EIO;
+ if (is_prot_seq_error(docg3))
+ goto err_in_read;
+ ret = 0;
+ if ((block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) &&
+ (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) &&
+ (eccconf1 & DOC_ECCCONF1_PAGE_IS_WRITTEN) &&
+ (ops->mode != MTD_OPS_RAW) &&
+ (nbdata == DOC_LAYOUT_PAGE_SIZE)) {
+ ret = doc_ecc_bch_fix_data(docg3, buf, hwecc);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ ret = -EBADMSG;
+ }
+ if (ret > 0) {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max(max_bitflips, ret);
+ ret = max_bitflips;
+ }
+ }
+
+ doc_read_page_finish(docg3);
+ ops->retlen += nbdata;
+ ops->oobretlen += nboob;
+ buf += nbdata;
+ oobbuf += nboob;
+ len -= nbdata;
+ ooblen -= nboob;
+ from += DOC_LAYOUT_PAGE_SIZE;
+ skip = 0;
+ }
+
+out:
+ if (ops->stats) {
+ ops->stats->uncorrectable_errors +=
+ mtd->ecc_stats.failed - old_stats.failed;
+ ops->stats->corrected_bitflips +=
+ mtd->ecc_stats.corrected - old_stats.corrected;
+ }
+ mutex_unlock(&docg3->cascade->lock);
+ return ret;
+err_in_read:
+ doc_read_page_finish(docg3);
+ goto out;
+}
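+
+/*
+ * Editorial sketch, not driver code: a caller reading one page of data plus
+ * its 16 OOB bytes through the standard MTD API would fill a struct
+ * mtd_oob_ops roughly as below; mtd, from, databuf and oobbuf are assumed
+ * from the caller's context:
+ */
+#if 0
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OPS_PLACE_OOB,
+ .len = DOC_LAYOUT_PAGE_SIZE,
+ .ooblen = DOC_LAYOUT_OOB_SIZE,
+ .datbuf = databuf,
+ .oobbuf = oobbuf,
+ };
+ ret = mtd_read_oob(mtd, from, &ops);
+#endif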
+
+static int doc_reload_bbt(struct docg3 *docg3)
+{
+ int block = DOC_LAYOUT_BLOCK_BBT;
+ int ret = 0, nbpages, page;
+ u_char *buf = docg3->bbt;
+
+ nbpages = DIV_ROUND_UP(docg3->max_block + 1, 8 * DOC_LAYOUT_PAGE_SIZE);
+ for (page = 0; !ret && (page < nbpages); page++) {
+ ret = doc_read_page_prepare(docg3, block, block + 1,
+ page + DOC_LAYOUT_PAGE_BBT, 0);
+ if (!ret)
+ ret = doc_read_page_ecc_init(docg3,
+ DOC_LAYOUT_PAGE_SIZE);
+ if (!ret)
+ doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
+ buf, 1, 0);
+ buf += DOC_LAYOUT_PAGE_SIZE;
+ }
+ doc_read_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_block_isbad - Checks whether a block is good or not
+ * @mtd: the device
+ * @from: the offset to find the correct block
+ *
+ * Returns 1 if the block is bad, 0 if the block is good, -EINVAL if the block
+ * is out of range
+ */
+static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int block0, block1, page, ofs, is_good;
+
+ calc_block_sector(from, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n",
+ from, block0, block1, page, ofs);
+
+ if (block0 < DOC_LAYOUT_BLOCK_FIRST_DATA)
+ return 0;
+ if (block1 > docg3->max_block)
+ return -EINVAL;
+
+ is_good = docg3->bbt[block0 >> 3] & (1 << (block0 & 0x7));
+ return !is_good;
+}
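+
+/*
+ * Editorial worked example: the BBT is a bitmap with one bit per block, set
+ * when the block is good. For the block pair (6,7), block0 = 6, so the pair
+ * is good iff (bbt[0] & BIT(6)) is set; for the pair (20,21), iff
+ * (bbt[2] & BIT(4)) is set.
+ */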
+
+#if 0
+/**
+ * doc_get_erase_count - Get block erase count
+ * @docg3: the device
+ * @from: the offset in which the block is.
+ *
+ * Get the number of times a block was erased. The number is the maximum of
+ * erase times between first and second plane (which should be equal normally).
+ *
+ * Returns the number of erases, or -EINVAL or -EIO on error.
+ */
+static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
+{
+ u8 buf[DOC_LAYOUT_WEAR_SIZE];
+ int ret, plane1_erase_count, plane2_erase_count;
+ int block0, block1, page, ofs;
+
+ doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf);
+ if (from % DOC_LAYOUT_PAGE_SIZE)
+ return -EINVAL;
+ calc_block_sector(from, &block0, &block1, &page, &ofs, docg3->reliable);
+ if (block1 > docg3->max_block)
+ return -EINVAL;
+
+ ret = doc_reset_seq(docg3);
+ if (!ret)
+ ret = doc_read_page_prepare(docg3, block0, block1, page,
+ ofs + DOC_LAYOUT_WEAR_OFFSET);
+ if (!ret)
+ ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
+ buf, 1, 0);
+ doc_read_page_finish(docg3);
+
+ if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
+ return -EIO;
+ plane1_erase_count = (u8)(~buf[1]) | ((u8)(~buf[4]) << 8)
+ | ((u8)(~buf[5]) << 16);
+ plane2_erase_count = (u8)(~buf[3]) | ((u8)(~buf[6]) << 8)
+ | ((u8)(~buf[7]) << 16);
+
+ return max(plane1_erase_count, plane2_erase_count);
+}
+#endif
+
+/**
+ * doc_get_op_status - get erase/write operation status
+ * @docg3: the device
+ *
+ * Queries the status from the chip, and returns it
+ *
+ * Returns the status (bits DOC_PLANES_STATUS_*)
+ */
+static int doc_get_op_status(struct docg3 *docg3)
+{
+ u8 status;
+
+ doc_flash_sequence(docg3, DOC_SEQ_PLANES_STATUS);
+ doc_flash_command(docg3, DOC_CMD_PLANES_STATUS);
+ doc_delay(docg3, 5);
+
+ doc_ecc_disable(docg3);
+ doc_read_data_area(docg3, &status, 1, 1);
+ return status;
+}
+
+/**
+ * doc_write_erase_wait_status - wait for write or erase completion
+ * @docg3: the device
+ *
+ * Wait for the chip to be ready again after erase or write operation, and check
+ * erase/write status.
+ *
+ * Returns 0 if the operation succeeded, -EIO on an erase/write failure,
+ * -EAGAIN if the chip did not become ready before the timeout
+ */
+static int doc_write_erase_wait_status(struct docg3 *docg3)
+{
+ int i, status, ret = 0;
+
+ for (i = 0; !doc_is_ready(docg3) && i < 5; i++)
+ msleep(20);
+ if (!doc_is_ready(docg3)) {
+ doc_dbg("Timeout reached and the chip is still not ready\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ status = doc_get_op_status(docg3);
+ if (status & DOC_PLANES_STATUS_FAIL) {
+ doc_dbg("Erase/Write failed on (a) plane(s), status = %x\n",
+ status);
+ ret = -EIO;
+ }
+
+out:
+ doc_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_erase_block - Erase a couple of blocks
+ * @docg3: the device
+ * @block0: the first block to erase (leftmost plane)
+ * @block1: the second block to erase (rightmost plane)
+ *
+ * Erase both blocks, and return operation status
+ *
+ * Returns 0 if erase successful, -EIO on an erase issue, -EAGAIN if the chip
+ * stays busy for too long
+ */
+static int doc_erase_block(struct docg3 *docg3, int block0, int block1)
+{
+ int ret, sector;
+
+ doc_dbg("doc_erase_block(blocks=(%d,%d))\n", block0, block1);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ return -EIO;
+
+ doc_set_reliable_mode(docg3);
+ doc_flash_sequence(docg3, DOC_SEQ_ERASE);
+
+ sector = block0 << DOC_ADDR_BLOCK_SHIFT;
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ sector = block1 << DOC_ADDR_BLOCK_SHIFT;
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ doc_delay(docg3, 1);
+
+ doc_flash_command(docg3, DOC_CMD_ERASECYCLE2);
+ doc_delay(docg3, 2);
+
+ if (is_prot_seq_error(docg3)) {
+ doc_err("Erase blocks %d,%d error\n", block0, block1);
+ return -EIO;
+ }
+
+ return doc_write_erase_wait_status(docg3);
+}
+
+/**
+ * doc_erase - Erase a portion of the chip
+ * @mtd: the device
+ * @info: the erase info
+ *
+ * Erase a bunch of contiguous blocks, by pairs, as a "mtd" page of 1024 is
+ * split into 2 pages of 512 bytes on 2 contiguous blocks.
+ *
+ * Returns 0 if erase successful, -EINVAL if addressing error, -EIO if erase
+ * issue
+ */
+static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
+{
+ struct docg3 *docg3 = mtd->priv;
+ uint64_t len;
+ int block0, block1, page, ret = 0, ofs = 0;
+
+ doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len);
+
+ calc_block_sector(info->addr + info->len, &block0, &block1, &page,
+ &ofs, docg3->reliable);
+ if (info->addr + info->len > mtd->size || page || ofs)
+ return -EINVAL;
+
+ calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
+ doc_set_reliable_mode(docg3);
+ for (len = info->len; !ret && len > 0; len -= mtd->erasesize) {
+ ret = doc_erase_block(docg3, block0, block1);
+ block0 += 2;
+ block1 += 2;
+ }
+ mutex_unlock(&docg3->cascade->lock);
+
+ return ret;
+}
+
+/**
+ * doc_write_page - Write a single page to the chip
+ * @docg3: the device
+ * @to: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @buf: buffer to get bytes from
+ * @oob: buffer to get out of band bytes from (can be NULL if no OOB should be
+ * written)
+ * @autoecc: if 0, all 16 bytes from OOB are taken, regardless of HW Hamming or
+ * BCH computations. If 1, only bytes 0-7 and byte 15 are taken,
+ * remaining ones are filled with hardware Hamming and BCH
+ * computations. Its value is not meaningful if oob == NULL.
+ *
+ * Write one full page (ie. 1 page split on two planes), of 512 bytes, with the
+ * OOB data. The OOB ECC is automatically computed by the hardware Hamming and
+ * BCH generator if autoecc is not null.
+ *
+ * Returns 0 if write successful, -EIO if write error, -EAGAIN if timeout
+ */
+static int doc_write_page(struct docg3 *docg3, loff_t to, const u_char *buf,
+ const u_char *oob, int autoecc)
+{
+ int block0, block1, page, ret, ofs = 0;
+ u8 hwecc[DOC_ECC_BCH_SIZE], hamming;
+
+ doc_dbg("doc_write_page(to=%lld)\n", to);
+ calc_block_sector(to, &block0, &block1, &page, &ofs, docg3->reliable);
+
+ doc_set_device_id(docg3, docg3->device_id);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ goto err;
+
+ /* Program the flash address block and page */
+ ret = doc_write_seek(docg3, block0, block1, page, ofs);
+ if (ret)
+ goto err;
+
+ doc_write_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
+ doc_delay(docg3, 2);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_PAGE_SIZE, buf);
+
+ if (oob && autoecc) {
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ, oob);
+ doc_delay(docg3, 2);
+ oob += DOC_LAYOUT_OOB_UNUSED_OFS;
+
+ hamming = doc_register_readb(docg3, DOC_HAMMINGPARITY);
+ doc_delay(docg3, 2);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_HAMMING_SZ,
+ &hamming);
+ doc_delay(docg3, 2);
+
+ doc_get_bch_hw_ecc(docg3, hwecc);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_BCH_SZ, hwecc);
+ doc_delay(docg3, 2);
+
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_UNUSED_SZ, oob);
+ }
+ if (oob && !autoecc)
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_SIZE, oob);
+
+ doc_delay(docg3, 2);
+ doc_page_finish(docg3);
+ doc_delay(docg3, 2);
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE2);
+ doc_delay(docg3, 2);
+
+ /*
+ * The wait status will perform another doc_page_finish() call, but that
+ * seems to please the docg3, so leave it.
+ */
+ ret = doc_write_erase_wait_status(docg3);
+ return ret;
+err:
+ doc_read_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_guess_autoecc - Guess autoecc mode from mtd_oob_ops
+ * @ops: the oob operations
+ *
+ * Returns 0 or 1 if success, -EINVAL if invalid oob mode
+ */
+static int doc_guess_autoecc(struct mtd_oob_ops *ops)
+{
+ int autoecc;
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ autoecc = 1;
+ break;
+ case MTD_OPS_RAW:
+ autoecc = 0;
+ break;
+ default:
+ autoecc = -EINVAL;
+ }
+ return autoecc;
+}
+
+/**
+ * doc_fill_autooob - Fill a 16-byte OOB from 8 non-ECC bytes
+ * @dst: the target 16 bytes OOB buffer
+ * @oobsrc: the source 8 bytes non-ECC OOB buffer
+ *
+ */
+static void doc_fill_autooob(u8 *dst, u8 *oobsrc)
+{
+ memcpy(dst, oobsrc, DOC_LAYOUT_OOB_PAGEINFO_SZ);
+ dst[DOC_LAYOUT_OOB_UNUSED_OFS] = oobsrc[DOC_LAYOUT_OOB_PAGEINFO_SZ];
+}
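+
+/*
+ * Editorial worked example: an 8-byte autoplaced OOB {a,b,c,d,e,f,g,h}
+ * becomes the 16-byte on-flash OOB {a,b,c,d,e,f,g, HAM, BCH0..BCH6, h},
+ * where the HAM and BCH0..BCH6 bytes are filled in by the hardware in
+ * doc_write_page().
+ */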
+
+/**
+ * doc_backup_oob - Backup OOB into docg3 structure
+ * @docg3: the device
+ * @to: the page offset in the chip
+ * @ops: the OOB size and buffer
+ *
+ * As the docg3 should write a page with its OOB in one pass, and some userland
+ * applications do write_oob() to set up the OOB and then write(), store the OOB
+ * in a temporary buffer. This is very dangerous, as 2 concurrent
+ * applications could each store an OOB and then write their pages (which would
+ * result in one of them having its OOB corrupted).
+ *
+ * The only reliable way would be for userland to call doc_write_oob() with both
+ * the page data _and_ the OOB area.
+ *
+ * Returns 0 if success, -EINVAL if ops content invalid
+ */
+static int doc_backup_oob(struct docg3 *docg3, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ooblen = ops->ooblen, autoecc;
+
+ if (ooblen != DOC_LAYOUT_OOB_SIZE)
+ return -EINVAL;
+ autoecc = doc_guess_autoecc(ops);
+ if (autoecc < 0)
+ return autoecc;
+
+ docg3->oob_write_ofs = to;
+ docg3->oob_autoecc = autoecc;
+ if (ops->mode == MTD_OPS_AUTO_OOB) {
+ doc_fill_autooob(docg3->oob_write_buf, ops->oobbuf);
+ ops->oobretlen = 8;
+ } else {
+ memcpy(docg3->oob_write_buf, ops->oobbuf, DOC_LAYOUT_OOB_SIZE);
+ ops->oobretlen = DOC_LAYOUT_OOB_SIZE;
+ }
+ return 0;
+}
+
+/**
+ * doc_write_oob - Write out of band bytes to flash
+ * @mtd: the device
+ * @ofs: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @ops: the mtd oob structure
+ *
+ * Either write OOB data into a temporary buffer, for the subsequent page
+ * write. The provided OOB should be 16 bytes long. If a data buffer is provided
+ * as well, issue the page write.
+ * Or provide data without OOB, in which case an all-zeroed OOB will be used
+ * (ECC will still be filled in if asked for).
+ *
+ * Returns 0 if successful, -EINVAL if the mode, lengths or offset are invalid
+ */
+static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
+ struct mtd_oob_ops *ops)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int ret, autoecc, oobdelta;
+ u8 *oobbuf = ops->oobbuf;
+ u8 *buf = ops->datbuf;
+ size_t len, ooblen;
+ u8 oob[DOC_LAYOUT_OOB_SIZE];
+
+ if (buf)
+ len = ops->len;
+ else
+ len = 0;
+ if (oobbuf)
+ ooblen = ops->ooblen;
+ else
+ ooblen = 0;
+
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+ oobbuf += ops->ooboffs;
+
+ doc_dbg("doc_write_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+ ofs, ops->mode, buf, len, oobbuf, ooblen);
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ oobdelta = mtd->oobsize;
+ break;
+ case MTD_OPS_AUTO_OOB:
+ oobdelta = mtd->oobavail;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) ||
+ (ofs % DOC_LAYOUT_PAGE_SIZE))
+ return -EINVAL;
+ if (len && ooblen &&
+ (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
+ return -EINVAL;
+
+ ops->oobretlen = 0;
+ ops->retlen = 0;
+ ret = 0;
+ if (len == 0 && ooblen == 0)
+ return -EINVAL;
+ if (len == 0 && ooblen > 0)
+ return doc_backup_oob(docg3, ofs, ops);
+
+ autoecc = doc_guess_autoecc(ops);
+ if (autoecc < 0)
+ return autoecc;
+
+ mutex_lock(&docg3->cascade->lock);
+ while (!ret && len > 0) {
+ memset(oob, 0, sizeof(oob));
+ if (ofs == docg3->oob_write_ofs)
+ memcpy(oob, docg3->oob_write_buf, DOC_LAYOUT_OOB_SIZE);
+ else if (ooblen > 0 && ops->mode == MTD_OPS_AUTO_OOB)
+ doc_fill_autooob(oob, oobbuf);
+ else if (ooblen > 0)
+ memcpy(oob, oobbuf, DOC_LAYOUT_OOB_SIZE);
+ ret = doc_write_page(docg3, ofs, buf, oob, autoecc);
+
+ ofs += DOC_LAYOUT_PAGE_SIZE;
+ len -= DOC_LAYOUT_PAGE_SIZE;
+ buf += DOC_LAYOUT_PAGE_SIZE;
+ if (ooblen) {
+ oobbuf += oobdelta;
+ ooblen -= oobdelta;
+ ops->oobretlen += oobdelta;
+ }
+ ops->retlen += DOC_LAYOUT_PAGE_SIZE;
+ }
+
+ doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
+ return ret;
+}
+
+static struct docg3 *sysfs_dev2docg3(struct device *dev,
+ struct device_attribute *attr)
+{
+ int floor;
+ struct mtd_info **docg3_floors = dev_get_drvdata(dev);
+
+ floor = attr->attr.name[1] - '0';
+ if (floor < 0 || floor >= DOC_MAX_NBFLOORS)
+ return NULL;
+ else
+ return docg3_floors[floor]->priv;
+}
+
+static ssize_t dps0_is_key_locked(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int dps0;
+
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
+ dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+ doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
+
+ return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps1_is_key_locked(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int dps1;
+
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
+ dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+ doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
+
+ return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps0_insert_key(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int i;
+
+ if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+ return -EINVAL;
+
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
+ for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+ doc_writeb(docg3, buf[i], DOC_DPS0_KEY);
+ doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
+ return count;
+}
+
+static ssize_t dps1_insert_key(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int i;
+
+ if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+ return -EINVAL;
+
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
+ for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+ doc_writeb(docg3, buf[i], DOC_DPS1_KEY);
+ doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
+ return count;
+}
+
+#define FLOOR_SYSFS(id) { \
+ __ATTR(f##id##_dps0_is_keylocked, S_IRUGO, dps0_is_key_locked, NULL), \
+ __ATTR(f##id##_dps1_is_keylocked, S_IRUGO, dps1_is_key_locked, NULL), \
+ __ATTR(f##id##_dps0_protection_key, S_IWUSR|S_IWGRP, NULL, dps0_insert_key), \
+ __ATTR(f##id##_dps1_protection_key, S_IWUSR|S_IWGRP, NULL, dps1_insert_key), \
+}
+
+static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = {
+ FLOOR_SYSFS(0), FLOOR_SYSFS(1), FLOOR_SYSFS(2), FLOOR_SYSFS(3)
+};
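+
+/*
+ * Editorial note: FLOOR_SYSFS(id) expands to four attributes per floor, eg.
+ * for floor 0:
+ *
+ * f0_dps0_is_keylocked (read-only, "0" or "1")
+ * f0_dps1_is_keylocked (read-only, "0" or "1")
+ * f0_dps0_protection_key (write-only, expects the 8-byte DPS key)
+ * f0_dps1_protection_key (write-only, expects the 8-byte DPS key)
+ *
+ * so a key can be inserted from userland with something like
+ * printf '01234567' > /sys/devices/.../f0_dps0_protection_key
+ * (hypothetical path, for illustration only).
+ */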
+
+static int doc_register_sysfs(struct platform_device *pdev,
+ struct docg3_cascade *cascade)
+{
+ struct device *dev = &pdev->dev;
+ int floor;
+ int ret;
+ int i;
+
+ for (floor = 0;
+ floor < DOC_MAX_NBFLOORS && cascade->floors[floor];
+ floor++) {
+ for (i = 0; i < 4; i++) {
+ ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
+ if (ret)
+ goto remove_files;
+ }
+ }
+
+ return 0;
+
+remove_files:
+ do {
+ while (--i >= 0)
+ device_remove_file(dev, &doc_sys_attrs[floor][i]);
+ i = 4;
+ } while (--floor >= 0);
+
+ return ret;
+}
+
+static void doc_unregister_sysfs(struct platform_device *pdev,
+ struct docg3_cascade *cascade)
+{
+ struct device *dev = &pdev->dev;
+ int floor, i;
+
+ for (floor = 0; floor < DOC_MAX_NBFLOORS && cascade->floors[floor];
+ floor++)
+ for (i = 0; i < 4; i++)
+ device_remove_file(dev, &doc_sys_attrs[floor][i]);
+}
+
+/*
+ * Debug sysfs entries
+ */
+static int flashcontrol_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = s->private;
+
+ u8 fctrl;
+
+ mutex_lock(&docg3->cascade->lock);
+ fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ mutex_unlock(&docg3->cascade->lock);
+
+ seq_printf(s, "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
+ fctrl,
+ fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
+ fctrl & DOC_CTRL_CE ? "active" : "inactive",
+ fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
+ fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
+ fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(flashcontrol);
+
+static int asic_mode_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = s->private;
+
+ int pctrl, mode;
+
+ mutex_lock(&docg3->cascade->lock);
+ pctrl = doc_register_readb(docg3, DOC_ASICMODE);
+ mode = pctrl & 0x03;
+ mutex_unlock(&docg3->cascade->lock);
+
+ seq_printf(s,
+ "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
+ pctrl,
+ pctrl & DOC_ASICMODE_RAM_WE ? 1 : 0,
+ pctrl & DOC_ASICMODE_RSTIN_RESET ? 1 : 0,
+ pctrl & DOC_ASICMODE_BDETCT_RESET ? 1 : 0,
+ pctrl & DOC_ASICMODE_MDWREN ? 1 : 0,
+ pctrl & DOC_ASICMODE_POWERDOWN ? 1 : 0,
+ mode >> 1, mode & 0x1);
+
+ switch (mode) {
+ case DOC_ASICMODE_RESET:
+ seq_puts(s, "reset");
+ break;
+ case DOC_ASICMODE_NORMAL:
+ seq_puts(s, "normal");
+ break;
+ case DOC_ASICMODE_POWERDOWN:
+ seq_puts(s, "powerdown");
+ break;
+ }
+ seq_puts(s, ")\n");
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(asic_mode);
+
+static int device_id_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = s->private;
+ int id;
+
+ mutex_lock(&docg3->cascade->lock);
+ id = doc_register_readb(docg3, DOC_DEVICESELECT);
+ mutex_unlock(&docg3->cascade->lock);
+
+ seq_printf(s, "DeviceId = %d\n", id);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(device_id);
+
+static int protection_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = s->private;
+ int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
+
+ mutex_lock(&docg3->cascade->lock);
+ protect = doc_register_readb(docg3, DOC_PROTECTION);
+ dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+ dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW);
+ dps0_high = doc_register_readw(docg3, DOC_DPS0_ADDRHIGH);
+ dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+ dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW);
+ dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
+ mutex_unlock(&docg3->cascade->lock);
+
+ seq_printf(s, "Protection = 0x%02x (", protect);
+ if (protect & DOC_PROTECT_FOUNDRY_OTP_LOCK)
+ seq_puts(s, "FOUNDRY_OTP_LOCK,");
+ if (protect & DOC_PROTECT_CUSTOMER_OTP_LOCK)
+ seq_puts(s, "CUSTOMER_OTP_LOCK,");
+ if (protect & DOC_PROTECT_LOCK_INPUT)
+ seq_puts(s, "LOCK_INPUT,");
+ if (protect & DOC_PROTECT_STICKY_LOCK)
+ seq_puts(s, "STICKY_LOCK,");
+ if (protect & DOC_PROTECT_PROTECTION_ENABLED)
+ seq_puts(s, "PROTECTION ON,");
+ if (protect & DOC_PROTECT_IPL_DOWNLOAD_LOCK)
+ seq_puts(s, "IPL_DOWNLOAD_LOCK,");
+ if (protect & DOC_PROTECT_PROTECTION_ERROR)
+ seq_puts(s, "PROTECT_ERR,");
+ else
+ seq_puts(s, "NO_PROTECT_ERR");
+ seq_puts(s, ")\n");
+
+ seq_printf(s, "DPS0 = 0x%02x : Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
+ dps0, dps0_low, dps0_high,
+ !!(dps0 & DOC_DPS_OTP_PROTECTED),
+ !!(dps0 & DOC_DPS_READ_PROTECTED),
+ !!(dps0 & DOC_DPS_WRITE_PROTECTED),
+ !!(dps0 & DOC_DPS_HW_LOCK_ENABLED),
+ !!(dps0 & DOC_DPS_KEY_OK));
+ seq_printf(s, "DPS1 = 0x%02x : Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
+ dps1, dps1_low, dps1_high,
+ !!(dps1 & DOC_DPS_OTP_PROTECTED),
+ !!(dps1 & DOC_DPS_READ_PROTECTED),
+ !!(dps1 & DOC_DPS_WRITE_PROTECTED),
+ !!(dps1 & DOC_DPS_HW_LOCK_ENABLED),
+ !!(dps1 & DOC_DPS_KEY_OK));
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(protection);
+
+static void __init doc_dbg_register(struct mtd_info *floor)
+{
+ struct dentry *root = floor->dbg.dfs_dir;
+ struct docg3 *docg3 = floor->priv;
+
+ if (IS_ERR_OR_NULL(root)) {
+ if (IS_ENABLED(CONFIG_DEBUG_FS) &&
+ !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ dev_warn(floor->dev.parent,
+ "CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
+ return;
+ }
+
+ debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3,
+ &flashcontrol_fops);
+ debugfs_create_file("docg3_asic_mode", S_IRUSR, root, docg3,
+ &asic_mode_fops);
+ debugfs_create_file("docg3_device_id", S_IRUSR, root, docg3,
+ &device_id_fops);
+ debugfs_create_file("docg3_protection", S_IRUSR, root, docg3,
+ &protection_fops);
+}
+
+/**
+ * doc_set_driver_info - Fill the mtd_info structure and docg3 structure
+ * @chip_id: The chip ID of the supported chip
+ * @mtd: The structure to fill
+ */
+static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int cfg;
+
+ cfg = doc_register_readb(docg3, DOC_CONFIGURATION);
+ docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 1 : 0);
+ docg3->reliable = reliable_mode;
+
+ switch (chip_id) {
+ case DOC_CHIPID_G3:
+ mtd->name = devm_kasprintf(docg3->dev, GFP_KERNEL, "docg3.%d",
+ docg3->device_id);
+ if (!mtd->name)
+ return -ENOMEM;
+ docg3->max_block = 2047;
+ break;
+ }
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE;
+ if (docg3->reliable == 2)
+ mtd->size /= 2;
+ mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
+ if (docg3->reliable == 2)
+ mtd->erasesize /= 2;
+ mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
+ mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
+ mtd->_erase = doc_erase;
+ mtd->_read_oob = doc_read_oob;
+ mtd->_write_oob = doc_write_oob;
+ mtd->_block_isbad = doc_block_isbad;
+ mtd_set_ooblayout(mtd, &nand_ooblayout_docg3_ops);
+ mtd->oobavail = 8;
+ mtd->ecc_strength = DOC_ECC_BCH_T;
+
+ return 0;
+}
+
+/**
+ * doc_probe_device - Check if a device is available
+ * @cascade: the cascade of chips this devices will belong to
+ * @floor: the floor of the probed device
+ * @dev: the device
+ *
+ * Checks whether a device at the specified IO range, and floor is available.
+ *
+ * Returns a mtd_info struct if there is a device, NULL if none was found, or
+ * an ERR_PTR (eg. -ENOMEM) on error. If floor 0 is checked, a reset of the ASIC is
+ * launched.
+ */
+static struct mtd_info * __init
+doc_probe_device(struct docg3_cascade *cascade, int floor, struct device *dev)
+{
+ int ret, bbt_nbpages;
+ u16 chip_id, chip_id_inv;
+ struct docg3 *docg3;
+ struct mtd_info *mtd;
+
+ ret = -ENOMEM;
+ docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL);
+ if (!docg3)
+ goto nomem1;
+ mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+ if (!mtd)
+ goto nomem2;
+ mtd->priv = docg3;
+ mtd->dev.parent = dev;
+ bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
+ 8 * DOC_LAYOUT_PAGE_SIZE);
+ docg3->bbt = kcalloc(DOC_LAYOUT_PAGE_SIZE, bbt_nbpages, GFP_KERNEL);
+ if (!docg3->bbt)
+ goto nomem3;
+
+ docg3->dev = dev;
+ docg3->device_id = floor;
+ docg3->cascade = cascade;
+ doc_set_device_id(docg3, docg3->device_id);
+ if (!floor)
+ doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
+ doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL);
+
+ chip_id = doc_register_readw(docg3, DOC_CHIPID);
+ chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV);
+
+ ret = 0;
+ if (chip_id != (u16)(~chip_id_inv)) {
+ goto nomem4;
+ }
+
+ switch (chip_id) {
+ case DOC_CHIPID_G3:
+ doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n",
+ docg3->cascade->base, floor);
+ break;
+ default:
+ doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
+ goto nomem4;
+ }
+
+ ret = doc_set_driver_info(chip_id, mtd);
+ if (ret)
+ goto nomem4;
+
+ doc_hamming_ecc_init(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ);
+ doc_reload_bbt(docg3);
+ return mtd;
+
+nomem4:
+ kfree(docg3->bbt);
+nomem3:
+ kfree(mtd);
+nomem2:
+ kfree(docg3);
+nomem1:
+ return ret ? ERR_PTR(ret) : NULL;
+}
+
+/**
+ * doc_release_device - Release a docg3 floor
+ * @mtd: the device
+ */
+static void doc_release_device(struct mtd_info *mtd)
+{
+ struct docg3 *docg3 = mtd->priv;
+
+ mtd_device_unregister(mtd);
+ kfree(docg3->bbt);
+ kfree(docg3);
+ kfree(mtd);
+}
+
+/**
+ * docg3_resume - Awakens docg3 floor
+ * @pdev: platform device
+ *
+ * Returns 0 (always successful)
+ */
+static int docg3_resume(struct platform_device *pdev)
+{
+ int i;
+ struct docg3_cascade *cascade;
+ struct mtd_info **docg3_floors, *mtd;
+ struct docg3 *docg3;
+
+ cascade = platform_get_drvdata(pdev);
+ docg3_floors = cascade->floors;
+ mtd = docg3_floors[0];
+ docg3 = mtd->priv;
+
+ doc_dbg("docg3_resume()\n");
+ for (i = 0; i < 12; i++)
+ doc_readb(docg3, DOC_IOSPACE_IPL);
+ return 0;
+}
+
+/**
+ * docg3_suspend - Put in low power mode the docg3 floor
+ * @pdev: platform device
+ * @state: power state
+ *
+ * Shuts off most of the docg3 circuitry to lower power consumption.
+ *
+ * Returns 0 if suspend succeeded, -EIO if chip refused suspend
+ */
+static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int floor, i;
+ struct docg3_cascade *cascade;
+ struct mtd_info **docg3_floors, *mtd;
+ struct docg3 *docg3;
+ u8 ctrl, pwr_down;
+
+ cascade = platform_get_drvdata(pdev);
+ docg3_floors = cascade->floors;
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+ mtd = docg3_floors[floor];
+ if (!mtd)
+ continue;
+ docg3 = mtd->priv;
+
+ doc_writeb(docg3, floor, DOC_DEVICESELECT);
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ ctrl &= ~DOC_CTRL_VIOLATION & ~DOC_CTRL_CE;
+ doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
+
+ for (i = 0; i < 10; i++) {
+ usleep_range(3000, 4000);
+ pwr_down = doc_register_readb(docg3, DOC_POWERMODE);
+ if (pwr_down & DOC_POWERDOWN_READY)
+ break;
+ }
+ if (pwr_down & DOC_POWERDOWN_READY) {
+ doc_dbg("docg3_suspend(): floor %d powerdown ok\n",
+ floor);
+ } else {
+ doc_err("docg3_suspend(): floor %d powerdown failed\n",
+ floor);
+ return -EIO;
+ }
+ }
+
+ mtd = docg3_floors[0];
+ docg3 = mtd->priv;
+ doc_set_asic_mode(docg3, DOC_ASICMODE_POWERDOWN);
+ return 0;
+}
+
+/**
+ * docg3_probe - Probe the IO space for a DiskOnChip G3 chip
+ * @pdev: platform device
+ *
+ * Probes for a G3 chip at the specified IO space in the platform data
+ * resources. Floor 0 must be available.
+ *
+ * Returns 0 on success, -ENOMEM, -ENXIO on error
+ */
+static int __init docg3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtd_info *mtd;
+ struct resource *ress;
+ void __iomem *base;
+ int ret, floor;
+ struct docg3_cascade *cascade;
+
+ ret = -ENXIO;
+ ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ress) {
+ dev_err(dev, "No I/O memory resource defined\n");
+ return ret;
+ }
+
+ ret = -ENOMEM;
+ base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);
+ if (!base) {
+ dev_err(dev, "devm_ioremap dev failed\n");
+ return ret;
+ }
+
+ cascade = devm_kcalloc(dev, DOC_MAX_NBFLOORS, sizeof(*cascade),
+ GFP_KERNEL);
+ if (!cascade)
+ return ret;
+ cascade->base = base;
+ mutex_init(&cascade->lock);
+ cascade->bch = bch_init(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
+ DOC_ECC_BCH_PRIMPOLY, false);
+ if (!cascade->bch)
+ return ret;
+
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+ mtd = doc_probe_device(cascade, floor, dev);
+ if (IS_ERR(mtd)) {
+ ret = PTR_ERR(mtd);
+ goto err_probe;
+ }
+ if (!mtd) {
+ if (floor == 0)
+ goto notfound;
+ else
+ continue;
+ }
+ cascade->floors[floor] = mtd;
+ ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL,
+ 0);
+ if (ret)
+ goto err_probe;
+
+ doc_dbg_register(cascade->floors[floor]);
+ }
+
+ ret = doc_register_sysfs(pdev, cascade);
+ if (ret)
+ goto err_probe;
+
+ platform_set_drvdata(pdev, cascade);
+ return 0;
+
+notfound:
+ ret = -ENODEV;
+ dev_info(dev, "No supported DiskOnChip found\n");
+err_probe:
+ bch_free(cascade->bch);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+ if (cascade->floors[floor])
+ doc_release_device(cascade->floors[floor]);
+ return ret;
+}
+
+/**
+ * docg3_release - Release the driver
+ * @pdev: the platform device
+ *
+ * Returns 0
+ */
+static int docg3_release(struct platform_device *pdev)
+{
+ struct docg3_cascade *cascade = platform_get_drvdata(pdev);
+ struct docg3 *docg3 = cascade->floors[0]->priv;
+ int floor;
+
+ doc_unregister_sysfs(pdev, cascade);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+ if (cascade->floors[floor])
+ doc_release_device(cascade->floors[floor]);
+
+ bch_free(docg3->cascade->bch);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id docg3_dt_ids[] = {
+ { .compatible = "m-systems,diskonchip-g3" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, docg3_dt_ids);
+#endif
+
+static struct platform_driver g3_driver = {
+ .driver = {
+ .name = "docg3",
+ .of_match_table = of_match_ptr(docg3_dt_ids),
+ },
+ .suspend = docg3_suspend,
+ .resume = docg3_resume,
+ .remove = docg3_release,
+};
+
+module_platform_driver_probe(g3_driver, docg3_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
+MODULE_DESCRIPTION("MTD driver for DiskOnChip G3");
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
new file mode 100644
index 0000000000..2c0c5114e4
--- /dev/null
+++ b/drivers/mtd/devices/docg3.h
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Handles the M-Systems DiskOnChip G3 chip
+ *
+ * Copyright (C) 2011 Robert Jarzmik
+ */
+
+#ifndef _MTD_DOCG3_H
+#define _MTD_DOCG3_H
+
+#include <linux/mtd/mtd.h>
+
+/*
+ * Flash memory areas:
+ * - 0x0000 .. 0x07ff : IPL
+ * - 0x0800 .. 0x0fff : Data area
+ * - 0x1000 .. 0x17ff : Registers
+ * - 0x1800 .. 0x1fff : Unknown
+ */
+#define DOC_IOSPACE_IPL 0x0000
+#define DOC_IOSPACE_DATA 0x0800
+#define DOC_IOSPACE_SIZE 0x2000
+
+/*
+ * DOC G3 layout and addressing scheme
+ * A page address for the block "b", plane "P" and page "p":
+ * address = [bbbb bPpp pppp]
+ */
+
+#define DOC_ADDR_PAGE_MASK 0x3f
+#define DOC_ADDR_BLOCK_SHIFT 6
+#define DOC_LAYOUT_NBPLANES 2
+#define DOC_LAYOUT_PAGES_PER_BLOCK 64
+#define DOC_LAYOUT_PAGE_SIZE 512
+#define DOC_LAYOUT_OOB_SIZE 16
+#define DOC_LAYOUT_WEAR_SIZE 8
+#define DOC_LAYOUT_PAGE_OOB_SIZE \
+ (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_SIZE)
+#define DOC_LAYOUT_WEAR_OFFSET (DOC_LAYOUT_PAGE_OOB_SIZE * 2)
+#define DOC_LAYOUT_BLOCK_SIZE \
+ (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_PAGE_SIZE)
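+
+/*
+ * Editorial worked example: with DOC_ADDR_BLOCK_SHIFT = 6 and
+ * DOC_ADDR_PAGE_MASK = 0x3f, block 5 / page 3 encodes as
+ * (5 << 6) | 3 = 0x143, matching the [bbbb bPpp pppp] scheme above.
+ */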
+
+/*
+ * ECC related constants
+ */
+#define DOC_ECC_BCH_M 14
+#define DOC_ECC_BCH_T 4
+#define DOC_ECC_BCH_PRIMPOLY 0x4443
+#define DOC_ECC_BCH_SIZE 7
+#define DOC_ECC_BCH_COVERED_BYTES \
+ (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_PAGEINFO_SZ + \
+ DOC_LAYOUT_OOB_HAMMING_SZ)
+#define DOC_ECC_BCH_TOTAL_BYTES \
+ (DOC_ECC_BCH_COVERED_BYTES + DOC_LAYOUT_OOB_BCH_SZ)
+
+/*
+ * Blocks distribution
+ */
+#define DOC_LAYOUT_BLOCK_BBT 0
+#define DOC_LAYOUT_BLOCK_OTP 0
+#define DOC_LAYOUT_BLOCK_FIRST_DATA 6
+
+#define DOC_LAYOUT_PAGE_BBT 4
+
+/*
+ * Extra page OOB (16 bytes wide) layout
+ */
+#define DOC_LAYOUT_OOB_PAGEINFO_OFS 0
+#define DOC_LAYOUT_OOB_HAMMING_OFS 7
+#define DOC_LAYOUT_OOB_BCH_OFS 8
+#define DOC_LAYOUT_OOB_UNUSED_OFS 15
+#define DOC_LAYOUT_OOB_PAGEINFO_SZ 7
+#define DOC_LAYOUT_OOB_HAMMING_SZ 1
+#define DOC_LAYOUT_OOB_BCH_SZ 7
+#define DOC_LAYOUT_OOB_UNUSED_SZ 1
+
+
+#define DOC_CHIPID_G3 0x200
+#define DOC_ERASE_MARK 0xaa
+#define DOC_MAX_NBFLOORS 4
+/*
+ * Flash registers
+ */
+#define DOC_CHIPID 0x1000
+#define DOC_TEST 0x1004
+#define DOC_BUSLOCK 0x1006
+#define DOC_ENDIANCONTROL 0x1008
+#define DOC_DEVICESELECT 0x100a
+#define DOC_ASICMODE 0x100c
+#define DOC_CONFIGURATION 0x100e
+#define DOC_INTERRUPTCONTROL 0x1010
+#define DOC_READADDRESS 0x101a
+#define DOC_DATAEND 0x101e
+#define DOC_INTERRUPTSTATUS 0x1020
+
+#define DOC_FLASHSEQUENCE 0x1032
+#define DOC_FLASHCOMMAND 0x1034
+#define DOC_FLASHADDRESS 0x1036
+#define DOC_FLASHCONTROL 0x1038
+#define DOC_NOP 0x103e
+
+#define DOC_ECCCONF0 0x1040
+#define DOC_ECCCONF1 0x1042
+#define DOC_ECCPRESET 0x1044
+#define DOC_HAMMINGPARITY 0x1046
+#define DOC_BCH_HW_ECC(idx) (0x1048 + idx)
+
+#define DOC_PROTECTION 0x1056
+#define DOC_DPS0_KEY 0x105c
+#define DOC_DPS1_KEY 0x105e
+#define DOC_DPS0_ADDRLOW 0x1060
+#define DOC_DPS0_ADDRHIGH 0x1062
+#define DOC_DPS1_ADDRLOW 0x1064
+#define DOC_DPS1_ADDRHIGH 0x1066
+#define DOC_DPS0_STATUS 0x106c
+#define DOC_DPS1_STATUS 0x106e
+
+#define DOC_ASICMODECONFIRM 0x1072
+#define DOC_CHIPID_INV 0x1074
+#define DOC_POWERMODE 0x107c
+
+/*
+ * Flash sequences
+ * A sequence is preset before one or more commands are input to the chip.
+ */
+#define DOC_SEQ_RESET 0x00
+#define DOC_SEQ_PAGE_SIZE_532 0x03
+#define DOC_SEQ_SET_FASTMODE 0x05
+#define DOC_SEQ_SET_RELIABLEMODE 0x09
+#define DOC_SEQ_READ 0x12
+#define DOC_SEQ_SET_PLANE1 0x0e
+#define DOC_SEQ_SET_PLANE2 0x10
+#define DOC_SEQ_PAGE_SETUP 0x1d
+#define DOC_SEQ_ERASE 0x27
+#define DOC_SEQ_PLANES_STATUS 0x31
+
+/*
+ * Flash commands
+ */
+#define DOC_CMD_READ_PLANE1 0x00
+#define DOC_CMD_SET_ADDR_READ 0x05
+#define DOC_CMD_READ_ALL_PLANES 0x30
+#define DOC_CMD_READ_PLANE2 0x50
+#define DOC_CMD_READ_FLASH 0xe0
+#define DOC_CMD_PAGE_SIZE_532 0x3c
+
+#define DOC_CMD_PROG_BLOCK_ADDR 0x60
+#define DOC_CMD_PROG_CYCLE1 0x80
+#define DOC_CMD_PROG_CYCLE2 0x10
+#define DOC_CMD_PROG_CYCLE3 0x11
+#define DOC_CMD_ERASECYCLE2 0xd0
+#define DOC_CMD_READ_STATUS 0x70
+#define DOC_CMD_PLANES_STATUS 0x71
+
+#define DOC_CMD_RELIABLE_MODE 0x22
+#define DOC_CMD_FAST_MODE 0xa2
+
+#define DOC_CMD_RESET 0xff
+
+/*
+ * Flash register : DOC_FLASHCONTROL
+ */
+#define DOC_CTRL_VIOLATION 0x20
+#define DOC_CTRL_CE 0x10
+#define DOC_CTRL_UNKNOWN_BITS 0x08
+#define DOC_CTRL_PROTECTION_ERROR 0x04
+#define DOC_CTRL_SEQUENCE_ERROR 0x02
+#define DOC_CTRL_FLASHREADY 0x01
+
+/*
+ * Flash register : DOC_ASICMODE
+ */
+#define DOC_ASICMODE_RESET 0x00
+#define DOC_ASICMODE_NORMAL 0x01
+#define DOC_ASICMODE_POWERDOWN 0x02
+#define DOC_ASICMODE_MDWREN 0x04
+#define DOC_ASICMODE_BDETCT_RESET 0x08
+#define DOC_ASICMODE_RSTIN_RESET 0x10
+#define DOC_ASICMODE_RAM_WE 0x20
+
+/*
+ * Flash register : DOC_ECCCONF0
+ */
+#define DOC_ECCCONF0_WRITE_MODE 0x0000
+#define DOC_ECCCONF0_READ_MODE 0x8000
+#define DOC_ECCCONF0_AUTO_ECC_ENABLE 0x4000
+#define DOC_ECCCONF0_HAMMING_ENABLE 0x1000
+#define DOC_ECCCONF0_BCH_ENABLE 0x0800
+#define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff
+
+/*
+ * Flash register : DOC_ECCCONF1
+ */
+#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
+#define DOC_ECCCONF1_UNKOWN1 0x40
+#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
+#define DOC_ECCCONF1_UNKOWN3 0x10
+#define DOC_ECCCONF1_HAMMING_BITS_MASK 0x0f
+
+/*
+ * Flash register : DOC_PROTECTION
+ */
+#define DOC_PROTECT_FOUNDRY_OTP_LOCK 0x01
+#define DOC_PROTECT_CUSTOMER_OTP_LOCK 0x02
+#define DOC_PROTECT_LOCK_INPUT 0x04
+#define DOC_PROTECT_STICKY_LOCK 0x08
+#define DOC_PROTECT_PROTECTION_ENABLED 0x10
+#define DOC_PROTECT_IPL_DOWNLOAD_LOCK 0x20
+#define DOC_PROTECT_PROTECTION_ERROR 0x80
+
+/*
+ * Flash register : DOC_DPS0_STATUS and DOC_DPS1_STATUS
+ */
+#define DOC_DPS_OTP_PROTECTED 0x01
+#define DOC_DPS_READ_PROTECTED 0x02
+#define DOC_DPS_WRITE_PROTECTED 0x04
+#define DOC_DPS_HW_LOCK_ENABLED 0x08
+#define DOC_DPS_KEY_OK 0x80
+
+/*
+ * Flash register : DOC_CONFIGURATION
+ */
+#define DOC_CONF_IF_CFG 0x80
+#define DOC_CONF_MAX_ID_MASK 0x30
+#define DOC_CONF_VCCQ_3V 0x01
+
+/*
+ * Flash register : DOC_READADDRESS
+ */
+#define DOC_READADDR_INC 0x8000
+#define DOC_READADDR_ONE_BYTE 0x4000
+#define DOC_READADDR_ADDR_MASK 0x1fff
+
+/*
+ * Flash register : DOC_POWERMODE
+ */
+#define DOC_POWERDOWN_READY 0x80
+
+/*
+ * Status of erase and write operation
+ */
+#define DOC_PLANES_STATUS_FAIL 0x01
+#define DOC_PLANES_STATUS_PLANE0_KO 0x02
+#define DOC_PLANES_STATUS_PLANE1_KO 0x04
+
+/*
+ * DPS key management
+ *
+ * Each floor of docg3 has 2 protection areas: DPS0 and DPS1. These areas span
+ * across block boundaries, and define whether these blocks can be read or
+ * written.
+ * The definition is dynamically stored in page 0 of blocks (2,3) for DPS0, and
+ * page 0 of blocks (4,5) for DPS1.
+ */
+#define DOC_LAYOUT_DPS_KEY_LENGTH 8
+
+/**
+ * struct docg3_cascade - Cascade of 1 to 4 docg3 chips
+ * @floors: floors (ie. one physical docg3 chip is one floor)
+ * @base: IO space to access all chips in the cascade
+ * @bch: the BCH correcting control structure
+ * @lock: lock to protect docg3 IO space from concurrent accesses
+ */
+struct docg3_cascade {
+ struct mtd_info *floors[DOC_MAX_NBFLOORS];
+ void __iomem *base;
+ struct bch_control *bch;
+ struct mutex lock;
+};
+
+/**
+ * struct docg3 - DiskOnChip driver private data
+ * @dev: the device currently under control
+ * @cascade: the cascade this device belongs to
+ * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
+ * @if_cfg: if true, reads are on 16bits, else reads are on 8bits
+ *
+ * @reliable: if 0, docg3 in normal mode, if 1 docg3 in fast mode, if 2 in
+ * reliable mode
+ * Fast mode implies more errors than normal mode.
+ * Reliable mode implies that page 2*n and 2*n+1 are clones.
+ * @bbt: bad block table cache
+ * @oob_write_ofs: offset of the MTD where this OOB should belong (ie. in next
+ * page_write)
+ * @oob_autoecc: if 1, use only bytes 0-7 and 15, and fill the others with
+ * hardware ECC; if 0, use all 16 bytes
+ * @oob_write_buf: prepared OOB for next page_write
+ */
+struct docg3 {
+ struct device *dev;
+ struct docg3_cascade *cascade;
+ unsigned int device_id:4;
+ unsigned int if_cfg:1;
+ unsigned int reliable:2;
+ int max_block;
+ u8 *bbt;
+ loff_t oob_write_ofs;
+ int oob_autoecc;
+ u8 oob_write_buf[DOC_LAYOUT_OOB_SIZE];
+};
+
+#define doc_err(fmt, arg...) dev_err(docg3->dev, (fmt), ## arg)
+#define doc_info(fmt, arg...) dev_info(docg3->dev, (fmt), ## arg)
+#define doc_dbg(fmt, arg...) dev_dbg(docg3->dev, (fmt), ## arg)
+#define doc_vdbg(fmt, arg...) dev_vdbg(docg3->dev, (fmt), ## arg)
+#endif
+
+/*
+ * Trace events part
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM docg3
+
+#if !defined(_MTD_DOCG3_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define _MTD_DOCG3_TRACE
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(docg3_io,
+ TP_PROTO(int op, int width, u16 reg, int val),
+ TP_ARGS(op, width, reg, val),
+ TP_STRUCT__entry(
+ __field(int, op)
+ __field(unsigned char, width)
+ __field(u16, reg)
+ __field(int, val)),
+ TP_fast_assign(
+ __entry->op = op;
+ __entry->width = width;
+ __entry->reg = reg;
+ __entry->val = val;),
+ TP_printk("docg3: %s%02d reg=%04x, val=%04x",
+ __entry->op ? "write" : "read", __entry->width,
+ __entry->reg, __entry->val)
+ );
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE docg3
+#include <trace/define_trace.h>
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
new file mode 100644
index 0000000000..d533475fda
--- /dev/null
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * mchp23k256.c
+ *
+ * Driver for Microchip 23k256 SPI RAM chips
+ *
+ * Copyright © 2016 Andrew Lunn <andrew@lunn.ch>
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/spi/flash.h>
+#include <linux/spi/spi.h>
+#include <linux/of.h>
+
+#define MAX_CMD_SIZE 4
+
+struct mchp23_caps {
+ u8 addr_width;
+ unsigned int size;
+};
+
+struct mchp23k256_flash {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct mtd_info mtd;
+ const struct mchp23_caps *caps;
+};
+
+#define MCHP23K256_CMD_WRITE_STATUS 0x01
+#define MCHP23K256_CMD_WRITE 0x02
+#define MCHP23K256_CMD_READ 0x03
+#define MCHP23K256_MODE_SEQ BIT(6)
+
+#define to_mchp23k256_flash(x) container_of(x, struct mchp23k256_flash, mtd)
+
+static void mchp23k256_addr2cmd(struct mchp23k256_flash *flash,
+ unsigned int addr, u8 *cmd)
+{
+ int i;
+
+ /*
+ * Address is sent in big endian (MSB first) and we skip
+ * the first entry of the cmd array which contains the cmd
+ * opcode.
+ */
+ for (i = flash->caps->addr_width; i > 0; i--, addr >>= 8)
+ cmd[i] = addr;
+}
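+
+/*
+ * Editorial worked example: for addr = 0x1234 and a 2-byte address width,
+ * the loop above fills cmd[1] = 0x12 and cmd[2] = 0x34 (MSB first), leaving
+ * cmd[0] free for the opcode.
+ */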
+
+static int mchp23k256_cmdsz(struct mchp23k256_flash *flash)
+{
+ return 1 + flash->caps->addr_width;
+}
+
+static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const unsigned char *buf)
+{
+ struct mchp23k256_flash *flash = to_mchp23k256_flash(mtd);
+ struct spi_transfer transfer[2] = {};
+ struct spi_message message;
+ unsigned char command[MAX_CMD_SIZE];
+ int ret, cmd_len;
+
+ spi_message_init(&message);
+
+ cmd_len = mchp23k256_cmdsz(flash);
+
+ command[0] = MCHP23K256_CMD_WRITE;
+ mchp23k256_addr2cmd(flash, to, command);
+
+ transfer[0].tx_buf = command;
+ transfer[0].len = cmd_len;
+ spi_message_add_tail(&transfer[0], &message);
+
+ transfer[1].tx_buf = buf;
+ transfer[1].len = len;
+ spi_message_add_tail(&transfer[1], &message);
+
+ mutex_lock(&flash->lock);
+
+ ret = spi_sync(flash->spi, &message);
+
+ mutex_unlock(&flash->lock);
+
+ if (ret)
+ return ret;
+
+ if (retlen && message.actual_length > cmd_len)
+ *retlen += message.actual_length - cmd_len;
+
+ return 0;
+}
+
+static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, unsigned char *buf)
+{
+ struct mchp23k256_flash *flash = to_mchp23k256_flash(mtd);
+ struct spi_transfer transfer[2] = {};
+ struct spi_message message;
+ unsigned char command[MAX_CMD_SIZE];
+ int ret, cmd_len;
+
+ spi_message_init(&message);
+
+ cmd_len = mchp23k256_cmdsz(flash);
+
+ command[0] = MCHP23K256_CMD_READ;
+ mchp23k256_addr2cmd(flash, from, command);
+
+ transfer[0].tx_buf = command;
+ transfer[0].len = cmd_len;
+ spi_message_add_tail(&transfer[0], &message);
+
+ transfer[1].rx_buf = buf;
+ transfer[1].len = len;
+ spi_message_add_tail(&transfer[1], &message);
+
+ mutex_lock(&flash->lock);
+
+ ret = spi_sync(flash->spi, &message);
+
+ mutex_unlock(&flash->lock);
+
+ if (ret)
+ return ret;
+
+ if (retlen && message.actual_length > cmd_len)
+ *retlen += message.actual_length - cmd_len;
+
+ return 0;
+}
+
+/*
+ * Set the device into sequential mode. This allows read/writes to the
+ * entire SRAM in a single operation
+ */
+static int mchp23k256_set_mode(struct spi_device *spi)
+{
+ struct spi_transfer transfer = {};
+ struct spi_message message;
+ unsigned char command[2];
+
+ spi_message_init(&message);
+
+ command[0] = MCHP23K256_CMD_WRITE_STATUS;
+ command[1] = MCHP23K256_MODE_SEQ;
+
+ transfer.tx_buf = command;
+ transfer.len = sizeof(command);
+ spi_message_add_tail(&transfer, &message);
+
+ return spi_sync(spi, &message);
+}
+
+static const struct mchp23_caps mchp23k256_caps = {
+ .size = SZ_32K,
+ .addr_width = 2,
+};
+
+static const struct mchp23_caps mchp23lcv1024_caps = {
+ .size = SZ_128K,
+ .addr_width = 3,
+};
+
+static int mchp23k256_probe(struct spi_device *spi)
+{
+ struct mchp23k256_flash *flash;
+ struct flash_platform_data *data;
+ int err;
+
+ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
+ if (!flash)
+ return -ENOMEM;
+
+ flash->spi = spi;
+ mutex_init(&flash->lock);
+ spi_set_drvdata(spi, flash);
+
+ err = mchp23k256_set_mode(spi);
+ if (err)
+ return err;
+
+ data = dev_get_platdata(&spi->dev);
+
+ flash->caps = of_device_get_match_data(&spi->dev);
+ if (!flash->caps)
+ flash->caps = &mchp23k256_caps;
+
+ mtd_set_of_node(&flash->mtd, spi->dev.of_node);
+ flash->mtd.dev.parent = &spi->dev;
+ flash->mtd.type = MTD_RAM;
+ flash->mtd.flags = MTD_CAP_RAM;
+ flash->mtd.writesize = 1;
+ flash->mtd.size = flash->caps->size;
+ flash->mtd._read = mchp23k256_read;
+ flash->mtd._write = mchp23k256_write;
+
+ err = mtd_device_register(&flash->mtd, data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void mchp23k256_remove(struct spi_device *spi)
+{
+ struct mchp23k256_flash *flash = spi_get_drvdata(spi);
+
+ WARN_ON(mtd_device_unregister(&flash->mtd));
+}
+
+static const struct of_device_id mchp23k256_of_table[] = {
+ {
+ .compatible = "microchip,mchp23k256",
+ .data = &mchp23k256_caps,
+ },
+ {
+ .compatible = "microchip,mchp23lcv1024",
+ .data = &mchp23lcv1024_caps,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mchp23k256_of_table);
+
+static const struct spi_device_id mchp23k256_spi_ids[] = {
+ {
+ .name = "mchp23k256",
+ .driver_data = (kernel_ulong_t)&mchp23k256_caps,
+ },
+ {
+ .name = "mchp23lcv1024",
+ .driver_data = (kernel_ulong_t)&mchp23lcv1024_caps,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, mchp23k256_spi_ids);
+
+static struct spi_driver mchp23k256_driver = {
+ .driver = {
+ .name = "mchp23k256",
+ .of_match_table = mchp23k256_of_table,
+ },
+ .probe = mchp23k256_probe,
+ .remove = mchp23k256_remove,
+ .id_table = mchp23k256_spi_ids,
+};
+
+module_spi_driver(mchp23k256_driver);
+
+MODULE_DESCRIPTION("MTD SPI driver for MCHP23K256 RAM chips");
+MODULE_AUTHOR("Andrew Lunn <andre@lunn.ch>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:mchp23k256");
diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
new file mode 100644
index 0000000000..f576e6a890
--- /dev/null
+++ b/drivers/mtd/devices/mchp48l640.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Microchip 48L640 64 Kb SPI Serial EERAM
+ *
+ * Copyright Heiko Schocher <hs@denx.de>
+ *
+ * datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20006055B.pdf
+ *
+ * we set continuous mode, but reading/writing more bytes than the
+ * page size seems to bring the chip into a state where the values
+ * read back are wrong ... no idea why.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/spi/flash.h>
+#include <linux/spi/spi.h>
+#include <linux/of.h>
+
+struct mchp48_caps {
+ unsigned int size;
+ unsigned int page_size;
+};
+
+struct mchp48l640_flash {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct mtd_info mtd;
+ const struct mchp48_caps *caps;
+};
+
+#define MCHP48L640_CMD_WREN 0x06
+#define MCHP48L640_CMD_WRDI 0x04
+#define MCHP48L640_CMD_WRITE 0x02
+#define MCHP48L640_CMD_READ 0x03
+#define MCHP48L640_CMD_WRSR 0x01
+#define MCHP48L640_CMD_RDSR 0x05
+
+#define MCHP48L640_STATUS_RDY 0x01
+#define MCHP48L640_STATUS_WEL 0x02
+#define MCHP48L640_STATUS_BP0 0x04
+#define MCHP48L640_STATUS_BP1 0x08
+#define MCHP48L640_STATUS_SWM 0x10
+#define MCHP48L640_STATUS_PRO 0x20
+#define MCHP48L640_STATUS_ASE 0x40
+
+#define MCHP48L640_TIMEOUT 100
+
+#define MAX_CMD_SIZE 0x10
+
+#define to_mchp48l640_flash(x) container_of(x, struct mchp48l640_flash, mtd)
+
+static int mchp48l640_mkcmd(struct mchp48l640_flash *flash, u8 cmd, loff_t addr, char *buf)
+{
+ buf[0] = cmd;
+ buf[1] = addr >> 8;
+ buf[2] = addr;
+
+ return 3;
+}
+
+static int mchp48l640_read_status(struct mchp48l640_flash *flash, int *status)
+{
+ unsigned char cmd[2];
+ int ret;
+
+ cmd[0] = MCHP48L640_CMD_RDSR;
+ cmd[1] = 0x00;
+ mutex_lock(&flash->lock);
+ ret = spi_write_then_read(flash->spi, &cmd[0], 1, &cmd[1], 1);
+ mutex_unlock(&flash->lock);
+ if (!ret)
+ *status = cmd[1];
+ dev_dbg(&flash->spi->dev, "read status ret: %d status: %x", ret, *status);
+
+ return ret;
+}
+
+static int mchp48l640_waitforbit(struct mchp48l640_flash *flash, int bit, bool set)
+{
+ int ret, status;
+ unsigned long deadline;
+
+ deadline = jiffies + msecs_to_jiffies(MCHP48L640_TIMEOUT);
+ do {
+ ret = mchp48l640_read_status(flash, &status);
+ dev_dbg(&flash->spi->dev, "read status ret: %d bit: %x %sset status: %x",
+ ret, bit, (set ? "" : "not"), status);
+ if (ret)
+ return ret;
+
+ if (set) {
+ if ((status & bit) == bit)
+ return 0;
+ } else {
+ if ((status & bit) == 0)
+ return 0;
+ }
+
+ usleep_range(1000, 2000);
+ } while (!time_after_eq(jiffies, deadline));
+
+ dev_err(&flash->spi->dev, "Timeout waiting for bit %x %s set in status register.",
+ bit, (set ? "" : "not"));
+ return -ETIMEDOUT;
+}
+
+static int mchp48l640_write_prepare(struct mchp48l640_flash *flash, bool enable)
+{
+ unsigned char cmd[2];
+ int ret;
+
+ if (enable)
+ cmd[0] = MCHP48L640_CMD_WREN;
+ else
+ cmd[0] = MCHP48L640_CMD_WRDI;
+
+ mutex_lock(&flash->lock);
+ ret = spi_write(flash->spi, cmd, 1);
+ mutex_unlock(&flash->lock);
+
+ if (ret)
+ dev_err(&flash->spi->dev, "write %sable failed ret: %d",
+ (enable ? "en" : "dis"), ret);
+
+ dev_dbg(&flash->spi->dev, "write %sable success ret: %d",
+ (enable ? "en" : "dis"), ret);
+ if (enable)
+ return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, true);
+
+ return ret;
+}
+
+static int mchp48l640_set_mode(struct mchp48l640_flash *flash)
+{
+ unsigned char cmd[2];
+ int ret;
+
+ ret = mchp48l640_write_prepare(flash, true);
+ if (ret)
+ return ret;
+
+ cmd[0] = MCHP48L640_CMD_WRSR;
+ cmd[1] = MCHP48L640_STATUS_PRO;
+
+ mutex_lock(&flash->lock);
+ ret = spi_write(flash->spi, cmd, 2);
+ mutex_unlock(&flash->lock);
+ if (ret)
+ dev_err(&flash->spi->dev, "Could not set continuous mode ret: %d", ret);
+
+ return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_PRO, true);
+}
+
+static int mchp48l640_wait_rdy(struct mchp48l640_flash *flash)
+{
+ return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_RDY, false);
+}
+
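+/*
+ * A page write is a three-step sequence: wait until the part is idle,
+ * issue WREN, then send WRITE plus the payload; completion is detected
+ * by the WEL bit clearing again.
+ */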
+static int mchp48l640_write_page(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const unsigned char *buf)
+{
+ struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd);
+ unsigned char *cmd;
+ int ret;
+ int cmdlen;
+
+ cmd = kmalloc((3 + len), GFP_KERNEL | GFP_DMA);
+ if (!cmd)
+ return -ENOMEM;
+
+ ret = mchp48l640_wait_rdy(flash);
+ if (ret)
+ goto fail;
+
+ ret = mchp48l640_write_prepare(flash, true);
+ if (ret)
+ goto fail;
+
+ mutex_lock(&flash->lock);
+ cmdlen = mchp48l640_mkcmd(flash, MCHP48L640_CMD_WRITE, to, cmd);
+ memcpy(&cmd[cmdlen], buf, len);
+ ret = spi_write(flash->spi, cmd, cmdlen + len);
+ mutex_unlock(&flash->lock);
+ if (!ret)
+ *retlen += len;
+ else
+ goto fail;
+
+ ret = mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, false);
+ if (ret)
+ goto fail;
+
+ kfree(cmd);
+ return 0;
+fail:
+ kfree(cmd);
+ dev_err(&flash->spi->dev, "write fail with: %d", ret);
+ return ret;
+}
+
+static int mchp48l640_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const unsigned char *buf)
+{
+ struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd);
+ int ret;
+ size_t wlen = 0;
+ loff_t woff = to;
+ size_t ws;
+ size_t page_sz = flash->caps->page_size;
+
+ /*
+ * We set the PRO bit (page rollover), but writing more than one
+ * page per command still results in total chaos, so write in
+ * page-sized (32 byte) chunks.
+ */
+ while (wlen < len) {
+ ws = min((len - wlen), page_sz);
+ ret = mchp48l640_write_page(mtd, woff, ws, retlen, &buf[wlen]);
+ if (ret)
+ return ret;
+ wlen += ws;
+ woff += ws;
+ }
+
+ return 0;
+}
+
+static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, unsigned char *buf)
+{
+ struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd);
+ unsigned char *cmd;
+ int ret;
+ int cmdlen;
+
+ cmd = kmalloc((3 + len), GFP_KERNEL | GFP_DMA);
+ if (!cmd)
+ return -ENOMEM;
+
+ ret = mchp48l640_wait_rdy(flash);
+ if (ret)
+ goto fail;
+
+ mutex_lock(&flash->lock);
+ cmdlen = mchp48l640_mkcmd(flash, MCHP48L640_CMD_READ, from, cmd);
+ ret = spi_write_then_read(flash->spi, cmd, cmdlen, buf, len);
+ mutex_unlock(&flash->lock);
+ if (!ret)
+ *retlen += len;
+
+ kfree(cmd);
+ return ret;
+
+fail:
+ kfree(cmd);
+ dev_err(&flash->spi->dev, "read fail with: %d", ret);
+ return ret;
+}
+
+static int mchp48l640_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, unsigned char *buf)
+{
+ struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd);
+ int ret;
+ size_t wlen = 0;
+ loff_t woff = from;
+ size_t ws;
+ size_t page_sz = flash->caps->page_size;
+
+ /*
+ * We set the PRO bit (page rollover), but reading more than one
+ * page per command garbles the result, so read in page-sized
+ * chunks as well.
+ */
+ while (wlen < len) {
+ ws = min((len - wlen), page_sz);
+ ret = mchp48l640_read_page(mtd, woff, ws, retlen, &buf[wlen]);
+ if (ret)
+ return ret;
+ wlen += ws;
+ woff += ws;
+ }
+
+ return 0;
+}
+
+static const struct mchp48_caps mchp48l640_caps = {
+ .size = SZ_8K,
+ .page_size = 32,
+};
+
+static int mchp48l640_probe(struct spi_device *spi)
+{
+ struct mchp48l640_flash *flash;
+ struct flash_platform_data *data;
+ int err;
+ int status;
+
+ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
+ if (!flash)
+ return -ENOMEM;
+
+ flash->spi = spi;
+ mutex_init(&flash->lock);
+ spi_set_drvdata(spi, flash);
+
+ err = mchp48l640_read_status(flash, &status);
+ if (err)
+ return err;
+
+ err = mchp48l640_set_mode(flash);
+ if (err)
+ return err;
+
+ data = dev_get_platdata(&spi->dev);
+
+ flash->caps = of_device_get_match_data(&spi->dev);
+ if (!flash->caps)
+ flash->caps = &mchp48l640_caps;
+
+ mtd_set_of_node(&flash->mtd, spi->dev.of_node);
+ flash->mtd.dev.parent = &spi->dev;
+ flash->mtd.type = MTD_RAM;
+ flash->mtd.flags = MTD_CAP_RAM;
+ flash->mtd.writesize = flash->caps->page_size;
+ flash->mtd.size = flash->caps->size;
+ flash->mtd._read = mchp48l640_read;
+ flash->mtd._write = mchp48l640_write;
+
+ err = mtd_device_register(&flash->mtd, data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void mchp48l640_remove(struct spi_device *spi)
+{
+ struct mchp48l640_flash *flash = spi_get_drvdata(spi);
+
+ WARN_ON(mtd_device_unregister(&flash->mtd));
+}
+
+static const struct of_device_id mchp48l640_of_table[] = {
+ {
+ .compatible = "microchip,48l640",
+ .data = &mchp48l640_caps,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mchp48l640_of_table);
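+
+/*
+ * Illustrative device tree entry for this EERAM on an SPI bus; the
+ * node name, chip select and clock rate are hypothetical:
+ *
+ *	eeram@0 {
+ *		compatible = "microchip,48l640";
+ *		reg = <0>;
+ *		spi-max-frequency = <20000000>;
+ *	};
+ */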
+
+static const struct spi_device_id mchp48l640_spi_ids[] = {
+ {
+ .name = "48l640",
+ .driver_data = (kernel_ulong_t)&mchp48l640_caps,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, mchp48l640_spi_ids);
+
+static struct spi_driver mchp48l640_driver = {
+ .driver = {
+ .name = "mchp48l640",
+ .of_match_table = mchp48l640_of_table,
+ },
+ .probe = mchp48l640_probe,
+ .remove = mchp48l640_remove,
+ .id_table = mchp48l640_spi_ids,
+};
+
+module_spi_driver(mchp48l640_driver);
+
+MODULE_DESCRIPTION("MTD SPI driver for Microchip 48l640 EERAM chips");
+MODULE_AUTHOR("Heiko Schocher <hs@denx.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:mchp48l640");
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
new file mode 100644
index 0000000000..08f76ff839
--- /dev/null
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2001 Maciej W. Rozycki
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/dec/ioasic_addrs.h>
+#include <asm/dec/kn02.h>
+#include <asm/dec/kn03.h>
+#include <asm/io.h>
+#include <asm/paccess.h>
+
+#include "ms02-nv.h"
+
+
+static char version[] __initdata =
+ "ms02-nv.c: v.1.0.0 13 Aug 2001 Maciej W. Rozycki.\n";
+
+MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
+MODULE_DESCRIPTION("DEC MS02-NV NVRAM module driver");
+MODULE_LICENSE("GPL");
+
+
+/*
+ * Addresses we probe for an MS02-NV at. Modules may be located
+ * at any 8MiB boundary within a 0MiB up to 112MiB range or at any 32MiB
+ * boundary within a 0MiB up to 448MiB range. We don't support a module
+ * at 0MiB, though.
+ */
+static ulong ms02nv_addrs[] __initdata = {
+ 0x07000000, 0x06800000, 0x06000000, 0x05800000, 0x05000000,
+ 0x04800000, 0x04000000, 0x03800000, 0x03000000, 0x02800000,
+ 0x02000000, 0x01800000, 0x01000000, 0x00800000
+};
+
+static const char ms02nv_name[] = "DEC MS02-NV NVRAM";
+static const char ms02nv_res_diag_ram[] = "Diagnostic RAM";
+static const char ms02nv_res_user_ram[] = "General-purpose RAM";
+static const char ms02nv_res_csr[] = "Control and status register";
+
+static struct mtd_info *root_ms02nv_mtd;
+
+
+static int ms02nv_read(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct ms02nv_private *mp = mtd->priv;
+
+ memcpy(buf, mp->uaddr + from, len);
+ *retlen = len;
+ return 0;
+}
+
+static int ms02nv_write(struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ struct ms02nv_private *mp = mtd->priv;
+
+ memcpy(mp->uaddr + to, buf, len);
+ *retlen = len;
+ return 0;
+}
+
+
+static inline uint ms02nv_probe_one(ulong addr)
+{
+ ms02nv_uint *ms02nv_diagp;
+ ms02nv_uint *ms02nv_magicp;
+ uint ms02nv_diag;
+ uint ms02nv_magic;
+ size_t size;
+
+ int err;
+
+ /*
+ * The firmware writes MS02NV_ID at MS02NV_MAGIC and also
+ * a diagnostic status at MS02NV_DIAG.
+ */
+ ms02nv_diagp = (ms02nv_uint *)(CKSEG1ADDR(addr + MS02NV_DIAG));
+ ms02nv_magicp = (ms02nv_uint *)(CKSEG1ADDR(addr + MS02NV_MAGIC));
+ err = get_dbe(ms02nv_magic, ms02nv_magicp);
+ if (err)
+ return 0;
+ if (ms02nv_magic != MS02NV_ID)
+ return 0;
+
+ ms02nv_diag = *ms02nv_diagp;
+ size = (ms02nv_diag & MS02NV_DIAG_SIZE_MASK) << MS02NV_DIAG_SIZE_SHIFT;
+ if (size > MS02NV_CSR)
+ size = MS02NV_CSR;
+
+ return size;
+}
+
+static int __init ms02nv_init_one(ulong addr)
+{
+ struct mtd_info *mtd;
+ struct ms02nv_private *mp;
+ struct resource *mod_res;
+ struct resource *diag_res;
+ struct resource *user_res;
+ struct resource *csr_res;
+ ulong fixaddr;
+ size_t size, fixsize;
+
+ static int version_printed;
+
+ int ret = -ENODEV;
+
+ /* The module decodes 8MiB of address space. */
+ mod_res = kzalloc(sizeof(*mod_res), GFP_KERNEL);
+ if (!mod_res)
+ return -ENOMEM;
+
+ mod_res->name = ms02nv_name;
+ mod_res->start = addr;
+ mod_res->end = addr + MS02NV_SLOT_SIZE - 1;
+ mod_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&iomem_resource, mod_res) < 0)
+ goto err_out_mod_res;
+
+ size = ms02nv_probe_one(addr);
+ if (!size)
+ goto err_out_mod_res_rel;
+
+ if (!version_printed) {
+ printk(KERN_INFO "%s", version);
+ version_printed = 1;
+ }
+
+ ret = -ENOMEM;
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ goto err_out_mod_res_rel;
+ mp = kzalloc(sizeof(*mp), GFP_KERNEL);
+ if (!mp)
+ goto err_out_mtd;
+
+ mtd->priv = mp;
+ mp->resource.module = mod_res;
+
+ /* Firmware's diagnostic NVRAM area. */
+ diag_res = kzalloc(sizeof(*diag_res), GFP_KERNEL);
+ if (!diag_res)
+ goto err_out_mp;
+
+ diag_res->name = ms02nv_res_diag_ram;
+ diag_res->start = addr;
+ diag_res->end = addr + MS02NV_RAM - 1;
+ diag_res->flags = IORESOURCE_BUSY;
+ request_resource(mod_res, diag_res);
+
+ mp->resource.diag_ram = diag_res;
+
+ /* User-available general-purpose NVRAM area. */
+ user_res = kzalloc(sizeof(*user_res), GFP_KERNEL);
+ if (!user_res)
+ goto err_out_diag_res;
+
+ user_res->name = ms02nv_res_user_ram;
+ user_res->start = addr + MS02NV_RAM;
+ user_res->end = addr + size - 1;
+ user_res->flags = IORESOURCE_BUSY;
+ request_resource(mod_res, user_res);
+
+ mp->resource.user_ram = user_res;
+
+ /* Control and status register. */
+ csr_res = kzalloc(sizeof(*csr_res), GFP_KERNEL);
+ if (!csr_res)
+ goto err_out_user_res;
+
+ csr_res->name = ms02nv_res_csr;
+ csr_res->start = addr + MS02NV_CSR;
+ csr_res->end = addr + MS02NV_CSR + 3;
+ csr_res->flags = IORESOURCE_BUSY;
+ request_resource(mod_res, csr_res);
+
+ mp->resource.csr = csr_res;
+
+ mp->addr = phys_to_virt(addr);
+ mp->size = size;
+
+ /*
+ * Hide the firmware's diagnostic area. It may get destroyed
+ * upon a reboot. Take paging into account for mapping support.
+ */
+ fixaddr = (addr + MS02NV_RAM + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+ fixsize = (size - (fixaddr - addr)) & ~(PAGE_SIZE - 1);
+ mp->uaddr = phys_to_virt(fixaddr);
+
+ mtd->type = MTD_RAM;
+ mtd->flags = MTD_CAP_RAM;
+ mtd->size = fixsize;
+ mtd->name = ms02nv_name;
+ mtd->owner = THIS_MODULE;
+ mtd->_read = ms02nv_read;
+ mtd->_write = ms02nv_write;
+ mtd->writesize = 1;
+
+ ret = -EIO;
+ if (mtd_device_register(mtd, NULL, 0)) {
+ printk(KERN_ERR
+ "ms02-nv: Unable to register MTD device, aborting!\n");
+ goto err_out_csr_res;
+ }
+
+ printk(KERN_INFO "mtd%d: %s at 0x%08lx, size %zuMiB.\n",
+ mtd->index, ms02nv_name, addr, size >> 20);
+
+ mp->next = root_ms02nv_mtd;
+ root_ms02nv_mtd = mtd;
+
+ return 0;
+
+
+err_out_csr_res:
+ release_resource(csr_res);
+ kfree(csr_res);
+err_out_user_res:
+ release_resource(user_res);
+ kfree(user_res);
+err_out_diag_res:
+ release_resource(diag_res);
+ kfree(diag_res);
+err_out_mp:
+ kfree(mp);
+err_out_mtd:
+ kfree(mtd);
+err_out_mod_res_rel:
+ release_resource(mod_res);
+err_out_mod_res:
+ kfree(mod_res);
+ return ret;
+}
+
+static void __exit ms02nv_remove_one(void)
+{
+ struct mtd_info *mtd = root_ms02nv_mtd;
+ struct ms02nv_private *mp = mtd->priv;
+
+ root_ms02nv_mtd = mp->next;
+
+ mtd_device_unregister(mtd);
+
+ release_resource(mp->resource.csr);
+ kfree(mp->resource.csr);
+ release_resource(mp->resource.user_ram);
+ kfree(mp->resource.user_ram);
+ release_resource(mp->resource.diag_ram);
+ kfree(mp->resource.diag_ram);
+ release_resource(mp->resource.module);
+ kfree(mp->resource.module);
+ kfree(mp);
+ kfree(mtd);
+}
+
+
+static int __init ms02nv_init(void)
+{
+ volatile u32 *csr;
+ uint stride = 0;
+ int count = 0;
+ int i;
+
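+ /*
+ * With 32MiB memory banks fitted, the address decode widens, so the
+ * probe addresses are scaled by 4 (a left shift of 2); this matches
+ * the 8MiB vs 32MiB boundary note above ms02nv_addrs[].
+ */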
+ switch (mips_machtype) {
+ case MACH_DS5000_200:
+ csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR);
+ if (*csr & KN02_CSR_BNK32M)
+ stride = 2;
+ break;
+ case MACH_DS5000_2X0:
+ case MACH_DS5900:
+ csr = (volatile u32 *)CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_MCR);
+ if (*csr & KN03_MCR_BNK32M)
+ stride = 2;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ms02nv_addrs); i++)
+ if (!ms02nv_init_one(ms02nv_addrs[i] << stride))
+ count++;
+
+ return (count > 0) ? 0 : -ENODEV;
+}
+
+static void __exit ms02nv_cleanup(void)
+{
+ while (root_ms02nv_mtd)
+ ms02nv_remove_one();
+}
+
+
+module_init(ms02nv_init);
+module_exit(ms02nv_cleanup);
diff --git a/drivers/mtd/devices/ms02-nv.h b/drivers/mtd/devices/ms02-nv.h
new file mode 100644
index 0000000000..737e4735db
--- /dev/null
+++ b/drivers/mtd/devices/ms02-nv.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2001, 2003 Maciej W. Rozycki
+ *
+ * DEC MS02-NV (54-20948-01) battery backed-up NVRAM module for
+ * DECstation/DECsystem 5000/2x0 and DECsystem 5900 and 5900/260
+ * systems.
+ */
+
+#include <linux/ioport.h>
+#include <linux/mtd/mtd.h>
+
+/*
+ * Addresses are decoded as follows:
+ *
+ * 0x000000 - 0x3fffff SRAM
+ * 0x400000 - 0x7fffff CSR
+ *
+ * Within the SRAM area the following ranges are forced by the system
+ * firmware:
+ *
+ * 0x000000 - 0x0003ff diagnostic area, destroyed upon a reboot
+ * 0x000400 - ENDofRAM storage area, available to operating systems
+ *
+ * but we can't really use the available area right from 0x000400 as
+ * the first word is used by the firmware as a status flag passed
+ * from an operating system. If anything but the valid data magic
+ * ID value is found, the firmware considers the SRAM clean, i.e.
+ * containing no valid data, and disables the battery resulting in
+ * data being erased as soon as power is switched off. So the choice
+ * for the start address of the user-available area is 0x001000, which
+ * is nicely page aligned. The area between 0x000404 and 0x000fff may
+ * be used by the driver for its own needs.
+ *
+ * The diagnostic area defines two status words to be read by an
+ * operating system, a magic ID to distinguish an MS02-NV board from
+ * anything else and status information providing the results of tests
+ * as well as the size of SRAM available, which can be 1MiB or 2MiB
+ * (that's what the firmware handles; no idea if 2MiB modules ever
+ * existed).
+ *
+ * The firmware only handles the MS02-NV board if installed in the
+ * last (15th) slot, so for any other location the status information
+ * stored in the SRAM cannot be relied upon. But from the hardware
+ * point of view there is no problem using up to 14 such boards in a
+ * system -- only the 1st slot needs to be filled with a DRAM module.
+ * The MS02-NV board is ECC-protected, like other MS02 memory boards.
+ *
+ * The state of the battery as provided by the CSR is reflected on
+ * the two onboard LEDs. When facing the battery side of the board,
+ * with the LEDs at the top left and the battery at the bottom right
+ * (i.e. looking from the back side of the system box), their meaning
+ * is as follows (the system has to be powered on):
+ *
+ * left LED battery disable status: lit = enabled
+ * right LED battery condition status: lit = OK
+ */
+
+/* MS02-NV iomem register offsets. */
+#define MS02NV_CSR 0x400000 /* control & status register */
+
+/* MS02-NV CSR status bits. */
+#define MS02NV_CSR_BATT_OK 0x01 /* battery OK */
+#define MS02NV_CSR_BATT_OFF 0x02 /* battery disabled */
+
+
+/* MS02-NV memory offsets. */
+#define MS02NV_DIAG 0x0003f8 /* diagnostic status */
+#define MS02NV_MAGIC 0x0003fc /* MS02-NV magic ID */
+#define MS02NV_VALID 0x000400 /* valid data magic ID */
+#define MS02NV_RAM 0x001000 /* user-exposed RAM start */
+
+/* MS02-NV diagnostic status bits. */
+#define MS02NV_DIAG_TEST 0x01 /* SRAM test done (?) */
+#define MS02NV_DIAG_RO 0x02 /* SRAM r/o test done */
+#define MS02NV_DIAG_RW 0x04 /* SRAM r/w test done */
+#define MS02NV_DIAG_FAIL 0x08 /* SRAM test failed */
+#define MS02NV_DIAG_SIZE_MASK 0xf0 /* SRAM size mask */
+#define MS02NV_DIAG_SIZE_SHIFT 0x10 /* SRAM size shift (left) */
+
+/* MS02-NV general constants. */
+#define MS02NV_ID 0x03021966 /* MS02-NV magic ID value */
+#define MS02NV_VALID_ID 0xbd100248 /* valid data magic ID value */
+#define MS02NV_SLOT_SIZE 0x800000 /* size of the address space
+ decoded by the module */
+
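+/*
+ * Illustrative only (not used by this driver): an operating system
+ * that wants its data preserved has to store the valid data magic ID
+ * in the first user word, e.g.
+ *
+ *	volatile u32 *validp =
+ *		(volatile u32 *)CKSEG1ADDR(addr + MS02NV_VALID);
+ *	*validp = MS02NV_VALID_ID;
+ *
+ * where "addr" is the module's probed base address (a hypothetical
+ * variable here). Any other value makes the firmware consider the
+ * SRAM clean and disable the battery, as described above.
+ */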
+
+typedef volatile u32 ms02nv_uint;
+
+struct ms02nv_private {
+ struct mtd_info *next;
+ struct {
+ struct resource *module;
+ struct resource *diag_ram;
+ struct resource *user_ram;
+ struct resource *csr;
+ } resource;
+ u_char *addr;
+ size_t size;
+ u_char *uaddr;
+};
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
new file mode 100644
index 0000000000..0c1b933036
--- /dev/null
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -0,0 +1,946 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Atmel AT45xxx DataFlash MTD driver for lightweight SPI framework
+ *
+ * Largely derived from at91_dataflash.c:
+ * Copyright (C) 2003-2005 SAN People (Pty) Ltd
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/math64.h>
+#include <linux/of.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+/*
+ * DataFlash is a kind of SPI flash. Most AT45 chips have two buffers in
+ * each chip, which may be used for double buffered I/O; but this driver
+ * doesn't (yet) use these for any kind of i/o overlap or prefetching.
+ *
+ * Sometimes DataFlash is packaged in MMC-format cards, although the
+ * MMC stack can't (yet?) distinguish between MMC and DataFlash
+ * protocols during enumeration.
+ */
+
+/* reads can bypass the buffers */
+#define OP_READ_CONTINUOUS 0xE8
+#define OP_READ_PAGE 0xD2
+
+/* group B requests can run even while status reports "busy" */
+#define OP_READ_STATUS 0xD7 /* group B */
+
+/* move data between host and buffer */
+#define OP_READ_BUFFER1 0xD4 /* group B */
+#define OP_READ_BUFFER2 0xD6 /* group B */
+#define OP_WRITE_BUFFER1 0x84 /* group B */
+#define OP_WRITE_BUFFER2 0x87 /* group B */
+
+/* erasing flash */
+#define OP_ERASE_PAGE 0x81
+#define OP_ERASE_BLOCK 0x50
+
+/* move data between buffer and flash */
+#define OP_TRANSFER_BUF1 0x53
+#define OP_TRANSFER_BUF2 0x55
+#define OP_MREAD_BUFFER1 0xD4
+#define OP_MREAD_BUFFER2 0xD6
+#define OP_MWERASE_BUFFER1 0x83
+#define OP_MWERASE_BUFFER2 0x86
+#define OP_MWRITE_BUFFER1 0x88 /* sector must be pre-erased */
+#define OP_MWRITE_BUFFER2 0x89 /* sector must be pre-erased */
+
+/* write to buffer, then write-erase to flash */
+#define OP_PROGRAM_VIA_BUF1 0x82
+#define OP_PROGRAM_VIA_BUF2 0x85
+
+/* compare buffer to flash */
+#define OP_COMPARE_BUF1 0x60
+#define OP_COMPARE_BUF2 0x61
+
+/* read flash to buffer, then write-erase to flash */
+#define OP_REWRITE_VIA_BUF1 0x58
+#define OP_REWRITE_VIA_BUF2 0x59
+
+/* newer chips report JEDEC manufacturer and device IDs; chip
+ * serial number and OTP bits; and per-sector writeprotect.
+ */
+#define OP_READ_ID 0x9F
+#define OP_READ_SECURITY 0x77
+#define OP_WRITE_SECURITY_REVC 0x9A
+#define OP_WRITE_SECURITY 0x9B /* revision D */
+
+#define CFI_MFR_ATMEL 0x1F
+
+#define DATAFLASH_SHIFT_EXTID 24
+#define DATAFLASH_SHIFT_ID 40
+
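+/*
+ * The five ID bytes read by jedec_probe() land in the top bytes of a
+ * big-endian u64, so shifting right by 40 leaves the three-byte JEDEC
+ * id (manufacturer plus two device id bytes), while shifting by 24
+ * keeps the two extended device information bytes as well.
+ */
+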
+struct dataflash {
+ u8 command[8]; /* opcode + 3 address bytes (+ 4 "don't care" bytes for continuous read) */
+ char name[24];
+
+ unsigned short page_offset; /* shift of the page number within a chip address */
+ unsigned int page_size; /* bytes per page */
+
+ struct mutex lock;
+ struct spi_device *spi;
+
+ struct mtd_info mtd;
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id dataflash_dt_ids[] = {
+ { .compatible = "atmel,at45", },
+ { .compatible = "atmel,dataflash", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dataflash_dt_ids);
+#endif
+
+static const struct spi_device_id dataflash_spi_ids[] = {
+ { .name = "at45", },
+ { .name = "dataflash", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, dataflash_spi_ids);
+
+/* ......................................................................... */
+
+/*
+ * Return the status of the DataFlash device.
+ */
+static inline int dataflash_status(struct spi_device *spi)
+{
+ /* NOTE: at45db321c over 25 MHz wants to write
+ * a dummy byte after the opcode...
+ */
+ return spi_w8r8(spi, OP_READ_STATUS);
+}
+
+/*
+ * Poll the DataFlash device until it is READY.
+ * This usually takes 5-20 msec or so; more for sector erase.
+ */
+static int dataflash_waitready(struct spi_device *spi)
+{
+ int status;
+
+ for (;;) {
+ status = dataflash_status(spi);
+ if (status < 0) {
+ dev_dbg(&spi->dev, "status %d?\n", status);
+ status = 0;
+ }
+
+ if (status & (1 << 7)) /* RDY/nBSY */
+ return status;
+
+ usleep_range(3000, 4000);
+ }
+}
+
+/* ......................................................................... */
+
+/*
+ * Erase pages of flash.
+ */
+static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct dataflash *priv = mtd->priv;
+ struct spi_device *spi = priv->spi;
+ struct spi_transfer x = { };
+ struct spi_message msg;
+ unsigned blocksize = priv->page_size << 3;
+ u8 *command;
+ u32 rem;
+
+ dev_dbg(&spi->dev, "erase addr=0x%llx len 0x%llx\n",
+ (long long)instr->addr, (long long)instr->len);
+
+ div_u64_rem(instr->len, priv->page_size, &rem);
+ if (rem)
+ return -EINVAL;
+ div_u64_rem(instr->addr, priv->page_size, &rem);
+ if (rem)
+ return -EINVAL;
+
+ spi_message_init(&msg);
+
+ x.tx_buf = command = priv->command;
+ x.len = 4;
+ spi_message_add_tail(&x, &msg);
+
+ mutex_lock(&priv->lock);
+ while (instr->len > 0) {
+ unsigned int pageaddr;
+ int status;
+ int do_block;
+
+ /* Calculate flash page address; use block erase (for speed) if
+ * we're at a block boundary and need to erase the whole block.
+ */
+ pageaddr = div_u64(instr->addr, priv->page_size);
+ do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
+ pageaddr = pageaddr << priv->page_offset;
+
+ command[0] = do_block ? OP_ERASE_BLOCK : OP_ERASE_PAGE;
+ command[1] = (u8)(pageaddr >> 16);
+ command[2] = (u8)(pageaddr >> 8);
+ command[3] = 0;
+
+ dev_dbg(&spi->dev, "ERASE %s: (%x) %x %x %x [%i]\n",
+ do_block ? "block" : "page",
+ command[0], command[1], command[2], command[3],
+ pageaddr);
+
+ status = spi_sync(spi, &msg);
+ (void) dataflash_waitready(spi);
+
+ if (status < 0) {
+ dev_err(&spi->dev, "erase %x, err %d\n",
+ pageaddr, status);
+ /* REVISIT: can retry instr->retries times; or
+ * giveup and instr->fail_addr = instr->addr;
+ */
+ continue;
+ }
+
+ if (do_block) {
+ instr->addr += blocksize;
+ instr->len -= blocksize;
+ } else {
+ instr->addr += priv->page_size;
+ instr->len -= priv->page_size;
+ }
+ }
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+/*
+ * Read from the DataFlash device.
+ * from : Start offset in flash device
+ * len : Amount to read
+ * retlen : Amount of data actually read
+ * buf : Buffer containing the data
+ */
+static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct dataflash *priv = mtd->priv;
+ struct spi_transfer x[2] = { };
+ struct spi_message msg;
+ unsigned int addr;
+ u8 *command;
+ int status;
+
+ dev_dbg(&priv->spi->dev, "read 0x%x..0x%x\n",
+ (unsigned int)from, (unsigned int)(from + len));
+
+ /* Calculate flash page/byte address */
+ addr = (((unsigned)from / priv->page_size) << priv->page_offset)
+ + ((unsigned)from % priv->page_size);
+
+ command = priv->command;
+
+ spi_message_init(&msg);
+
+ x[0].tx_buf = command;
+ x[0].len = 8;
+ spi_message_add_tail(&x[0], &msg);
+
+ x[1].rx_buf = buf;
+ x[1].len = len;
+ spi_message_add_tail(&x[1], &msg);
+
+ mutex_lock(&priv->lock);
+
+ /* Continuous read, max clock = f(car) which may be less than
+ * the peak rate available. Some chips support commands with
+ * fewer "don't care" bytes. Both buffers stay unchanged.
+ */
+ command[0] = OP_READ_CONTINUOUS;
+ command[1] = (u8)(addr >> 16);
+ command[2] = (u8)(addr >> 8);
+ command[3] = (u8)(addr >> 0);
+ /* plus 4 "don't care" bytes */
+
+ dev_dbg(&priv->spi->dev, "READ: (%x) %x %x %x\n",
+ command[0], command[1], command[2], command[3]);
+
+ status = spi_sync(priv->spi, &msg);
+ mutex_unlock(&priv->lock);
+
+ if (status >= 0) {
+ *retlen = msg.actual_length - 8;
+ status = 0;
+ } else
+ dev_dbg(&priv->spi->dev, "read %x..%x --> %d\n",
+ (unsigned)from, (unsigned)(from + len),
+ status);
+ return status;
+}
+
+/*
+ * Write to the DataFlash device.
+ * to : Start offset in flash device
+ * len : Amount to write
+ * retlen : Amount of data actually written
+ * buf : Buffer containing the data
+ */
+static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct dataflash *priv = mtd->priv;
+ struct spi_device *spi = priv->spi;
+ struct spi_transfer x[2] = { };
+ struct spi_message msg;
+ unsigned int pageaddr, addr, offset, writelen;
+ size_t remaining = len;
+ const u_char *writebuf = buf;
+ int status = -EINVAL;
+ u8 *command;
+
+ dev_dbg(&spi->dev, "write 0x%x..0x%x\n",
+ (unsigned int)to, (unsigned int)(to + len));
+
+ spi_message_init(&msg);
+
+ x[0].tx_buf = command = priv->command;
+ x[0].len = 4;
+ spi_message_add_tail(&x[0], &msg);
+
+ pageaddr = ((unsigned)to / priv->page_size);
+ offset = ((unsigned)to % priv->page_size);
+ if (offset + len > priv->page_size)
+ writelen = priv->page_size - offset;
+ else
+ writelen = len;
+
+ mutex_lock(&priv->lock);
+ while (remaining > 0) {
+ dev_dbg(&spi->dev, "write @ %i:%i len=%i\n",
+ pageaddr, offset, writelen);
+
+ /* REVISIT:
+ * (a) each page in a sector must be rewritten at least
+ * once every 10K sibling erase/program operations.
+ * (b) for pages that are already erased, we could
+ * use WRITE+MWRITE not PROGRAM for ~30% speedup.
+ * (c) WRITE to buffer could be done while waiting for
+ * a previous MWRITE/MWERASE to complete ...
+ * (d) error handling here seems to be mostly missing.
+ *
+ * Two persistent bits per page, plus a per-sector counter,
+ * could support (a) and (b) ... we might consider using
+ * the second half of sector zero, which is just one block,
+ * to track that state. (On AT91, that sector should also
+ * support boot-from-DataFlash.)
+ */
+
+ addr = pageaddr << priv->page_offset;
+
+ /* (1) Maybe transfer partial page to Buffer1 */
+ if (writelen != priv->page_size) {
+ command[0] = OP_TRANSFER_BUF1;
+ command[1] = (addr & 0x00FF0000) >> 16;
+ command[2] = (addr & 0x0000FF00) >> 8;
+ command[3] = 0;
+
+ dev_dbg(&spi->dev, "TRANSFER: (%x) %x %x %x\n",
+ command[0], command[1], command[2], command[3]);
+
+ status = spi_sync(spi, &msg);
+ if (status < 0)
+ dev_dbg(&spi->dev, "xfer %u -> %d\n",
+ addr, status);
+
+ (void) dataflash_waitready(priv->spi);
+ }
+
+ /* (2) Program full page via Buffer1 */
+ addr += offset;
+ command[0] = OP_PROGRAM_VIA_BUF1;
+ command[1] = (addr & 0x00FF0000) >> 16;
+ command[2] = (addr & 0x0000FF00) >> 8;
+ command[3] = (addr & 0x000000FF);
+
+ dev_dbg(&spi->dev, "PROGRAM: (%x) %x %x %x\n",
+ command[0], command[1], command[2], command[3]);
+
+ x[1].tx_buf = writebuf;
+ x[1].len = writelen;
+ spi_message_add_tail(x + 1, &msg);
+ status = spi_sync(spi, &msg);
+ spi_transfer_del(x + 1);
+ if (status < 0)
+ dev_dbg(&spi->dev, "pgm %u/%u -> %d\n",
+ addr, writelen, status);
+
+ (void) dataflash_waitready(priv->spi);
+
+
+#ifdef CONFIG_MTD_DATAFLASH_WRITE_VERIFY
+
+ /* (3) Compare to Buffer1 */
+ addr = pageaddr << priv->page_offset;
+ command[0] = OP_COMPARE_BUF1;
+ command[1] = (addr & 0x00FF0000) >> 16;
+ command[2] = (addr & 0x0000FF00) >> 8;
+ command[3] = 0;
+
+ dev_dbg(&spi->dev, "COMPARE: (%x) %x %x %x\n",
+ command[0], command[1], command[2], command[3]);
+
+ status = spi_sync(spi, &msg);
+ if (status < 0)
+ dev_dbg(&spi->dev, "compare %u -> %d\n",
+ addr, status);
+
+ status = dataflash_waitready(priv->spi);
+
+ /* Check result of the compare operation */
+ if (status & (1 << 6)) {
+ dev_err(&spi->dev, "compare page %u, err %d\n",
+ pageaddr, status);
+ remaining = 0;
+ status = -EIO;
+ break;
+ } else
+ status = 0;
+
+#endif /* CONFIG_MTD_DATAFLASH_WRITE_VERIFY */
+
+ remaining = remaining - writelen;
+ pageaddr++;
+ offset = 0;
+ writebuf += writelen;
+ *retlen += writelen;
+
+ if (remaining > priv->page_size)
+ writelen = priv->page_size;
+ else
+ writelen = remaining;
+ }
+ mutex_unlock(&priv->lock);
+
+ return status;
+}
+
+/* ......................................................................... */
+
+#ifdef CONFIG_MTD_DATAFLASH_OTP
+
+static int dataflash_get_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *info)
+{
+ /* Report both blocks as identical: bytes 0..63, locked.
+ * Unless the user block changed from all-ones, we can't
+ * tell whether it's still writable; so we assume it isn't.
+ */
+ info->start = 0;
+ info->length = 64;
+ info->locked = 1;
+ *retlen = sizeof(*info);
+ return 0;
+}
+
+static ssize_t otp_read(struct spi_device *spi, unsigned base,
+ u8 *buf, loff_t off, size_t len)
+{
+ struct spi_message m;
+ size_t l;
+ u8 *scratch;
+ struct spi_transfer t;
+ int status;
+
+ if (off > 64)
+ return -EINVAL;
+
+ if ((off + len) > 64)
+ len = 64 - off;
+
+ spi_message_init(&m);
+
+ l = 4 + base + off + len;
+ scratch = kzalloc(l, GFP_KERNEL);
+ if (!scratch)
+ return -ENOMEM;
+
+ /* OUT: OP_READ_SECURITY, 3 don't-care bytes, zeroes
+ * IN: ignore 4 bytes, data bytes 0..N (max 127)
+ */
+ scratch[0] = OP_READ_SECURITY;
+
+ memset(&t, 0, sizeof t);
+ t.tx_buf = scratch;
+ t.rx_buf = scratch;
+ t.len = l;
+ spi_message_add_tail(&t, &m);
+
+ dataflash_waitready(spi);
+
+ status = spi_sync(spi, &m);
+ if (status >= 0) {
+ memcpy(buf, scratch + 4 + base + off, len);
+ status = len;
+ }
+
+ kfree(scratch);
+ return status;
+}
+
+static int dataflash_read_fact_otp(struct mtd_info *mtd,
+ loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct dataflash *priv = mtd->priv;
+ int status;
+
+ /* 64 bytes, from 0..63 ... start at 64 on-chip */
+ mutex_lock(&priv->lock);
+ status = otp_read(priv->spi, 64, buf, from, len);
+ mutex_unlock(&priv->lock);
+
+ if (status < 0)
+ return status;
+ *retlen = status;
+ return 0;
+}
+
+static int dataflash_read_user_otp(struct mtd_info *mtd,
+ loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct dataflash *priv = mtd->priv;
+ int status;
+
+ /* 64 bytes, from 0..63 ... start at 0 on-chip */
+ mutex_lock(&priv->lock);
+ status = otp_read(priv->spi, 0, buf, from, len);
+ mutex_unlock(&priv->lock);
+
+ if (status < 0)
+ return status;
+ *retlen = status;
+ return 0;
+}
+
+static int dataflash_write_user_otp(struct mtd_info *mtd,
+ loff_t from, size_t len, size_t *retlen, const u_char *buf)
+{
+ struct spi_message m;
+ const size_t l = 4 + 64;
+ u8 *scratch;
+ struct spi_transfer t;
+ struct dataflash *priv = mtd->priv;
+ int status;
+
+ if (from >= 64) {
+ /*
+ * Attempting to write beyond the end of OTP memory,
+ * no data can be written.
+ */
+ *retlen = 0;
+ return 0;
+ }
+
+ /* Truncate the write to fit into OTP memory. */
+ if ((from + len) > 64)
+ len = 64 - from;
+
+ /* OUT: OP_WRITE_SECURITY, 3 zeroes, 64 data-or-zero bytes
+ * IN: ignore all
+ */
+ scratch = kzalloc(l, GFP_KERNEL);
+ if (!scratch)
+ return -ENOMEM;
+ scratch[0] = OP_WRITE_SECURITY;
+ memcpy(scratch + 4 + from, buf, len);
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof t);
+ t.tx_buf = scratch;
+ t.len = l;
+ spi_message_add_tail(&t, &m);
+
+ /* Write the OTP bits, if they've not yet been written.
+ * This modifies SRAM buffer1.
+ */
+ mutex_lock(&priv->lock);
+ dataflash_waitready(priv->spi);
+ status = spi_sync(priv->spi, &m);
+ mutex_unlock(&priv->lock);
+
+ kfree(scratch);
+
+ if (status >= 0) {
+ status = 0;
+ *retlen = len;
+ }
+ return status;
+}
+
+static char *otp_setup(struct mtd_info *device, char revision)
+{
+ device->_get_fact_prot_info = dataflash_get_otp_info;
+ device->_read_fact_prot_reg = dataflash_read_fact_otp;
+ device->_get_user_prot_info = dataflash_get_otp_info;
+ device->_read_user_prot_reg = dataflash_read_user_otp;
+
+ /* rev c parts (at45db321c and at45db1281 only!) use a
+ * different write procedure; not (yet?) implemented.
+ */
+ if (revision > 'c')
+ device->_write_user_prot_reg = dataflash_write_user_otp;
+
+ return ", OTP";
+}
+
+#else
+
+static char *otp_setup(struct mtd_info *device, char revision)
+{
+ return " (OTP)";
+}
+
+#endif
+
+/* ......................................................................... */
+
+/*
+ * Register DataFlash device with MTD subsystem.
+ */
+static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
+ int pagesize, int pageoffset, char revision)
+{
+ struct dataflash *priv;
+ struct mtd_info *device;
+ struct flash_platform_data *pdata = dev_get_platdata(&spi->dev);
+ char *otp_tag = "";
+ int err = 0;
+
+ priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->lock);
+ priv->spi = spi;
+ priv->page_size = pagesize;
+ priv->page_offset = pageoffset;
+
+ /* name must be usable with cmdlinepart */
+ sprintf(priv->name, "spi%d.%d-%s",
+ spi->master->bus_num, spi_get_chipselect(spi, 0),
+ name);
+
+ device = &priv->mtd;
+ device->name = (pdata && pdata->name) ? pdata->name : priv->name;
+ device->size = nr_pages * pagesize;
+ device->erasesize = pagesize;
+ device->writesize = pagesize;
+ device->type = MTD_DATAFLASH;
+ device->flags = MTD_WRITEABLE;
+ device->_erase = dataflash_erase;
+ device->_read = dataflash_read;
+ device->_write = dataflash_write;
+ device->priv = priv;
+
+ device->dev.parent = &spi->dev;
+ mtd_set_of_node(device, spi->dev.of_node);
+
+ if (revision >= 'c')
+ otp_tag = otp_setup(device, revision);
+
+ dev_info(&spi->dev, "%s (%lld KBytes) pagesize %d bytes%s\n",
+ name, (long long)((device->size + 1023) >> 10),
+ pagesize, otp_tag);
+ spi_set_drvdata(spi, priv);
+
+ err = mtd_device_register(device,
+ pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
+
+ if (!err)
+ return 0;
+
+ kfree(priv);
+ return err;
+}
+
+static inline int add_dataflash(struct spi_device *spi, char *name,
+ int nr_pages, int pagesize, int pageoffset)
+{
+ return add_dataflash_otp(spi, name, nr_pages, pagesize,
+ pageoffset, 0);
+}
+
+struct flash_info {
+ char *name;
+
+ /* JEDEC id has a high byte of zero plus three data bytes:
+ * the manufacturer id, then a two byte device id.
+ */
+ u64 jedec_id;
+
+ /* The size listed here is what works with OP_ERASE_PAGE. */
+ unsigned nr_pages;
+ u16 pagesize;
+ u16 pageoffset;
+
+ u16 flags;
+#define SUP_EXTID 0x0004 /* supports extended ID data */
+#define SUP_POW2PS 0x0002 /* supports 2^N byte pages */
+#define IS_POW2PS 0x0001 /* uses 2^N byte pages */
+};
+
+static struct flash_info dataflash_data[] = {
+
+ /*
+ * NOTE: chips with SUP_POW2PS (rev D and up) need two entries,
+ * one with IS_POW2PS and the other without. The entry with the
+ * non-2^N byte page size can't name exact chip revisions without
+ * losing backwards compatibility for cmdlinepart.
+ *
+ * These newer chips also support 128-byte security registers (with
+ * 64 bytes one-time-programmable) and software write-protection.
+ */
+ { "AT45DB011B", 0x1f2200, 512, 264, 9, SUP_POW2PS},
+ { "at45db011d", 0x1f2200, 512, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB021B", 0x1f2300, 1024, 264, 9, SUP_POW2PS},
+ { "at45db021d", 0x1f2300, 1024, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB041x", 0x1f2400, 2048, 264, 9, SUP_POW2PS},
+ { "at45db041d", 0x1f2400, 2048, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB081B", 0x1f2500, 4096, 264, 9, SUP_POW2PS},
+ { "at45db081d", 0x1f2500, 4096, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB161x", 0x1f2600, 4096, 528, 10, SUP_POW2PS},
+ { "at45db161d", 0x1f2600, 4096, 512, 9, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB321x", 0x1f2700, 8192, 528, 10, 0}, /* rev C */
+
+ { "AT45DB321x", 0x1f2701, 8192, 528, 10, SUP_POW2PS},
+ { "at45db321d", 0x1f2701, 8192, 512, 9, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB642x", 0x1f2800, 8192, 1056, 11, SUP_POW2PS},
+ { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB641E", 0x1f28000100ULL, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
+ { "at45db641e", 0x1f28000100ULL, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
+};
+
+static struct flash_info *jedec_lookup(struct spi_device *spi,
+ u64 jedec, bool use_extid)
+{
+ struct flash_info *info;
+ int status;
+
+ for (info = dataflash_data;
+ info < dataflash_data + ARRAY_SIZE(dataflash_data);
+ info++) {
+ if (use_extid && !(info->flags & SUP_EXTID))
+ continue;
+
+ if (info->jedec_id == jedec) {
+ dev_dbg(&spi->dev, "OTP, sector protect%s\n",
+ (info->flags & SUP_POW2PS) ?
+ ", binary pagesize" : "");
+ if (info->flags & SUP_POW2PS) {
+ status = dataflash_status(spi);
+ if (status < 0) {
+ dev_dbg(&spi->dev, "status error %d\n",
+ status);
+ return ERR_PTR(status);
+ }
+ if (status & 0x1) {
+ if (info->flags & IS_POW2PS)
+ return info;
+ } else {
+ if (!(info->flags & IS_POW2PS))
+ return info;
+ }
+ } else
+ return info;
+ }
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct flash_info *jedec_probe(struct spi_device *spi)
+{
+ int ret;
+ u8 code = OP_READ_ID;
+ u64 jedec;
+ u8 id[sizeof(jedec)] = {0};
+ const unsigned int id_size = 5;
+ struct flash_info *info;
+
+ /*
+ * JEDEC also defines an optional "extended device information"
+ * string that follows the vendor-specific data, after the three bytes
+ * we use here. Supporting some chips might require using it.
+ *
+ * If the vendor ID isn't Atmel's (0x1f), assume this call failed.
+ * That's not an error; only rev C and newer chips handle it, and
+ * only Atmel sells these chips.
+ */
+ ret = spi_write_then_read(spi, &code, 1, id, id_size);
+ if (ret < 0) {
+ dev_dbg(&spi->dev, "error %d reading JEDEC ID\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ if (id[0] != CFI_MFR_ATMEL)
+ return NULL;
+
+ jedec = be64_to_cpup((__be64 *)id);
+
+ /*
+ * First, try to match device using extended device
+ * information
+ */
+ info = jedec_lookup(spi, jedec >> DATAFLASH_SHIFT_EXTID, true);
+ if (!IS_ERR(info))
+ return info;
+ /*
+ * If that fails, make another pass using regular ID
+ * information
+ */
+ info = jedec_lookup(spi, jedec >> DATAFLASH_SHIFT_ID, false);
+ if (!IS_ERR(info))
+ return info;
+ /*
+ * Treat other chips as errors ... we won't know the right page
+ * size (it might be binary) even when we can tell which density
+ * class is involved (legacy chip id scheme).
+ */
+ dev_warn(&spi->dev, "JEDEC id %016llx not handled\n", jedec);
+ return ERR_PTR(-ENODEV);
+}
+
+/*
+ * Detect and initialize DataFlash device, using JEDEC IDs on newer chips
+ * or else the ID code embedded in the status bits:
+ *
+ * Device Density ID code #Pages PageSize Offset
+ * AT45DB011B 1Mbit (128K) xx0011xx (0x0c) 512 264 9
+ * AT45DB021B 2Mbit (256K) xx0101xx (0x14) 1024 264 9
+ * AT45DB041B 4Mbit (512K) xx0111xx (0x1c) 2048 264 9
+ * AT45DB081B 8Mbit (1M) xx1001xx (0x24) 4096 264 9
+ * AT45DB0161B 16Mbit (2M) xx1011xx (0x2c) 4096 528 10
+ * AT45DB0321B 32Mbit (4M) xx1101xx (0x34) 8192 528 10
+ * AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11
+ * AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11
+ */
+static int dataflash_probe(struct spi_device *spi)
+{
+ int status;
+ struct flash_info *info;
+
+ /*
+ * Try to detect dataflash by JEDEC ID.
+ * If it succeeds we know we have either a C or D part.
+ * D will support power of 2 pagesize option.
+ * Both support the security register, though with different
+ * write procedures.
+ */
+ info = jedec_probe(spi);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+ if (info != NULL)
+ return add_dataflash_otp(spi, info->name, info->nr_pages,
+ info->pagesize, info->pageoffset,
+ (info->flags & SUP_POW2PS) ? 'd' : 'c');
+
+ /*
+ * Older chips support only legacy commands, identifying
+ * capacity using bits in the status byte.
+ */
+ status = dataflash_status(spi);
+ if (status <= 0 || status == 0xff) {
+ dev_dbg(&spi->dev, "status error %d\n", status);
+ if (status == 0 || status == 0xff)
+ status = -ENODEV;
+ return status;
+ }
+
+ /* if there's a device there, assume it's dataflash.
+ * board setup should have set spi->max_speed_hz to
+ * match f(car) for continuous reads, mode 0 or 3.
+ */
+ switch (status & 0x3c) {
+ case 0x0c: /* 0 0 1 1 x x */
+ status = add_dataflash(spi, "AT45DB011B", 512, 264, 9);
+ break;
+ case 0x14: /* 0 1 0 1 x x */
+ status = add_dataflash(spi, "AT45DB021B", 1024, 264, 9);
+ break;
+ case 0x1c: /* 0 1 1 1 x x */
+ status = add_dataflash(spi, "AT45DB041x", 2048, 264, 9);
+ break;
+ case 0x24: /* 1 0 0 1 x x */
+ status = add_dataflash(spi, "AT45DB081B", 4096, 264, 9);
+ break;
+ case 0x2c: /* 1 0 1 1 x x */
+ status = add_dataflash(spi, "AT45DB161x", 4096, 528, 10);
+ break;
+ case 0x34: /* 1 1 0 1 x x */
+ status = add_dataflash(spi, "AT45DB321x", 8192, 528, 10);
+ break;
+ case 0x38: /* 1 1 1 x x x */
+ case 0x3c:
+ status = add_dataflash(spi, "AT45DB642x", 8192, 1056, 11);
+ break;
+ /* obsolete AT45DB1282 not (yet?) supported */
+ default:
+ dev_info(&spi->dev, "unsupported device (%x)\n",
+ status & 0x3c);
+ status = -ENODEV;
+ }
+
+ if (status < 0)
+ dev_dbg(&spi->dev, "add_dataflash --> %d\n", status);
+
+ return status;
+}
+
+static void dataflash_remove(struct spi_device *spi)
+{
+ struct dataflash *flash = spi_get_drvdata(spi);
+
+ dev_dbg(&spi->dev, "remove\n");
+
+ WARN_ON(mtd_device_unregister(&flash->mtd));
+
+ kfree(flash);
+}
+
+static struct spi_driver dataflash_driver = {
+ .driver = {
+ .name = "mtd_dataflash",
+ .of_match_table = of_match_ptr(dataflash_dt_ids),
+ },
+ .probe = dataflash_probe,
+ .remove = dataflash_remove,
+ .id_table = dataflash_spi_ids,
+
+ /* FIXME: investigate suspend and resume... */
+};
+
+module_spi_driver(dataflash_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrew Victor, David Brownell");
+MODULE_DESCRIPTION("MTD DataFlash driver");
+MODULE_ALIAS("spi:mtd_dataflash");
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
new file mode 100644
index 0000000000..1c97fabc4b
--- /dev/null
+++ b/drivers/mtd/devices/mtdram.c
@@ -0,0 +1,187 @@
+/*
+ * mtdram - a test mtd device
+ * Author: Alexander Larsson <alex@cendio.se>
+ *
+ * Copyright (c) 1999 Alexander Larsson <alex@cendio.se>
+ * Copyright (c) 2005 Joern Engel <joern@wh.fh-wedel.de>
+ *
+ * This code is GPL
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/mtdram.h>
+
+static unsigned long total_size = CONFIG_MTDRAM_TOTAL_SIZE;
+static unsigned long erase_size = CONFIG_MTDRAM_ERASE_SIZE;
+static unsigned long writebuf_size = 64;
+#define MTDRAM_TOTAL_SIZE (total_size * 1024)
+#define MTDRAM_ERASE_SIZE (erase_size * 1024)
+
+module_param(total_size, ulong, 0);
+MODULE_PARM_DESC(total_size, "Total device size in KiB");
+module_param(erase_size, ulong, 0);
+MODULE_PARM_DESC(erase_size, "Device erase block size in KiB");
+module_param(writebuf_size, ulong, 0);
+MODULE_PARM_DESC(writebuf_size, "Device write buf size in Bytes (Default: 64)");
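+
+/*
+ * Example (hypothetical values): create a 2MiB test device with 128KiB
+ * erase blocks:
+ *
+ *	modprobe mtdram total_size=2048 erase_size=128
+ */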
+
+// We could store these in the mtd structure, but we only support 1 device..
+static struct mtd_info *mtd_info;
+
+static int check_offs_len(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+
+ /* Start address must align on block boundary */
+ if (mtd_mod_by_eb(ofs, mtd)) {
+ pr_debug("%s: unaligned address\n", __func__);
+ ret = -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (mtd_mod_by_eb(len, mtd)) {
+ pr_debug("%s: length not block aligned\n", __func__);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ram_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ if (check_offs_len(mtd, instr->addr, instr->len))
+ return -EINVAL;
+ memset((char *)mtd->priv + instr->addr, 0xff, instr->len);
+
+ return 0;
+}
+
+static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ *virt = mtd->priv + from;
+ *retlen = len;
+
+ if (phys) {
+ /* limit retlen to the number of contiguous physical pages */
+ unsigned long page_ofs = offset_in_page(*virt);
+ void *addr = *virt - page_ofs;
+ unsigned long pfn1, pfn0 = vmalloc_to_pfn(addr);
+
+ *phys = __pfn_to_phys(pfn0) + page_ofs;
+ len += page_ofs;
+ while (len > PAGE_SIZE) {
+ len -= PAGE_SIZE;
+ addr += PAGE_SIZE;
+ pfn0++;
+ pfn1 = vmalloc_to_pfn(addr);
+ if (pfn1 != pfn0) {
+ *retlen = addr - *virt;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
+}
+
+static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ memcpy(buf, mtd->priv + from, len);
+ *retlen = len;
+ return 0;
+}
+
+static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ memcpy((char *)mtd->priv + to, buf, len);
+ *retlen = len;
+ return 0;
+}
+
+static void __exit cleanup_mtdram(void)
+{
+ if (mtd_info) {
+ mtd_device_unregister(mtd_info);
+ vfree(mtd_info->priv);
+ kfree(mtd_info);
+ }
+}
+
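+/*
+ * Also callable from other code (declared in <linux/mtd/mtdram.h>):
+ * wraps an already-mapped memory region in an mtdram-style MTD device.
+ */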
+int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
+ unsigned long size, const char *name)
+{
+ memset(mtd, 0, sizeof(*mtd));
+
+ /* Setup the MTD structure */
+ mtd->name = name;
+ mtd->type = MTD_RAM;
+ mtd->flags = MTD_CAP_RAM;
+ mtd->size = size;
+ mtd->writesize = 1;
+ mtd->writebufsize = writebuf_size;
+ mtd->erasesize = MTDRAM_ERASE_SIZE;
+ mtd->priv = mapped_address;
+
+ mtd->owner = THIS_MODULE;
+ mtd->_erase = ram_erase;
+ mtd->_point = ram_point;
+ mtd->_unpoint = ram_unpoint;
+ mtd->_read = ram_read;
+ mtd->_write = ram_write;
+
+ if (mtd_device_register(mtd, NULL, 0))
+ return -EIO;
+
+ return 0;
+}
+
+static int __init init_mtdram(void)
+{
+ void *addr;
+ int err;
+
+ if (!total_size)
+ return -EINVAL;
+
+ /* Allocate some memory */
+ mtd_info = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
+ if (!mtd_info)
+ return -ENOMEM;
+
+ addr = vmalloc(MTDRAM_TOTAL_SIZE);
+ if (!addr) {
+ kfree(mtd_info);
+ mtd_info = NULL;
+ return -ENOMEM;
+ }
+ err = mtdram_init_device(mtd_info, addr, MTDRAM_TOTAL_SIZE, "mtdram test device");
+ if (err) {
+ vfree(addr);
+ kfree(mtd_info);
+ mtd_info = NULL;
+ return err;
+ }
+ memset(mtd_info->priv, 0xff, MTDRAM_TOTAL_SIZE);
+ return err;
+}
+
+module_init(init_mtdram);
+module_exit(cleanup_mtdram);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Larsson <alexl@redhat.com>");
+MODULE_DESCRIPTION("Simulated MTD driver for testing");
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
new file mode 100644
index 0000000000..208bd4d871
--- /dev/null
+++ b/drivers/mtd/devices/phram.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) ???? Jochen Schäuble <psionic@psionic.de>
+ * Copyright (c) 2003-2004 Joern Engel <joern@wh.fh-wedel.de>
+ *
+ * Usage:
+ *
+ * One command line parameter per device, each in the form:
+ * phram=<name>,<start>,<len>[,<erasesize>]
+ * <name> may be up to 63 characters.
+ * <start>, <len>, and <erasesize> can be octal, decimal or hexadecimal. If followed
+ * by "ki", "Mi" or "Gi", the numbers will be interpreted as kilo, mega or
+ * gigabytes. <erasesize> is optional and defaults to PAGE_SIZE.
+ *
+ * Example:
+ * phram=swap,64Mi,128Mi phram=test,900Mi,1Mi,64Ki
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <asm/div64.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+
+struct phram_mtd_list {
+ struct mtd_info mtd;
+ struct list_head list;
+ bool cached;
+};
+
+static LIST_HEAD(phram_list);
+
+static int phram_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ u_char *start = mtd->priv;
+
+ memset(start + instr->addr, 0xff, instr->len);
+
+ return 0;
+}
+
+static int phram_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ *virt = mtd->priv + from;
+ *retlen = len;
+ return 0;
+}
+
+static int phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
+}
+
+static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ u_char *start = mtd->priv;
+
+ memcpy(buf, start + from, len);
+ *retlen = len;
+ return 0;
+}
+
+static int phram_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ u_char *start = mtd->priv;
+
+ memcpy(start + to, buf, len);
+ *retlen = len;
+ return 0;
+}
+
+static int phram_map(struct phram_mtd_list *phram, phys_addr_t start, size_t len)
+{
+ void *addr = NULL;
+
+ if (phram->cached)
+ addr = memremap(start, len, MEMREMAP_WB);
+ else
+ addr = (void __force *)ioremap(start, len);
+ if (!addr)
+ return -EIO;
+
+ phram->mtd.priv = addr;
+
+ return 0;
+}
+
+static void phram_unmap(struct phram_mtd_list *phram)
+{
+ void *addr = phram->mtd.priv;
+
+ if (phram->cached) {
+ memunmap(addr);
+ return;
+ }
+
+ iounmap((void __iomem *)addr);
+}
+
+static void unregister_devices(void)
+{
+ struct phram_mtd_list *this, *safe;
+
+ list_for_each_entry_safe(this, safe, &phram_list, list) {
+ mtd_device_unregister(&this->mtd);
+ phram_unmap(this);
+ kfree(this->mtd.name);
+ kfree(this);
+ }
+}
+
+static int register_device(struct platform_device *pdev, const char *name,
+ phys_addr_t start, size_t len, uint32_t erasesize)
+{
+ struct device_node *np = pdev ? pdev->dev.of_node : NULL;
+ bool cached = np ? !of_property_read_bool(np, "no-map") : false;
+ struct phram_mtd_list *new;
+ int ret = -ENOMEM;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ goto out0;
+
+ new->cached = cached;
+
+ ret = phram_map(new, start, len);
+ if (ret) {
+ pr_err("ioremap failed\n");
+ goto out1;
+ }
+
+
+ new->mtd.name = name;
+ new->mtd.size = len;
+ new->mtd.flags = MTD_CAP_RAM;
+ new->mtd._erase = phram_erase;
+ new->mtd._point = phram_point;
+ new->mtd._unpoint = phram_unpoint;
+ new->mtd._read = phram_read;
+ new->mtd._write = phram_write;
+ new->mtd.owner = THIS_MODULE;
+ new->mtd.type = MTD_RAM;
+ new->mtd.erasesize = erasesize;
+ new->mtd.writesize = 1;
+
+ mtd_set_of_node(&new->mtd, np);
+
+ ret = -EAGAIN;
+ if (mtd_device_register(&new->mtd, NULL, 0)) {
+ pr_err("Failed to register new device\n");
+ goto out2;
+ }
+
+ if (pdev)
+ platform_set_drvdata(pdev, new);
+ else
+ list_add_tail(&new->list, &phram_list);
+
+ return 0;
+
+out2:
+ phram_unmap(new);
+out1:
+ kfree(new);
+out0:
+ return ret;
+}
+
+static int parse_num64(uint64_t *num64, char *token)
+{
+ size_t len;
+ int shift = 0;
+ int ret;
+
+ len = strlen(token);
+ /* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
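+ /* e.g. "64ki" -> 64 << 10, "1Mi" -> 1 << 20, "2Gi" -> 2 << 30 */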
+ if (len > 2) {
+ if (token[len - 1] == 'i') {
+ switch (token[len - 2]) {
+ case 'G':
+ shift += 10;
+ fallthrough;
+ case 'M':
+ shift += 10;
+ fallthrough;
+ case 'k':
+ shift += 10;
+ token[len - 2] = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+
+ ret = kstrtou64(token, 0, num64);
+ *num64 <<= shift;
+
+ return ret;
+}
+
+static int parse_name(char **pname, const char *token)
+{
+ size_t len;
+ char *name;
+
+ len = strlen(token) + 1;
+ if (len > 64)
+ return -ENOSPC;
+
+ name = kstrdup(token, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ *pname = name;
+ return 0;
+}
+
+
+static inline void kill_final_newline(char *str)
+{
+ char *newline = strrchr(str, '\n');
+
+ if (newline && !newline[1])
+ *newline = 0;
+}
+
+
+#define parse_err(fmt, args...) do { \
+ pr_err(fmt , ## args); \
+ return 1; \
+} while (0)
+
+#ifndef MODULE
+static int phram_init_called;
+/*
+ * This shall contain the module parameter if any. It is of the form:
+ * - phram=<device>,<address>,<size>[,<erasesize>] for module case
+ * - phram.phram=<device>,<address>,<size>[,<erasesize>] for built-in case
+ * We leave 64 bytes for the device name, 20 for the address, 20 for the
+ * size, and 20 for the erasesize.
+ * Example: phram.phram=rootfs,0xa0000000,512Mi,65536
+ */
+static char phram_paramline[64 + 20 + 20 + 20];
+#endif
+
+static int phram_setup(const char *val)
+{
+ char buf[64 + 20 + 20 + 20], *str = buf;
+ char *token[4];
+ char *name;
+ uint64_t start;
+ uint64_t len;
+ uint64_t erasesize = PAGE_SIZE;
+ uint32_t rem;
+ int i, ret;
+
+ if (strnlen(val, sizeof(buf)) >= sizeof(buf))
+ parse_err("parameter too long\n");
+
+ strcpy(str, val);
+ kill_final_newline(str);
+
+ for (i = 0; i < 4; i++)
+ token[i] = strsep(&str, ",");
+
+ if (str)
+ parse_err("too many arguments\n");
+
+ if (!token[2])
+ parse_err("not enough arguments\n");
+
+ ret = parse_name(&name, token[0]);
+ if (ret)
+ return ret;
+
+ ret = parse_num64(&start, token[1]);
+ if (ret) {
+ pr_err("illegal start address\n");
+ goto error;
+ }
+
+ ret = parse_num64(&len, token[2]);
+ if (ret) {
+ pr_err("illegal device length\n");
+ goto error;
+ }
+
+ if (token[3]) {
+ ret = parse_num64(&erasesize, token[3]);
+ if (ret) {
+ pr_err("illegal erasesize\n");
+ goto error;
+ }
+ }
+
+ if (len == 0 || erasesize == 0 || erasesize > len
+ || erasesize > UINT_MAX) {
+ pr_err("illegal erasesize or len\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ div_u64_rem(len, (uint32_t)erasesize, &rem);
+ if (rem) {
+ pr_err("len is not multiple of erasesize\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = register_device(NULL, name, start, len, (uint32_t)erasesize);
+ if (ret)
+ goto error;
+
+ pr_info("%s device: %#llx at %#llx for erasesize %#llx\n", name, len, start, erasesize);
+ return 0;
+
+error:
+ kfree(name);
+ return ret;
+}
+
+static int phram_param_call(const char *val, const struct kernel_param *kp)
+{
+#ifdef MODULE
+ return phram_setup(val);
+#else
+ /*
+ * If more parameters are later passed in via
+ * /sys/module/phram/parameters/phram
+ * and init_phram() has already been called,
+ * we can parse the argument now.
+ */
+
+ if (phram_init_called)
+ return phram_setup(val);
+
+ /*
+ * During the early boot stage we only save the parameters
+ * here; they must be parsed later. If the parameter was
+ * passed on the kernel boot command line, phram_param_call()
+ * is called so early that it is not possible to resolve
+ * the device (even kmalloc() fails). Defer that work to
+ * init_phram(), which calls phram_setup().
+ */
+
+ if (strlen(val) >= sizeof(phram_paramline))
+ return -ENOSPC;
+ strcpy(phram_paramline, val);
+
+ return 0;
+#endif
+}
+
+module_param_call(phram, phram_param_call, NULL, NULL, 0200);
+MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>[,<erasesize>]\"");
+
+#ifdef CONFIG_OF
+static const struct of_device_id phram_of_match[] = {
+ { .compatible = "phram" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, phram_of_match);
+#endif
+
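+/*
+ * Illustrative (hypothetical) reserved-memory node for a phram device;
+ * mtd_set_of_node() picks the MTD name up from "label" (see the note in
+ * phram_probe() below), and a "no-map" property selects the uncached
+ * ioremap() path in phram_map():
+ *
+ *	fb_ram: phram@80000000 {
+ *		compatible = "phram";
+ *		label = "fb";
+ *		reg = <0x80000000 0x80000>;
+ *	};
+ */
+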
+static int phram_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ /* mtd_set_of_node() reads name from "label" */
+ return register_device(pdev, NULL, res->start, resource_size(res),
+ PAGE_SIZE);
+}
+
+static int phram_remove(struct platform_device *pdev)
+{
+ struct phram_mtd_list *phram = platform_get_drvdata(pdev);
+
+ mtd_device_unregister(&phram->mtd);
+ phram_unmap(phram);
+ kfree(phram);
+
+ return 0;
+}
+
+static struct platform_driver phram_driver = {
+ .probe = phram_probe,
+ .remove = phram_remove,
+ .driver = {
+ .name = "phram",
+ .of_match_table = of_match_ptr(phram_of_match),
+ },
+};
+
+static int __init init_phram(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&phram_driver);
+ if (ret)
+ return ret;
+
+#ifndef MODULE
+ if (phram_paramline[0])
+ ret = phram_setup(phram_paramline);
+ phram_init_called = 1;
+#endif
+
+ if (ret)
+ platform_driver_unregister(&phram_driver);
+
+ return ret;
+}
+
+static void __exit cleanup_phram(void)
+{
+ unregister_devices();
+ platform_driver_unregister(&phram_driver);
+}
+
+module_init(init_phram);
+module_exit(cleanup_phram);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joern Engel <joern@wh.fh-wedel.de>");
+MODULE_DESCRIPTION("MTD driver for physical RAM");
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
new file mode 100644
index 0000000000..6597fc2aad
--- /dev/null
+++ b/drivers/mtd/devices/pmc551.c
@@ -0,0 +1,847 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PMC551 PCI Mezzanine Ram Device
+ *
+ * Author:
+ * Mark Ferrell <mferrell@mvista.com>
+ * Copyright 1999,2000 Nortel Networks
+ *
+ * Description:
+ * This driver is intended to support the PMC551 PCI Ram device
+ * from Ramix Inc. The PMC551 is a PMC Mezzanine module for
+ * cPCI embedded systems. The device contains a single SROM
+ * that initially programs the V370PDC chipset onboard the
+ * device, and various banks of DRAM/SDRAM onboard. This driver
+ * implements this PCI Ram device as an MTD (Memory Technology
+ * Device) so that it can be used to hold a file system, or for
+ * added swap space in embedded systems. Since the memory on
+ * this board isn't as fast as main memory we do not try to hook
+ * it into main memory as that would simply reduce performance
+ * on the system. Using it as a block device allows us to use
+ * it as high-speed swap or as a high-speed disk device of some
+ * sort, which becomes very useful on diskless systems in the
+ * embedded market, I might add.
+ *
+ * Notes:
+ * Due to what I assume is a buggy SROM, the 64M PMC551 I
+ * have available claims that all 4 of its DRAM banks have 64MiB
+ * of ram configured (making a grand total of 256MiB onboard).
+ * This is slightly annoying since the BAR0 size reflects the
+ * aperture size, not the dram size, and the V370PDC supplies no
+ * other method for memory size discovery. This problem is
+ * mostly only relevant when compiled as a module, as the
+ * unloading of the module with an aperture size smaller than
+ * the ram will cause the driver to detect the onboard memory
+ * size to be equal to the aperture size when the module is
+ * reloaded. Soooo, to help, the module supports an msize
+ * option to allow the specification of the onboard memory, and
+ * an asize option, to allow the specification of the aperture
+ * size. The aperture must be less than or equal to the memory
+ * size; the driver will correct this if you screw it up. This
+ * problem is not relevant for compiled-in drivers, as compiled-in
+ * drivers only init once.
+ *
+ * Credits:
+ * Saeed Karamooz <saeed@ramix.com> of Ramix INC. for the
+ * initial example code of how to initialize this device and for
+ * help with questions I had concerning operation of the device.
+ *
+ * Most of the MTD code for this driver was originally written
+ * for the slram.o module in the MTD drivers package which
+ * allows the mapping of system memory into an MTD device.
+ * Since the PMC551 memory module is accessed in the same
+ * fashion as system memory, the slram.c code became a very nice
+ * fit to the needs of this driver. All we added was PCI
+ * detection/initialization to the driver, plus automatic size
+ * discovery during that PCI detection; later changes by Corey
+ * Minyard set up the card to utilize a 1M sliding aperture.
+ *
+ * Corey Minyard <minyard@nortelnetworks.com>
+ * * Modified driver to utilize a sliding aperture instead of
+ * mapping all memory into kernel space which turned out to
+ * be very wasteful.
+ * * Located a bug in the SROM's initialization sequence that
+ * made the memory unusable, added a fix to code to touch up
+ * the DRAM some.
+ *
+ * Bugs/FIXMEs:
+ * * MUST fix the init function to not spin on a register
+ * waiting for it to set .. this does not safely handle busted
+ * devices that never reset the register correctly, which will
+ * cause the system to hang, with a reboot being the only chance at
+ * recovery. [sort of fixed, could be better]
+ * * Add I2C handling of the SROM so we can read the SROM's information
+ * about the aperture size. This should always accurately reflect the
+ * onboard memory size.
+ * * Comb the init routine. It's still a bit kludgy on a few things.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <asm/io.h>
+#include <linux/pci.h>
+#include <linux/mtd/mtd.h>
+
+#define PMC551_VERSION \
+ "Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n"
+
+#define PCI_VENDOR_ID_V3_SEMI 0x11b0
+#define PCI_DEVICE_ID_V3_SEMI_V370PDC 0x0200
+
+#define PMC551_PCI_MEM_MAP0 0x50
+#define PMC551_PCI_MEM_MAP1 0x54
+#define PMC551_PCI_MEM_MAP_MAP_ADDR_MASK 0x3ff00000
+#define PMC551_PCI_MEM_MAP_APERTURE_MASK 0x000000f0
+#define PMC551_PCI_MEM_MAP_REG_EN 0x00000002
+#define PMC551_PCI_MEM_MAP_ENABLE 0x00000001
+
+#define PMC551_SDRAM_MA 0x60
+#define PMC551_SDRAM_CMD 0x62
+#define PMC551_DRAM_CFG 0x64
+#define PMC551_SYS_CTRL_REG 0x78
+
+#define PMC551_DRAM_BLK0 0x68
+#define PMC551_DRAM_BLK1 0x6c
+#define PMC551_DRAM_BLK2 0x70
+#define PMC551_DRAM_BLK3 0x74
+#define PMC551_DRAM_BLK_GET_SIZE(x) (524288 << ((x >> 4) & 0x0f))
+#define PMC551_DRAM_BLK_SET_COL_MUX(x, v) (((x) & ~0x00007000) | (((v) & 0x7) << 12))
+#define PMC551_DRAM_BLK_SET_ROW_MUX(x, v) (((x) & ~0x00000f00) | (((v) & 0xf) << 8))
+
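+/*
+ * Per-device private data. The card is reached through a small PCI
+ * aperture of asize bytes that is slid across the onboard memory by
+ * reprogramming PMC551_PCI_MEM_MAP0; curr_map0 caches the window
+ * currently mapped so redundant config writes can be skipped.
+ */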
+struct mypriv {
+ struct pci_dev *dev;
+ u_char *start;
+ u32 base_map0;
+ u32 curr_map0;
+ u32 asize;
+ struct mtd_info *nextpmc551;
+};
+
+static struct mtd_info *pmc551list;
+
+static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+
+static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct mypriv *priv = mtd->priv;
+ u32 soff_hi; /* start address offset hi */
+ u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
+ unsigned long end;
+ u_char *ptr;
+ size_t retlen;
+
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_erase(pos:%ld, len:%ld)\n", (long)instr->addr,
+ (long)instr->len);
+#endif
+
+ end = instr->addr + instr->len - 1;
+ eoff_hi = end & ~(priv->asize - 1);
+ soff_hi = instr->addr & ~(priv->asize - 1);
+ eoff_lo = end & (priv->asize - 1);
+
+ pmc551_point(mtd, instr->addr, instr->len, &retlen,
+ (void **)&ptr, NULL);
+
+ if (soff_hi == eoff_hi || mtd->size == priv->asize) {
+ /* The whole thing fits within one access, so just one shot
+ will do it. */
+ memset(ptr, 0xff, instr->len);
+ } else {
+ /* We have to erase in multiple passes, sliding the
+ aperture across the range. */
+ while (soff_hi != eoff_hi) {
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_erase() soff_hi: %ld, "
+ "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
+#endif
+ memset(ptr, 0xff, priv->asize);
+ if (soff_hi + priv->asize >= mtd->size) {
+ goto out;
+ }
+ soff_hi += priv->asize;
+ pmc551_point(mtd, (priv->base_map0 | soff_hi),
+ priv->asize, &retlen,
+ (void **)&ptr, NULL);
+ }
+ memset(ptr, 0xff, eoff_lo);
+ }
+
+ out:
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_erase() done\n");
+#endif
+
+ return 0;
+}
+
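+/*
+ * Slide the aperture so that @from becomes visible: the high bits of
+ * the offset select the window (written to PMC551_PCI_MEM_MAP0 only
+ * when it changes) and the low bits index into the mapped region
+ * returned via @virt.
+ */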
+static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ struct mypriv *priv = mtd->priv;
+ u32 soff_hi;
+ u32 soff_lo;
+
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len);
+#endif
+
+ soff_hi = from & ~(priv->asize - 1);
+ soff_lo = from & (priv->asize - 1);
+
+ /* Cheap hack optimization */
+ if (priv->curr_map0 != from) {
+ pci_write_config_dword(priv->dev, PMC551_PCI_MEM_MAP0,
+ (priv->base_map0 | soff_hi));
+ priv->curr_map0 = soff_hi;
+ }
+
+ *virt = priv->start + soff_lo;
+ *retlen = len;
+ return 0;
+}
+
+static int pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_unpoint()\n");
+#endif
+ return 0;
+}
+
+static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf)
+{
+ struct mypriv *priv = mtd->priv;
+ u32 soff_hi; /* start address offset hi */
+ u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
+ unsigned long end;
+ u_char *ptr;
+ u_char *copyto = buf;
+
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_read(pos:%ld, len:%ld) asize: %ld\n",
+ (long)from, (long)len, (long)priv->asize);
+#endif
+
+ end = from + len - 1;
+ soff_hi = from & ~(priv->asize - 1);
+ eoff_hi = end & ~(priv->asize - 1);
+ eoff_lo = end & (priv->asize - 1);
+
+ pmc551_point(mtd, from, len, retlen, (void **)&ptr, NULL);
+
+ if (soff_hi == eoff_hi) {
+ /* The whole thing fits within one access, so just one shot
+ will do it. */
+ memcpy(copyto, ptr, len);
+ copyto += len;
+ } else {
+ /* We have to do multiple reads to get all the data
+ read. */
+ while (soff_hi != eoff_hi) {
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_read() soff_hi: %ld, "
+ "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
+#endif
+ memcpy(copyto, ptr, priv->asize);
+ copyto += priv->asize;
+ if (soff_hi + priv->asize >= mtd->size) {
+ goto out;
+ }
+ soff_hi += priv->asize;
+ pmc551_point(mtd, soff_hi, priv->asize, retlen,
+ (void **)&ptr, NULL);
+ }
+ memcpy(copyto, ptr, eoff_lo);
+ copyto += eoff_lo;
+ }
+
+ out:
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_read() done\n");
+#endif
+ *retlen = copyto - buf;
+ return 0;
+}
+
+static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf)
+{
+ struct mypriv *priv = mtd->priv;
+ u32 soff_hi; /* start address offset hi */
+ u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
+ unsigned long end;
+ u_char *ptr;
+ const u_char *copyfrom = buf;
+
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_write(pos:%ld, len:%ld) asize:%ld\n",
+ (long)to, (long)len, (long)priv->asize);
+#endif
+
+ end = to + len - 1;
+ soff_hi = to & ~(priv->asize - 1);
+ eoff_hi = end & ~(priv->asize - 1);
+ eoff_lo = end & (priv->asize - 1);
+
+ pmc551_point(mtd, to, len, retlen, (void **)&ptr, NULL);
+
+ if (soff_hi == eoff_hi) {
+ /* The whole thing fits within one access, so just one shot
+ will do it. */
+ memcpy(ptr, copyfrom, len);
+ copyfrom += len;
+ } else {
+ /* We have to do multiple writes to get all the data
+ written. */
+ while (soff_hi != eoff_hi) {
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_write() soff_hi: %ld, "
+ "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
+#endif
+ memcpy(ptr, copyfrom, priv->asize);
+ copyfrom += priv->asize;
+ if (soff_hi >= mtd->size) {
+ goto out;
+ }
+ soff_hi += priv->asize;
+ pmc551_point(mtd, soff_hi, priv->asize, retlen,
+ (void **)&ptr, NULL);
+ }
+ memcpy(ptr, copyfrom, eoff_lo);
+ copyfrom += eoff_lo;
+ }
+
+ out:
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_write() done\n");
+#endif
+ *retlen = copyfrom - buf;
+ return 0;
+}
+
+/*
+ * Fixup routines for the V370PDC
+ * PCI device ID 0x020011b0
+ *
+ * This function basically kick-starts the DRAM onboard the card and gets it
+ * ready to be used. Before this is done the device reads VERY erratically,
+ * so much so that it can crash the Linux 2.2.x series kernels when a user
+ * cats /proc/pci .. though that is mainly a kernel bug in handling the PCI
+ * DEVSEL register. FIXME: stop spinning on registers .. must implement a
+ * timeout mechanism.
+ *
+ * Returns the size of the memory region found.
+ */
+static int __init fixup_pmc551(struct pci_dev *dev)
+{
+#ifdef CONFIG_MTD_PMC551_BUGFIX
+ u32 dram_data;
+#endif
+ u32 size, dcmd, cfg, dtmp;
+ u16 cmd, tmp, i;
+ u8 bcmd, counter;
+
+ /* Sanity Check */
+ if (!dev) {
+ return -ENODEV;
+ }
+
+ /*
+ * Attempt to reset the card
+ * FIXME: Stop Spinning registers
+ */
+ counter = 0;
+ /* unlock registers */
+ pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, 0xA5);
+ /* read in old data */
+ pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd);
+ /* bang the reset line up and down for a few */
+ for (i = 0; i < 10; i++) {
+ counter = 0;
+ bcmd &= ~0x80;
+ while (counter++ < 100) {
+ pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
+ }
+ counter = 0;
+ bcmd |= 0x80;
+ while (counter++ < 100) {
+ pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
+ }
+ }
+ bcmd |= (0x40 | 0x20);
+ pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
+
+ /*
+ * Take care and turn off the memory on the device while we
+ * tweak the configurations
+ */
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ tmp = cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(dev, PCI_COMMAND, tmp);
+
+ /*
+ * Disable existing aperture before probing memory size
+ */
+ pci_read_config_dword(dev, PMC551_PCI_MEM_MAP0, &dcmd);
+ dtmp = (dcmd | PMC551_PCI_MEM_MAP_ENABLE | PMC551_PCI_MEM_MAP_REG_EN);
+ pci_write_config_dword(dev, PMC551_PCI_MEM_MAP0, dtmp);
+ /*
+ * Grab the old BAR0 config so that we can figure out the memory size.
+ * This is another bit of a kludge. The reason for the
+ * redundancy is I am hoping to retain the original configuration
+ * previously assigned to the card by the BIOS or some previous
+ * fixup routine in the kernel. So we read the old config into cfg,
+ * then write all 1's to the memory space, read back the result into
+ * "size", and then write back all the old config.
+ */
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &cfg);
+#ifndef CONFIG_MTD_PMC551_BUGFIX
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, ~0);
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &size);
+ size = (size & PCI_BASE_ADDRESS_MEM_MASK);
+ size &= ~(size - 1);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, cfg);
+#else
+ /*
+ * Get the size of the memory by reading all the DRAM size values
+ * and adding them up.
+ *
+ * KLUDGE ALERT: the boards we are using have invalid column and
+ * row mux values. We fix them here, but this will break other
+ * memory configurations.
+ */
+ pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dram_data);
+ size = PMC551_DRAM_BLK_GET_SIZE(dram_data);
+ dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
+ dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
+ pci_write_config_dword(dev, PMC551_DRAM_BLK0, dram_data);
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dram_data);
+ size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
+ dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
+ dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
+ pci_write_config_dword(dev, PMC551_DRAM_BLK1, dram_data);
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dram_data);
+ size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
+ dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
+ dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
+ pci_write_config_dword(dev, PMC551_DRAM_BLK2, dram_data);
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dram_data);
+ size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
+ dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
+ dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
+ pci_write_config_dword(dev, PMC551_DRAM_BLK3, dram_data);
+
+ /*
+ * Oops .. something went wrong
+ */
+ if ((size &= PCI_BASE_ADDRESS_MEM_MASK) == 0) {
+ return -ENODEV;
+ }
+#endif /* CONFIG_MTD_PMC551_BUGFIX */
+
+ if ((cfg & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) {
+ return -ENODEV;
+ }
+
+ /*
+ * Precharge Dram
+ */
+ pci_write_config_word(dev, PMC551_SDRAM_MA, 0x0400);
+ pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x00bf);
+
+ /*
+ * Wait until command has gone through
+ * FIXME: register spinning issue
+ */
+ do {
+ pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
+ if (counter++ > 100)
+ break;
+ } while ((PCI_COMMAND_IO) & cmd);
+
+ /*
+ * Turn on auto refresh
+ * The loop is taken directly from Ramix's example code. I assume that
+ * this must be held high for some duration of time, but I can find no
+ * documentation referencing the reasons why.
+ */
+ for (i = 1; i <= 8; i++) {
+ pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x0df);
+
+ /*
+ * Make certain command has gone through
+ * FIXME: register spinning issue
+ */
+ counter = 0;
+ do {
+ pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
+ if (counter++ > 100)
+ break;
+ } while ((PCI_COMMAND_IO) & cmd);
+ }
+
+ pci_write_config_word(dev, PMC551_SDRAM_MA, 0x0020);
+ pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x0ff);
+
+ /*
+ * Wait until command completes
+ * FIXME: register spinning issue
+ */
+ counter = 0;
+ do {
+ pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
+ if (counter++ > 100)
+ break;
+ } while ((PCI_COMMAND_IO) & cmd);
+
+ pci_read_config_dword(dev, PMC551_DRAM_CFG, &dcmd);
+ dcmd |= 0x02000000;
+ pci_write_config_dword(dev, PMC551_DRAM_CFG, dcmd);
+
+ /*
+ * Check to make certain fast back-to-back, if not
+ * then set it so
+ */
+ pci_read_config_word(dev, PCI_STATUS, &cmd);
+ if ((cmd & PCI_COMMAND_FAST_BACK) == 0) {
+ cmd |= PCI_COMMAND_FAST_BACK;
+ pci_write_config_word(dev, PCI_STATUS, cmd);
+ }
+
+ /*
+ * Check to make certain the DEVSEL is set correctly, this device
+ * has a tendency to assert DEVSEL and TRDY when a write is performed
+ * to the memory when memory is read-only
+ */
+ if ((cmd & PCI_STATUS_DEVSEL_MASK) != 0x0) {
+ cmd &= ~PCI_STATUS_DEVSEL_MASK;
+ pci_write_config_word(dev, PCI_STATUS, cmd);
+ }
+ /*
+ * Set to be prefetchable and put everything back based on old cfg.
+ * it's possible that the reset of the V370PDC nuked the original
+ * setup
+ */
+ /*
+ cfg |= PCI_BASE_ADDRESS_MEM_PREFETCH;
+ pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg );
+ */
+
+ /*
+ * Turn PCI memory and I/O bus access back on
+ */
+ pci_write_config_word(dev, PCI_COMMAND,
+ PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ /*
+ * Some screen fun
+ */
+ printk(KERN_DEBUG "pmc551: %d%sB (0x%x) of %sprefetchable memory at "
+ "0x%llx\n", (size < 1024) ? size : (size < 1048576) ?
+ size >> 10 : size >> 20,
+ (size < 1024) ? "" : (size < 1048576) ? "Ki" : "Mi", size,
+ ((dcmd & (0x1 << 3)) == 0) ? "non-" : "",
+ (unsigned long long)pci_resource_start(dev, 0));
+
+ /*
+ * Check to see the state of the memory
+ */
+ pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dcmd);
+ printk(KERN_DEBUG "pmc551: DRAM_BLK0 Flags: %s,%s\n"
+ "pmc551: DRAM_BLK0 Size: %d at %d\n"
+ "pmc551: DRAM_BLK0 Row MUX: %d, Col MUX: %d\n",
+ (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
+ (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
+ PMC551_DRAM_BLK_GET_SIZE(dcmd),
+ ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
+ ((dcmd >> 9) & 0xF));
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dcmd);
+ printk(KERN_DEBUG "pmc551: DRAM_BLK1 Flags: %s,%s\n"
+ "pmc551: DRAM_BLK1 Size: %d at %d\n"
+ "pmc551: DRAM_BLK1 Row MUX: %d, Col MUX: %d\n",
+ (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
+ (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
+ PMC551_DRAM_BLK_GET_SIZE(dcmd),
+ ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
+ ((dcmd >> 9) & 0xF));
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dcmd);
+ printk(KERN_DEBUG "pmc551: DRAM_BLK2 Flags: %s,%s\n"
+ "pmc551: DRAM_BLK2 Size: %d at %d\n"
+ "pmc551: DRAM_BLK2 Row MUX: %d, Col MUX: %d\n",
+ (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
+ (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
+ PMC551_DRAM_BLK_GET_SIZE(dcmd),
+ ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
+ ((dcmd >> 9) & 0xF));
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dcmd);
+ printk(KERN_DEBUG "pmc551: DRAM_BLK3 Flags: %s,%s\n"
+ "pmc551: DRAM_BLK3 Size: %d at %d\n"
+ "pmc551: DRAM_BLK3 Row MUX: %d, Col MUX: %d\n",
+ (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
+ (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
+ PMC551_DRAM_BLK_GET_SIZE(dcmd),
+ ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
+ ((dcmd >> 9) & 0xF));
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ printk(KERN_DEBUG "pmc551: Memory Access %s\n",
+ (((0x1 << 1) & cmd) == 0) ? "off" : "on");
+ printk(KERN_DEBUG "pmc551: I/O Access %s\n",
+ (((0x1 << 0) & cmd) == 0) ? "off" : "on");
+
+ pci_read_config_word(dev, PCI_STATUS, &cmd);
+ printk(KERN_DEBUG "pmc551: Devsel %s\n",
+ ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x000) ? "Fast" :
+ ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x200) ? "Medium" :
+ ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x400) ? "Slow" : "Invalid");
+
+ printk(KERN_DEBUG "pmc551: %sFast Back-to-Back\n",
+ ((PCI_COMMAND_FAST_BACK & cmd) == 0) ? "Not " : "");
+
+ pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd);
+ printk(KERN_DEBUG "pmc551: EEPROM is under %s control\n"
+ "pmc551: System Control Register is %slocked to PCI access\n"
+ "pmc551: System Control Register is %slocked to EEPROM access\n",
+ (bcmd & 0x1) ? "software" : "hardware",
+ (bcmd & 0x20) ? "" : "un", (bcmd & 0x40) ? "" : "un");
+#endif
+ return size;
+}
+
+/*
+ * Kernel version specific module stuffages
+ */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark Ferrell <mferrell@mvista.com>");
+MODULE_DESCRIPTION(PMC551_VERSION);
+
+/*
+ * Stuff these outside the ifdef so as to not bust compiled-in driver support
+ */
+static int msize = 0;
+static int asize = 0;
+
+module_param(msize, int, 0);
+MODULE_PARM_DESC(msize, "memory size in MiB [1 - 1024]");
+module_param(asize, int, 0);
+MODULE_PARM_DESC(asize, "aperture size, must be <= memsize [1-1024]");
+
+/*
+ * PMC551 Card Initialization
+ */
+static int __init init_pmc551(void)
+{
+ struct pci_dev *PCI_Device = NULL;
+ struct mypriv *priv;
+ int found = 0;
+ struct mtd_info *mtd;
+ int length = 0;
+
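+ /*
+ * msize/asize are given in MiB; keep only the lowest set bit
+ * (forcing a power of two) and convert to bytes.
+ */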
+ if (msize) {
+ msize = (1 << (ffs(msize) - 1)) << 20;
+ if (msize > (1 << 30)) {
+ printk(KERN_NOTICE "pmc551: Invalid memory size [%d]\n",
+ msize);
+ return -EINVAL;
+ }
+ }
+
+ if (asize) {
+ asize = (1 << (ffs(asize) - 1)) << 20;
+ if (asize > (1 << 30)) {
+ printk(KERN_NOTICE "pmc551: Invalid aperture size "
+ "[%d]\n", asize);
+ return -EINVAL;
+ }
+ }
+
+ printk(KERN_INFO PMC551_VERSION);
+
+ /*
+ * PCI-bus chipset probe.
+ */
+ for (;;) {
+
+ if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI,
+ PCI_DEVICE_ID_V3_SEMI_V370PDC,
+ PCI_Device)) == NULL) {
+ break;
+ }
+
+ printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%llx\n",
+ (unsigned long long)pci_resource_start(PCI_Device, 0));
+
+ /*
+ * The PMC551 device acts VERY weird if you don't init it
+ * first, i.e. it will not correctly report devsel. If for
+ * some reason the sdram is in a write-protected state, the
+ * device will DEVSEL when it is written to, causing problems
+ * with the oldproc.c driver in some kernels (2.2.*).
+ */
+ if ((length = fixup_pmc551(PCI_Device)) <= 0) {
+ printk(KERN_NOTICE "pmc551: Cannot init SDRAM\n");
+ break;
+ }
+
+ /*
+ * This is needed until the driver is capable of reading the
+ * onboard I2C SROM to discover the "real" memory size.
+ */
+ if (msize) {
+ length = msize;
+ printk(KERN_NOTICE "pmc551: Using specified memory "
+ "size 0x%x\n", length);
+ } else {
+ msize = length;
+ }
+
+ mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+ if (!mtd)
+ break;
+
+ priv = kzalloc(sizeof(struct mypriv), GFP_KERNEL);
+ if (!priv) {
+ kfree(mtd);
+ break;
+ }
+ mtd->priv = priv;
+ priv->dev = PCI_Device;
+
+ if (asize > length) {
+ printk(KERN_NOTICE "pmc551: reducing aperture size to "
+ "fit %dM\n", length >> 20);
+ priv->asize = asize = length;
+ } else if (asize == 0 || asize == length) {
+ printk(KERN_NOTICE "pmc551: Using existing aperture "
+ "size %dM\n", length >> 20);
+ priv->asize = asize = length;
+ } else {
+ printk(KERN_NOTICE "pmc551: Using specified aperture "
+ "size %dM\n", asize >> 20);
+ priv->asize = asize;
+ }
+ priv->start = pci_iomap(PCI_Device, 0, priv->asize);
+
+ if (!priv->start) {
+ printk(KERN_NOTICE "pmc551: Unable to map IO space\n");
+ kfree(mtd->priv);
+ kfree(mtd);
+ break;
+ }
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551: setting aperture to %d\n",
+ ffs(priv->asize >> 20) - 1);
+#endif
+
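+ /* Encode log2(aperture size in MiB) into the MAP0 aperture-size field. */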
+ priv->base_map0 = (PMC551_PCI_MEM_MAP_REG_EN
+ | PMC551_PCI_MEM_MAP_ENABLE
+ | (ffs(priv->asize >> 20) - 1) << 4);
+ priv->curr_map0 = priv->base_map0;
+ pci_write_config_dword(priv->dev, PMC551_PCI_MEM_MAP0,
+ priv->curr_map0);
+
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551: aperture set to %d\n",
+ (priv->base_map0 & 0xF0) >> 4);
+#endif
+
+ mtd->size = msize;
+ mtd->flags = MTD_CAP_RAM;
+ mtd->_erase = pmc551_erase;
+ mtd->_read = pmc551_read;
+ mtd->_write = pmc551_write;
+ mtd->_point = pmc551_point;
+ mtd->_unpoint = pmc551_unpoint;
+ mtd->type = MTD_RAM;
+ mtd->name = "PMC551 RAM board";
+ mtd->erasesize = 0x10000;
+ mtd->writesize = 1;
+ mtd->owner = THIS_MODULE;
+
+ if (mtd_device_register(mtd, NULL, 0)) {
+ printk(KERN_NOTICE "pmc551: Failed to register new device\n");
+ pci_iounmap(PCI_Device, priv->start);
+ kfree(mtd->priv);
+ kfree(mtd);
+ break;
+ }
+
+ /* Keep a reference, as mtd_device_register() worked */
+ pci_dev_get(PCI_Device);
+
+ printk(KERN_NOTICE "Registered pmc551 memory device.\n");
+ printk(KERN_NOTICE "Mapped %dMiB of memory from 0x%p to 0x%p\n",
+ priv->asize >> 20,
+ priv->start, priv->start + priv->asize);
+ printk(KERN_NOTICE "Total memory is %d%sB\n",
+ (length < 1024) ? length :
+ (length < 1048576) ? length >> 10 : length >> 20,
+ (length < 1024) ? "" : (length < 1048576) ? "Ki" : "Mi");
+ priv->nextpmc551 = pmc551list;
+ pmc551list = mtd;
+ found++;
+ }
+
+ /* Exited early, reference left over */
+ pci_dev_put(PCI_Device);
+
+ if (!pmc551list) {
+ printk(KERN_NOTICE "pmc551: not detected\n");
+ return -ENODEV;
+ } else {
+ printk(KERN_NOTICE "pmc551: %d pmc551 devices loaded\n", found);
+ return 0;
+ }
+}
+
+/*
+ * PMC551 Card Cleanup
+ */
+static void __exit cleanup_pmc551(void)
+{
+ int found = 0;
+ struct mtd_info *mtd;
+ struct mypriv *priv;
+
+ while ((mtd = pmc551list)) {
+ priv = mtd->priv;
+ pmc551list = priv->nextpmc551;
+
+ if (priv->start) {
+ printk(KERN_DEBUG "pmc551: unmapping %dMiB starting at "
+ "0x%p\n", priv->asize >> 20, priv->start);
+ pci_iounmap(priv->dev, priv->start);
+ }
+ pci_dev_put(priv->dev);
+
+ kfree(mtd->priv);
+ mtd_device_unregister(mtd);
+ kfree(mtd);
+ found++;
+ }
+
+ printk(KERN_NOTICE "pmc551: %d pmc551 devices unloaded\n", found);
+}
+
+module_init(init_pmc551);
+module_exit(cleanup_pmc551);
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
new file mode 100644
index 0000000000..36e060386e
--- /dev/null
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OPAL PNOR flash MTD abstraction
+ *
+ * Copyright IBM 2015
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/opal.h>
+
+
+/*
+ * This driver creates a Linux MTD abstraction for platform PNOR flash
+ * backed by OPAL calls.
+ */
+
+struct powernv_flash {
+ struct mtd_info mtd;
+ u32 id;
+};
+
+enum flash_op {
+ FLASH_OP_READ,
+ FLASH_OP_WRITE,
+ FLASH_OP_ERASE,
+};
+
+/*
+ * Don't return -ERESTARTSYS if we can't get a token, the MTD core
+ * might have split up the call from userspace and called into the
+ * driver more than once, we'll already have done some amount of work.
+ */
+static int powernv_flash_async_op(struct mtd_info *mtd, enum flash_op op,
+ loff_t offset, size_t len, size_t *retlen, u_char *buf)
+{
+ struct powernv_flash *info = (struct powernv_flash *)mtd->priv;
+ struct device *dev = &mtd->dev;
+ int token;
+ struct opal_msg msg;
+ int rc;
+
+ dev_dbg(dev, "%s(op=%d, offset=0x%llx, len=%zu)\n",
+ __func__, op, offset, len);
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ if (token != -ERESTARTSYS)
+ dev_err(dev, "Failed to get an async token\n");
+ else
+ token = -EINTR;
+ return token;
+ }
+
+ switch (op) {
+ case FLASH_OP_READ:
+ rc = opal_flash_read(info->id, offset, __pa(buf), len, token);
+ break;
+ case FLASH_OP_WRITE:
+ rc = opal_flash_write(info->id, offset, __pa(buf), len, token);
+ break;
+ case FLASH_OP_ERASE:
+ rc = opal_flash_erase(info->id, offset, len, token);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ opal_async_release_token(token);
+ return -EIO;
+ }
+
+ if (rc == OPAL_ASYNC_COMPLETION) {
+ rc = opal_async_wait_response_interruptible(token, &msg);
+ if (rc) {
+ /*
+ * If we return the mtd core will free the
+ * buffer we've just passed to OPAL but OPAL
+ * will continue to read or write from that
+ * memory.
+ * It may be tempting to ultimately return 0
+ * if we're doing a read or a write since we
+ * are going to end up waiting until OPAL is
+ * done. However, because the MTD core sends
+ * us the userspace request in chunks, we need
+ * it to know we've been interrupted.
+ */
+ rc = -EINTR;
+ if (opal_async_wait_response(token, &msg))
+ dev_err(dev, "opal_async_wait_response() failed\n");
+ goto out;
+ }
+ rc = opal_get_async_rc(msg);
+ }
+
+ /*
+ * OPAL does mutual exclusion on the flash; if it is held by
+ * another user, it will return OPAL_BUSY.
+ * During firmware updates by the service processor, OPAL may
+ * be (temporarily) prevented from accessing the flash; in
+ * this case OPAL will also return OPAL_BUSY.
+ * Neither case is exactly an error, but the flash could have
+ * changed, so userspace should be informed.
+ */
+ if (rc != OPAL_SUCCESS && rc != OPAL_BUSY)
+ dev_err(dev, "opal_flash_async_op(op=%d) failed (rc %d)\n",
+ op, rc);
+
+ if (rc == OPAL_SUCCESS && retlen)
+ *retlen = len;
+
+ rc = opal_error_code(rc);
+out:
+ opal_async_release_token(token);
+ return rc;
+}
+
+/**
+ * powernv_flash_read
+ * @mtd: the device
+ * @from: the offset to read from
+ * @len: the number of bytes to read
+ * @retlen: the number of bytes actually read
+ * @buf: the filled in buffer
+ *
+ * Returns 0 if read successful, or -ERRNO if an error occurred
+ */
+static int powernv_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ return powernv_flash_async_op(mtd, FLASH_OP_READ, from,
+ len, retlen, buf);
+}
+
+/**
+ * powernv_flash_write
+ * @mtd: the device
+ * @to: the offset to write to
+ * @len: the number of bytes to write
+ * @retlen: the number of bytes actually written
+ * @buf: the buffer to get bytes from
+ *
+ * Returns 0 if write successful, -ERRNO if error occurred
+ */
+static int powernv_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ return powernv_flash_async_op(mtd, FLASH_OP_WRITE, to,
+ len, retlen, (u_char *)buf);
+}
+
+/**
+ * powernv_flash_erase
+ * @mtd: the device
+ * @erase: the erase info
+ * Returns 0 if erase successful or -ERRNO if an error occurred
+ */
+static int powernv_flash_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+ int rc;
+
+ rc = powernv_flash_async_op(mtd, FLASH_OP_ERASE, erase->addr,
+ erase->len, NULL, NULL);
+ if (rc)
+ erase->fail_addr = erase->addr;
+
+ return rc;
+}
+
+/**
+ * powernv_flash_set_driver_info - Fill the mtd_info structure
+ * @dev: The device structure
+ * @mtd: The structure to fill
+ */
+static int powernv_flash_set_driver_info(struct device *dev,
+ struct mtd_info *mtd)
+{
+ u64 size;
+ u32 erase_size;
+ int rc;
+
+ rc = of_property_read_u32(dev->of_node, "ibm,flash-block-size",
+ &erase_size);
+ if (rc) {
+ dev_err(dev, "couldn't get resource block size information\n");
+ return rc;
+ }
+
+ rc = of_property_read_u64(dev->of_node, "reg", &size);
+ if (rc) {
+ dev_err(dev, "couldn't get resource size information\n");
+ return rc;
+ }
+
+ /*
+ * Going to have to check what details I need to set and how to
+ * get them
+ */
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_WRITEABLE;
+ mtd->size = size;
+ mtd->erasesize = erase_size;
+ mtd->writebufsize = mtd->writesize = 1;
+ mtd->owner = THIS_MODULE;
+ mtd->_erase = powernv_flash_erase;
+ mtd->_read = powernv_flash_read;
+ mtd->_write = powernv_flash_write;
+ mtd->dev.parent = dev;
+ mtd_set_of_node(mtd, dev->of_node);
+ return 0;
+}
+
+/**
+ * powernv_flash_probe
+ * @pdev: platform device
+ *
+ * Returns 0 on success, -ENOMEM, -ENXIO on error
+ */
+static int powernv_flash_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct powernv_flash *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->mtd.priv = data;
+
+ ret = of_property_read_u32(dev->of_node, "ibm,opal-id", &(data->id));
+ if (ret) {
+ dev_err(dev, "no device property 'ibm,opal-id'\n");
+ return ret;
+ }
+
+ ret = powernv_flash_set_driver_info(dev, &data->mtd);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, data);
+
+ /*
+ * The current flash that skiboot exposes is one contiguous flash chip
+ * with an ffs partition at the start; it should prove easier for users
+ * to deal with partitions, or not, as they see fit.
+ */
+ return mtd_device_register(&data->mtd, NULL, 0);
+}
+
+/**
+ * powernv_flash_release - Release the driver
+ * @pdev: the platform device
+ *
+ * Returns 0
+ */
+static int powernv_flash_release(struct platform_device *pdev)
+{
+ struct powernv_flash *data = dev_get_drvdata(&(pdev->dev));
+
+ /* All resources should be freed automatically */
+ WARN_ON(mtd_device_unregister(&data->mtd));
+
+ return 0;
+}
+
+static const struct of_device_id powernv_flash_match[] = {
+ { .compatible = "ibm,opal-flash" },
+ {}
+};
+
+static struct platform_driver powernv_flash_driver = {
+ .driver = {
+ .name = "powernv_flash",
+ .of_match_table = powernv_flash_match,
+ },
+ .remove = powernv_flash_release,
+ .probe = powernv_flash_probe,
+};
+
+module_platform_driver(powernv_flash_driver);
+
+MODULE_DEVICE_TABLE(of, powernv_flash_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cyril Bur <cyril.bur@au1.ibm.com>");
+MODULE_DESCRIPTION("MTD abstraction for OPAL flash");
diff --git a/drivers/mtd/devices/serial_flash_cmds.h b/drivers/mtd/devices/serial_flash_cmds.h
new file mode 100644
index 0000000000..8b51a872fd
--- /dev/null
+++ b/drivers/mtd/devices/serial_flash_cmds.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Generic/SFDP Flash Commands and Device Capabilities
+ *
+ * Copyright (C) 2013 Lee Jones <lee.jones@linaro.org>
+ */
+
+#ifndef _MTD_SERIAL_FLASH_CMDS_H
+#define _MTD_SERIAL_FLASH_CMDS_H
+
+/* Generic Flash Commands/OPCODEs */
+#define SPINOR_OP_WRVCR 0x81
+#define SPINOR_OP_RDVCR 0x85
+
+/* JEDEC Standard - Serial Flash Discoverable Parameters (SFDP) Commands */
+#define SPINOR_OP_WRITE 0x02 /* PAGE PROGRAM */
+#define SPINOR_OP_WRITE_1_1_2 0xa2 /* DUAL INPUT PROGRAM */
+#define SPINOR_OP_WRITE_1_2_2 0xd2 /* DUAL INPUT EXT PROGRAM */
+#define SPINOR_OP_WRITE_1_1_4 0x32 /* QUAD INPUT PROGRAM */
+#define SPINOR_OP_WRITE_1_4_4 0x12 /* QUAD INPUT EXT PROGRAM */
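+
+/*
+ * In the _x_y_z suffixes, x, y and z are the number of I/O lines used
+ * for the opcode, address and data phases, respectively.
+ */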
+
+/* Configuration flags */
+#define FLASH_FLAG_SINGLE 0x000000ff
+#define FLASH_FLAG_READ_WRITE 0x00000001
+#define FLASH_FLAG_READ_FAST 0x00000002
+#define FLASH_FLAG_SE_4K 0x00000004
+#define FLASH_FLAG_SE_32K 0x00000008
+#define FLASH_FLAG_CE 0x00000010
+#define FLASH_FLAG_32BIT_ADDR 0x00000020
+#define FLASH_FLAG_RESET 0x00000040
+#define FLASH_FLAG_DYB_LOCKING 0x00000080
+
+#define FLASH_FLAG_DUAL 0x0000ff00
+#define FLASH_FLAG_READ_1_1_2 0x00000100
+#define FLASH_FLAG_READ_1_2_2 0x00000200
+#define FLASH_FLAG_READ_2_2_2 0x00000400
+#define FLASH_FLAG_WRITE_1_1_2 0x00001000
+#define FLASH_FLAG_WRITE_1_2_2 0x00002000
+#define FLASH_FLAG_WRITE_2_2_2 0x00004000
+
+#define FLASH_FLAG_QUAD 0x00ff0000
+#define FLASH_FLAG_READ_1_1_4 0x00010000
+#define FLASH_FLAG_READ_1_4_4 0x00020000
+#define FLASH_FLAG_READ_4_4_4 0x00040000
+#define FLASH_FLAG_WRITE_1_1_4 0x00100000
+#define FLASH_FLAG_WRITE_1_4_4 0x00200000
+#define FLASH_FLAG_WRITE_4_4_4 0x00400000
+
+#endif /* _MTD_SERIAL_FLASH_CMDS_H */
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
new file mode 100644
index 0000000000..28131a127d
--- /dev/null
+++ b/drivers/mtd/devices/slram.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*======================================================================
+
+ This driver provides a method to access memory not used by the kernel
+ itself (i.e. if the kernel command line mem=xxx is used). To actually
+ use slram at least mtdblock or mtdchar is required (for block or
+ character device access).
+
+ Usage:
+
+ if compiled as loadable module:
+ modprobe slram map=<name>,<start>,<end/offset>
+ if statically linked into the kernel use the following kernel cmd.line
+ slram=<name>,<start>,<end/offset>
+
+ <name>: name of the device that will be listed in /proc/mtd
+ <start>: start of the memory region, decimal or hex (0xabcdef)
+ <end/offset>: end of the memory region. It's possible to use +0x1234
+ to specify the offset instead of the absolute address
+
+ NOTE:
+ With slram it's only possible to map a contiguous memory region. Therefore,
+ if there's a device mapped somewhere in the specified region, slram will
+ fail to load (see the kernel log if modprobe fails).
+
+ -
+
+ Jochen Schaeuble <psionic@psionic.de>
+
+======================================================================*/
+
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <linux/mtd/mtd.h>
+
+#define SLRAM_MAX_DEVICES_PARAMS 6 /* 3 parameters / device */
+#define SLRAM_BLK_SZ 0x4000
+
+#define T(fmt, args...) printk(KERN_DEBUG fmt, ## args)
+#define E(fmt, args...) printk(KERN_NOTICE fmt, ## args)
+
+typedef struct slram_priv {
+ u_char *start;
+ u_char *end;
+} slram_priv_t;
+
+typedef struct slram_mtd_list {
+ struct mtd_info *mtdinfo;
+ struct slram_mtd_list *next;
+} slram_mtd_list_t;
+
+#ifdef MODULE
+static char *map[SLRAM_MAX_DEVICES_PARAMS];
+
+module_param_array(map, charp, NULL, 0);
+MODULE_PARM_DESC(map, "List of memory regions to map. \"map=<name>, <start>, <length / end>\"");
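+
+/*
+ * Example (hypothetical values): map 2 MiB starting at 16 MiB as "ext":
+ *   modprobe slram map=ext,0x1000000,+0x200000
+ */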
+#else
+static char *map;
+#endif
+
+static slram_mtd_list_t *slram_mtdlist = NULL;
+
+static int slram_erase(struct mtd_info *, struct erase_info *);
+static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, void **,
+ resource_size_t *);
+static int slram_unpoint(struct mtd_info *, loff_t, size_t);
+static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+
+static int slram_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ slram_priv_t *priv = mtd->priv;
+
+ memset(priv->start + instr->addr, 0xff, instr->len);
+
+ return(0);
+}
+
+static int slram_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ slram_priv_t *priv = mtd->priv;
+
+ *virt = priv->start + from;
+ *retlen = len;
+ return(0);
+}
+
+static int slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
+}
+
+static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ slram_priv_t *priv = mtd->priv;
+
+ memcpy(buf, priv->start + from, len);
+ *retlen = len;
+ return(0);
+}
+
+static int slram_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ slram_priv_t *priv = mtd->priv;
+
+ memcpy(priv->start + to, buf, len);
+ *retlen = len;
+ return(0);
+}
+
+/*====================================================================*/
+
+static int register_device(char *name, unsigned long start, unsigned long length)
+{
+ slram_mtd_list_t **curmtd;
+
+ curmtd = &slram_mtdlist;
+ while (*curmtd) {
+ curmtd = &(*curmtd)->next;
+ }
+
+ *curmtd = kmalloc(sizeof(slram_mtd_list_t), GFP_KERNEL);
+ if (!(*curmtd)) {
+ E("slram: Cannot allocate new MTD device.\n");
+ return(-ENOMEM);
+ }
+ (*curmtd)->mtdinfo = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+ (*curmtd)->next = NULL;
+
+ if ((*curmtd)->mtdinfo) {
+ (*curmtd)->mtdinfo->priv =
+ kzalloc(sizeof(slram_priv_t), GFP_KERNEL);
+
+ if (!(*curmtd)->mtdinfo->priv) {
+ kfree((*curmtd)->mtdinfo);
+ (*curmtd)->mtdinfo = NULL;
+ }
+ }
+
+ if (!(*curmtd)->mtdinfo) {
+ E("slram: Cannot allocate new MTD device.\n");
+ return(-ENOMEM);
+ }
+
+ if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start =
+ memremap(start, length,
+ MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC))) {
+ E("slram: memremap failed\n");
+ return -EIO;
+ }
+ ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end =
+ ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start + length;
+
+
+ (*curmtd)->mtdinfo->name = name;
+ (*curmtd)->mtdinfo->size = length;
+ (*curmtd)->mtdinfo->flags = MTD_CAP_RAM;
+ (*curmtd)->mtdinfo->_erase = slram_erase;
+ (*curmtd)->mtdinfo->_point = slram_point;
+ (*curmtd)->mtdinfo->_unpoint = slram_unpoint;
+ (*curmtd)->mtdinfo->_read = slram_read;
+ (*curmtd)->mtdinfo->_write = slram_write;
+ (*curmtd)->mtdinfo->owner = THIS_MODULE;
+ (*curmtd)->mtdinfo->type = MTD_RAM;
+ (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
+ (*curmtd)->mtdinfo->writesize = 1;
+
+ if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) {
+ E("slram: Failed to register new device\n");
+ memunmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
+ kfree((*curmtd)->mtdinfo->priv);
+ kfree((*curmtd)->mtdinfo);
+ return(-EAGAIN);
+ }
+ T("slram: Registered device %s from %luKiB to %luKiB\n", name,
+ (start / 1024), ((start + length) / 1024));
+ T("slram: Mapped from 0x%p to 0x%p\n",
+ ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start,
+ ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end);
+ return(0);
+}
+
+static void unregister_devices(void)
+{
+ slram_mtd_list_t *nextitem;
+
+ while (slram_mtdlist) {
+ nextitem = slram_mtdlist->next;
+ mtd_device_unregister(slram_mtdlist->mtdinfo);
+ memunmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
+ kfree(slram_mtdlist->mtdinfo->priv);
+ kfree(slram_mtdlist->mtdinfo);
+ kfree(slram_mtdlist);
+ slram_mtdlist = nextitem;
+ }
+}
+
+static unsigned long handle_unit(unsigned long value, char *unit)
+{
+ if ((*unit == 'M') || (*unit == 'm')) {
+ return(value * 1024 * 1024);
+ } else if ((*unit == 'K') || (*unit == 'k')) {
+ return(value * 1024);
+ }
+ return(value);
+}
+
+static int parse_cmdline(char *devname, char *szstart, char *szlength)
+{
+ char *buffer;
+ unsigned long devstart;
+ unsigned long devlength;
+
+ if ((!devname) || (!szstart) || (!szlength)) {
+ unregister_devices();
+ return(-EINVAL);
+ }
+
+ devstart = simple_strtoul(szstart, &buffer, 0);
+ devstart = handle_unit(devstart, buffer);
+
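+ /*
+ * Without a leading '+', <end/offset> is an absolute end address;
+ * with '+', it is a length relative to <start>.
+ */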
+ if (*(szlength) != '+') {
+ devlength = simple_strtoul(szlength, &buffer, 0);
+ devlength = handle_unit(devlength, buffer);
+ if (devlength < devstart)
+ goto err_out;
+
+ devlength -= devstart;
+ } else {
+ devlength = simple_strtoul(szlength + 1, &buffer, 0);
+ devlength = handle_unit(devlength, buffer);
+ }
+ T("slram: devname=%s, devstart=0x%lx, devlength=0x%lx\n",
+ devname, devstart, devlength);
+ if (devlength % SLRAM_BLK_SZ != 0)
+ goto err_out;
+
+ if ((devstart = register_device(devname, devstart, devlength))){
+ unregister_devices();
+ return((int)devstart);
+ }
+ return(0);
+
+err_out:
+ E("slram: Illegal length parameter.\n");
+ return(-EINVAL);
+}
+
+#ifndef MODULE
+
+static int __init mtd_slram_setup(char *str)
+{
+ map = str;
+ return(1);
+}
+
+__setup("slram=", mtd_slram_setup);
+
+#endif
+
+static int __init init_slram(void)
+{
+ char *devname;
+
+#ifndef MODULE
+ char *devstart;
+ char *devlength;
+
+ if (!map) {
+ E("slram: not enough parameters.\n");
+ return(-EINVAL);
+ }
+ while (map) {
+ devname = devstart = devlength = NULL;
+
+ if (!(devname = strsep(&map, ","))) {
+ E("slram: No devicename specified.\n");
+ break;
+ }
+ T("slram: devname = %s\n", devname);
+ if ((!map) || (!(devstart = strsep(&map, ",")))) {
+ E("slram: No devicestart specified.\n");
+ }
+ T("slram: devstart = %s\n", devstart);
+ if ((!map) || (!(devlength = strsep(&map, ",")))) {
+ E("slram: No devicelength / -end specified.\n");
+ }
+ T("slram: devlength = %s\n", devlength);
+ if (parse_cmdline(devname, devstart, devlength) != 0) {
+ return(-EINVAL);
+ }
+ }
+#else
+ int count;
+ int i;
+
+ for (count = 0; count < SLRAM_MAX_DEVICES_PARAMS && map[count];
+ count++) {
+ }
+
+ if ((count % 3 != 0) || (count == 0)) {
+ E("slram: not enough parameters.\n");
+ return(-EINVAL);
+ }
+ for (i = 0; i < (count / 3); i++) {
+ devname = map[i * 3];
+
+ if (parse_cmdline(devname, map[i * 3 + 1], map[i * 3 + 2])!=0) {
+ return(-EINVAL);
+ }
+
+ }
+#endif /* !MODULE */
+
+ return(0);
+}
+
+static void __exit cleanup_slram(void)
+{
+ unregister_devices();
+}
+
+module_init(init_slram);
+module_exit(cleanup_slram);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jochen Schaeuble <psionic@psionic.de>");
+MODULE_DESCRIPTION("MTD driver for uncached system RAM");
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
new file mode 100644
index 0000000000..0a35e5236a
--- /dev/null
+++ b/drivers/mtd/devices/spear_smi.c
@@ -0,0 +1,1104 @@
+/*
+ * SMI (Serial Memory Controller) device driver for Serial NOR Flash on
+ * SPEAr platform
+ * The serial NOR interface is largely based on m25p80.c; however, the SPI
+ * interface has been replaced by SMI.
+ *
+ * Copyright © 2010 STMicroelectronics.
+ * Ashish Priyadarshi
+ * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spear_smi.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+/* SMI clock rate */
+#define SMI_MAX_CLOCK_FREQ 50000000 /* 50 MHz */
+
+/* Maximum timeout to safely come out of erase or write busy conditions */
+#define SMI_PROBE_TIMEOUT (HZ / 10)
+#define SMI_MAX_TIME_OUT (3 * HZ)
+
+/* timeout for command completion */
+#define SMI_CMD_TIMEOUT (HZ / 10)
+
+/* registers of smi */
+#define SMI_CR1 0x0 /* SMI control register 1 */
+#define SMI_CR2 0x4 /* SMI control register 2 */
+#define SMI_SR 0x8 /* SMI status register */
+#define SMI_TR 0xC /* SMI transmit register */
+#define SMI_RR 0x10 /* SMI receive register */
+
+/* defines for control_reg 1 */
+#define BANK_EN (0xF << 0) /* enables all banks */
+#define DSEL_TIME (0x6 << 4) /* Deselect time 6 + 1 SMI_CK periods */
+#define SW_MODE (0x1 << 28) /* enables SW Mode */
+#define WB_MODE (0x1 << 29) /* Write Burst Mode */
+#define FAST_MODE (0x1 << 15) /* Fast Mode */
+#define HOLD1 (0x1 << 16) /* Clock Hold period selection */
+
+/* defines for control_reg 2 */
+#define SEND (0x1 << 7) /* Send data */
+#define TFIE (0x1 << 8) /* Transmission Flag Interrupt Enable */
+#define WCIE (0x1 << 9) /* Write Complete Interrupt Enable */
+#define RD_STATUS_REG (0x1 << 10) /* reads status reg */
+#define WE (0x1 << 11) /* Write Enable */
+
+#define TX_LEN_SHIFT 0
+#define RX_LEN_SHIFT 4
+#define BANK_SHIFT 12
+
+/* defines for status register */
+#define SR_WIP 0x1 /* Write in progress */
+#define SR_WEL 0x2 /* Write enable latch */
+#define SR_BP0 0x4 /* Block protect 0 */
+#define SR_BP1 0x8 /* Block protect 1 */
+#define SR_BP2 0x10 /* Block protect 2 */
+#define SR_SRWD 0x80 /* SR write protect */
+#define TFF 0x100 /* Transfer Finished Flag */
+#define WCF 0x200 /* Write Complete Flag */
+#define ERF1 0x400 /* Forbidden Write Request */
+#define ERF2 0x800 /* Forbidden Access */
+
+#define WM_SHIFT 12
+
+/* flash opcodes */
+#define OPCODE_RDID 0x9f /* Read JEDEC ID */
+
+/* Flash Device Ids maintenance section */
+
+/* data structure to maintain flash ids from different vendors */
+struct flash_device {
+ char *name;
+ u8 erase_cmd;
+ u32 device_id;
+ u32 pagesize;
+ unsigned long sectorsize;
+ unsigned long size_in_bytes;
+};
+
+#define FLASH_ID(n, es, id, psize, ssize, size) \
+{ \
+ .name = n, \
+ .erase_cmd = es, \
+ .device_id = id, \
+ .pagesize = psize, \
+ .sectorsize = ssize, \
+ .size_in_bytes = size \
+}
+
+static struct flash_device flash_devices[] = {
+ FLASH_ID("st m25p16" , 0xd8, 0x00152020, 0x100, 0x10000, 0x200000),
+ FLASH_ID("st m25p32" , 0xd8, 0x00162020, 0x100, 0x10000, 0x400000),
+ FLASH_ID("st m25p64" , 0xd8, 0x00172020, 0x100, 0x10000, 0x800000),
+ FLASH_ID("st m25p128" , 0xd8, 0x00182020, 0x100, 0x40000, 0x1000000),
+ FLASH_ID("st m25p05" , 0xd8, 0x00102020, 0x80 , 0x8000 , 0x10000),
+ FLASH_ID("st m25p10" , 0xd8, 0x00112020, 0x80 , 0x8000 , 0x20000),
+ FLASH_ID("st m25p20" , 0xd8, 0x00122020, 0x100, 0x10000, 0x40000),
+ FLASH_ID("st m25p40" , 0xd8, 0x00132020, 0x100, 0x10000, 0x80000),
+ FLASH_ID("st m25p80" , 0xd8, 0x00142020, 0x100, 0x10000, 0x100000),
+ FLASH_ID("st m45pe10" , 0xd8, 0x00114020, 0x100, 0x10000, 0x20000),
+ FLASH_ID("st m45pe20" , 0xd8, 0x00124020, 0x100, 0x10000, 0x40000),
+ FLASH_ID("st m45pe40" , 0xd8, 0x00134020, 0x100, 0x10000, 0x80000),
+ FLASH_ID("st m45pe80" , 0xd8, 0x00144020, 0x100, 0x10000, 0x100000),
+ FLASH_ID("sp s25fl004" , 0xd8, 0x00120201, 0x100, 0x10000, 0x80000),
+ FLASH_ID("sp s25fl008" , 0xd8, 0x00130201, 0x100, 0x10000, 0x100000),
+ FLASH_ID("sp s25fl016" , 0xd8, 0x00140201, 0x100, 0x10000, 0x200000),
+ FLASH_ID("sp s25fl032" , 0xd8, 0x00150201, 0x100, 0x10000, 0x400000),
+ FLASH_ID("sp s25fl064" , 0xd8, 0x00160201, 0x100, 0x10000, 0x800000),
+ FLASH_ID("atmel 25f512" , 0x52, 0x0065001F, 0x80 , 0x8000 , 0x10000),
+ FLASH_ID("atmel 25f1024" , 0x52, 0x0060001F, 0x100, 0x8000 , 0x20000),
+ FLASH_ID("atmel 25f2048" , 0x52, 0x0063001F, 0x100, 0x10000, 0x40000),
+ FLASH_ID("atmel 25f4096" , 0x52, 0x0064001F, 0x100, 0x10000, 0x80000),
+ FLASH_ID("atmel 25fs040" , 0xd7, 0x0004661F, 0x100, 0x10000, 0x80000),
+ FLASH_ID("mac 25l512" , 0xd8, 0x001020C2, 0x010, 0x10000, 0x10000),
+ FLASH_ID("mac 25l1005" , 0xd8, 0x001120C2, 0x010, 0x10000, 0x20000),
+ FLASH_ID("mac 25l2005" , 0xd8, 0x001220C2, 0x010, 0x10000, 0x40000),
+ FLASH_ID("mac 25l4005" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
+ FLASH_ID("mac 25l4005a" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
+ FLASH_ID("mac 25l8005" , 0xd8, 0x001420C2, 0x010, 0x10000, 0x100000),
+ FLASH_ID("mac 25l1605" , 0xd8, 0x001520C2, 0x100, 0x10000, 0x200000),
+ FLASH_ID("mac 25l1605a" , 0xd8, 0x001520C2, 0x010, 0x10000, 0x200000),
+ FLASH_ID("mac 25l3205" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
+ FLASH_ID("mac 25l3205a" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
+ FLASH_ID("mac 25l6405" , 0xd8, 0x001720C2, 0x100, 0x10000, 0x800000),
+};
+
+/* Define spear specific structures */
+
+struct spear_snor_flash;
+
+/**
+ * struct spear_smi - Structure for SMI Device
+ *
+ * @clk: functional clock
+ * @status: current status register of SMI.
+ * @clk_rate: functional clock rate of SMI (default: SMI_MAX_CLOCK_FREQ)
+ * @lock: lock to prevent parallel access of SMI.
+ * @io_base: base address for registers of SMI.
+ * @pdev: platform device
+ * @cmd_complete: queue to wait for command completion of NOR-flash.
+ * @num_flashes: number of flashes actually present on board.
+ * @flash: separate structure for each Serial NOR-flash attached to SMI.
+ */
+struct spear_smi {
+ struct clk *clk;
+ u32 status;
+ unsigned long clk_rate;
+ struct mutex lock;
+ void __iomem *io_base;
+ struct platform_device *pdev;
+ wait_queue_head_t cmd_complete;
+ u32 num_flashes;
+ struct spear_snor_flash *flash[MAX_NUM_FLASH_CHIP];
+};
+
+/**
+ * struct spear_snor_flash - Structure for Serial NOR Flash
+ *
+ * @bank: Bank number(0, 1, 2, 3) for each NOR-flash.
+ * @dev_id: Device ID of NOR-flash.
+ * @lock: lock to manage flash read, write and erase operations
+ * @mtd: MTD info for each NOR-flash.
+ * @num_parts: Total number of partition in each bank of NOR-flash.
+ * @parts: Partition info for each bank of NOR-flash.
+ * @page_size: Page size of NOR-flash.
+ * @base_addr: Base address of NOR-flash.
+ * @erase_cmd: erase command may vary on different flash types
+ * @fast_mode: flash supports read in fast mode
+ */
+struct spear_snor_flash {
+ u32 bank;
+ u32 dev_id;
+ struct mutex lock;
+ struct mtd_info mtd;
+ u32 num_parts;
+ struct mtd_partition *parts;
+ u32 page_size;
+ void __iomem *base_addr;
+ u8 erase_cmd;
+ u8 fast_mode;
+};
+
+static inline struct spear_snor_flash *get_flash_data(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct spear_snor_flash, mtd);
+}
+
+/**
+ * spear_smi_read_sr - Read status register of flash through SMI
+ * @dev: structure of SMI information.
+ * @bank: bank to which flash is connected
+ *
+ * This routine will return the status register of the flash chip present at the
+ * given bank.
+ */
+static int spear_smi_read_sr(struct spear_smi *dev, u32 bank)
+{
+ int ret;
+ u32 ctrlreg1;
+
+ mutex_lock(&dev->lock);
+ dev->status = 0; /* Will be set in interrupt handler */
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ /* program smi in hw mode */
+ writel(ctrlreg1 & ~(SW_MODE | WB_MODE), dev->io_base + SMI_CR1);
+
+ /* performing a rsr instruction in hw mode */
+ writel((bank << BANK_SHIFT) | RD_STATUS_REG | TFIE,
+ dev->io_base + SMI_CR2);
+
+ /* wait for tff */
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+
+ /* copy dev->status (lower 16 bits) in order to release lock */
+ if (ret > 0)
+ ret = dev->status & 0xffff;
+ else if (ret == 0)
+ ret = -ETIMEDOUT;
+
+ /* restore the ctrl regs state */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ writel(0, dev->io_base + SMI_CR2);
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+/**
+ * spear_smi_wait_till_ready - wait till flash is ready
+ * @dev: structure of SMI information.
+ * @bank: flash corresponding to this bank
+ * @timeout: timeout for busy wait condition
+ *
+ * This routine checks for the WIP (write in progress) bit in the status
+ * register. If successful the routine returns 0, else -EBUSY.
+ */
+static int spear_smi_wait_till_ready(struct spear_smi *dev, u32 bank,
+ unsigned long timeout)
+{
+ unsigned long finish;
+ int status;
+
+ finish = jiffies + timeout;
+ do {
+ status = spear_smi_read_sr(dev, bank);
+ if (status < 0) {
+ if (status == -ETIMEDOUT)
+ continue; /* try till finish */
+ return status;
+ } else if (!(status & SR_WIP)) {
+ return 0;
+ }
+
+ cond_resched();
+ } while (!time_after_eq(jiffies, finish));
+
+ dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n");
+ return -EBUSY;
+}
+
+/**
+ * spear_smi_int_handler - SMI Interrupt Handler.
+ * @irq: irq number
+ * @dev_id: structure of SMI device, embedded in dev_id.
+ *
+ * The handler clears all interrupt conditions and records the status in
+ * dev->status which is used by the driver later.
+ */
+static irqreturn_t spear_smi_int_handler(int irq, void *dev_id)
+{
+ u32 status = 0;
+ struct spear_smi *dev = dev_id;
+
+ status = readl(dev->io_base + SMI_SR);
+
+ if (unlikely(!status))
+ return IRQ_NONE;
+
+ /* clear all interrupt conditions */
+ writel(0, dev->io_base + SMI_SR);
+
+ /* copy the status register in dev->status */
+ dev->status |= status;
+
+ /* send the completion */
+ wake_up_interruptible(&dev->cmd_complete);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * spear_smi_hw_init - initializes the smi controller.
+ * @dev: structure of smi device
+ *
+ * This routine initializes the SMI controller with the default values.
+ */
+static void spear_smi_hw_init(struct spear_smi *dev)
+{
+ unsigned long rate = 0;
+ u32 prescale = 0;
+ u32 val;
+
+ rate = clk_get_rate(dev->clk);
+
+ /* functional clock of smi */
+ prescale = DIV_ROUND_UP(rate, dev->clk_rate);
+
+ /*
+ * setting the standard values, fast mode, prescaler for
+ * SMI_MAX_CLOCK_FREQ (50MHz) operation and bank enable
+ */
+ val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8);
+
+ mutex_lock(&dev->lock);
+ /* clear all interrupt conditions */
+ writel(0, dev->io_base + SMI_SR);
+
+ writel(val, dev->io_base + SMI_CR1);
+ mutex_unlock(&dev->lock);
+}
+
+/**
+ * get_flash_index - match chip id from a flash list.
+ * @flash_id: a valid nor flash chip id obtained from board.
+ *
+ * Try to validate the chip id by matching it against a list; if not found,
+ * simply return negative. In case of success, return the index into the
+ * flash_devices array.
+ */
+static int get_flash_index(u32 flash_id)
+{
+ int index;
+
+ /* Matches chip-id to entire list of 'serial-nor flash' ids */
+ for (index = 0; index < ARRAY_SIZE(flash_devices); index++) {
+ if (flash_devices[index].device_id == flash_id)
+ return index;
+ }
+
+ /* Memory chip is not listed and not supported */
+ return -ENODEV;
+}
+
+/**
+ * spear_smi_write_enable - Enable the flash to do write operation
+ * @dev: structure of SMI device
+ * @bank: enable write for flash connected to this bank
+ *
+ * Set write enable latch with Write Enable command.
+ * Returns 0 on success.
+ */
+static int spear_smi_write_enable(struct spear_smi *dev, u32 bank)
+{
+ int ret;
+ u32 ctrlreg1;
+
+ mutex_lock(&dev->lock);
+ dev->status = 0; /* Will be set in interrupt handler */
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ /* program smi in h/w mode */
+ writel(ctrlreg1 & ~SW_MODE, dev->io_base + SMI_CR1);
+
+ /* give the flash, write enable command */
+ writel((bank << BANK_SHIFT) | WE | TFIE, dev->io_base + SMI_CR2);
+
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+
+ /* restore the ctrl regs state */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ writel(0, dev->io_base + SMI_CR2);
+
+ if (ret == 0) {
+ ret = -EIO;
+ dev_err(&dev->pdev->dev,
+ "smi controller failed on write enable\n");
+ } else if (ret > 0) {
+ /* check whether write mode status is set for required bank */
+ if (dev->status & (1 << (bank + WM_SHIFT)))
+ ret = 0;
+ else {
+ dev_err(&dev->pdev->dev, "couldn't enable write\n");
+ ret = -EIO;
+ }
+ }
+
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static inline u32
+get_sector_erase_cmd(struct spear_snor_flash *flash, u32 offset)
+{
+ u32 cmd;
+ u8 *x = (u8 *)&cmd;
+
+ x[0] = flash->erase_cmd;
+ x[1] = offset >> 16;
+ x[2] = offset >> 8;
+ x[3] = offset;
+
+ return cmd;
+}
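+
+/*
+ * Byte-layout example (illustrative): with flash->erase_cmd = 0xd8 and
+ * offset = 0x10000, the bytes placed in memory are d8 01 00 00, i.e. the
+ * opcode followed by the 24-bit sector address, most significant byte first,
+ * ready to be written to SMI_TR as a single word.
+ */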
+
+/**
+ * spear_smi_erase_sector - erase one sector of flash
+ * @dev: structure of SMI information
+ * @bank: bank to which this command needs to be sent
+ * @command: erase command to be sent
+ * @bytes: size of the command in bytes
+ *
+ * Erase one sector of flash memory. @command carries the erase opcode and
+ * the sector address, as built by get_sector_erase_cmd() from any address
+ * within the sector to be erased.
+ * Returns 0 if successful, non-zero otherwise.
+ */
+static int spear_smi_erase_sector(struct spear_smi *dev,
+ u32 bank, u32 command, u32 bytes)
+{
+ u32 ctrlreg1 = 0;
+ int ret;
+
+ ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
+ if (ret)
+ return ret;
+
+ ret = spear_smi_write_enable(dev, bank);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ writel((ctrlreg1 | SW_MODE) & ~WB_MODE, dev->io_base + SMI_CR1);
+
+ /* send command in sw mode */
+ writel(command, dev->io_base + SMI_TR);
+
+ writel((bank << BANK_SHIFT) | SEND | TFIE | (bytes << TX_LEN_SHIFT),
+ dev->io_base + SMI_CR2);
+
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+
+ if (ret == 0) {
+ ret = -EIO;
+ dev_err(&dev->pdev->dev, "sector erase failed\n");
+ } else if (ret > 0)
+ ret = 0; /* success */
+
+ /* restore ctrl regs */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ writel(0, dev->io_base + SMI_CR2);
+
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+/**
+ * spear_mtd_erase - perform flash erase operation as requested by user
+ * @mtd: Provides the memory characteristics
+ * @e_info: Provides the erase information
+ *
+ * Erase an address range on the flash chip. The address range may span
+ * one or more erase sectors. Returns an error if there is a problem erasing.
+ */
+static int spear_mtd_erase(struct mtd_info *mtd, struct erase_info *e_info)
+{
+ struct spear_snor_flash *flash = get_flash_data(mtd);
+ struct spear_smi *dev = mtd->priv;
+ u32 addr, command, bank;
+ int len, ret;
+
+ if (!flash || !dev)
+ return -ENODEV;
+
+ bank = flash->bank;
+ if (bank > dev->num_flashes - 1) {
+ dev_err(&dev->pdev->dev, "Invalid Bank Num");
+ return -EINVAL;
+ }
+
+ addr = e_info->addr;
+ len = e_info->len;
+
+ mutex_lock(&flash->lock);
+
+ /* now erase sectors in loop */
+ while (len) {
+ command = get_sector_erase_cmd(flash, addr);
+ /* preparing the command for flash */
+ ret = spear_smi_erase_sector(dev, bank, command, 4);
+ if (ret) {
+ mutex_unlock(&flash->lock);
+ return ret;
+ }
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+ }
+
+ mutex_unlock(&flash->lock);
+
+ return 0;
+}
+
+/**
+ * spear_mtd_read - performs flash read operation as requested by the user
+ * @mtd: MTD information of the memory bank
+ * @from: Address from which to start reading
+ * @len: Number of bytes to be read
+ * @retlen: Filled with the number of bytes actually read
+ * @buf: Buffer to fill with the data read
+ *
+ * Read an address range from the flash chip. The address range
+ * may be any size provided it is within the physical boundaries.
+ * Returns 0 on success, non zero otherwise
+ */
+static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u8 *buf)
+{
+ struct spear_snor_flash *flash = get_flash_data(mtd);
+ struct spear_smi *dev = mtd->priv;
+ void __iomem *src;
+ u32 ctrlreg1, val;
+ int ret;
+
+ if (!flash || !dev)
+ return -ENODEV;
+
+ if (flash->bank > dev->num_flashes - 1) {
+ dev_err(&dev->pdev->dev, "Invalid Bank Num");
+ return -EINVAL;
+ }
+
+ /* select address as per bank number */
+ src = flash->base_addr + from;
+
+ mutex_lock(&flash->lock);
+
+ /* wait till previous write/erase is done. */
+ ret = spear_smi_wait_till_ready(dev, flash->bank, SMI_MAX_TIME_OUT);
+ if (ret) {
+ mutex_unlock(&flash->lock);
+ return ret;
+ }
+
+ mutex_lock(&dev->lock);
+ /* put smi in h/w mode, not write burst (WB) mode */
+ ctrlreg1 = val = readl(dev->io_base + SMI_CR1);
+ val &= ~(SW_MODE | WB_MODE);
+ if (flash->fast_mode)
+ val |= FAST_MODE;
+
+ writel(val, dev->io_base + SMI_CR1);
+
+ memcpy_fromio(buf, src, len);
+
+ /* restore ctrl reg1 */
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+ mutex_unlock(&dev->lock);
+
+ *retlen = len;
+ mutex_unlock(&flash->lock);
+
+ return 0;
+}
+
+/*
+ * The purpose of this function is to provide a memcpy_toio() that performs
+ * byte writes only. Its structure is inspired by the ARM implementation of
+ * _memcpy_toio(), which also does single-byte writes but cannot be used here:
+ * it is an implementation detail rather than part of the API, and its own
+ * comment states that it should eventually be optimized.
+ */
+static void spear_smi_memcpy_toio_b(volatile void __iomem *dest,
+ const void *src, size_t len)
+{
+ const unsigned char *from = src;
+
+ while (len) {
+ len--;
+ writeb(*from, dest);
+ from++;
+ dest++;
+ }
+}
+
+static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
+ void __iomem *dest, const void *src, size_t len)
+{
+ int ret;
+ u32 ctrlreg1;
+
+ /* wait until finished previous write command. */
+ ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
+ if (ret)
+ return ret;
+
+ /* put smi in write enable */
+ ret = spear_smi_write_enable(dev, bank);
+ if (ret)
+ return ret;
+
+ /* put smi in hw, write burst mode */
+ mutex_lock(&dev->lock);
+
+ ctrlreg1 = readl(dev->io_base + SMI_CR1);
+ writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1);
+
+ /*
+ * In Write Burst mode (WB_MODE), the specification states that writes must
+ * be:
+ * - incremental
+ * - of the same size
+ * The ARM implementation of memcpy_toio() minimizes the number of I/O
+ * accesses by using as many 4-byte writes as possible, surrounded by
+ * 2-byte/1-byte accesses if:
+ * - the destination is not 4-byte aligned
+ * - the length is not a multiple of 4 bytes.
+ * Avoid this alternation of write-access sizes by using our own byte-access
+ * helper if at least one of the two conditions above is true.
+ */
+ if (IS_ALIGNED(len, sizeof(u32)) &&
+ IS_ALIGNED((uintptr_t)dest, sizeof(u32)))
+ memcpy_toio(dest, src, len);
+ else
+ spear_smi_memcpy_toio_b(dest, src, len);
+
+ writel(ctrlreg1, dev->io_base + SMI_CR1);
+
+ mutex_unlock(&dev->lock);
+ return 0;
+}
+
+/**
+ * spear_mtd_write - performs write operation as requested by the user.
+ * @mtd: MTD information of the memory bank.
+ * @to: Address to write to.
+ * @len: Number of bytes to be written.
+ * @retlen: Filled with the number of bytes actually written.
+ * @buf: Buffer from which the data is taken.
+ *
+ * Write an address range to the flash chip. Data must be written in
+ * flash_page_size chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ * Returns 0 on success, non zero otherwise
+ */
+static int spear_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u8 *buf)
+{
+ struct spear_snor_flash *flash = get_flash_data(mtd);
+ struct spear_smi *dev = mtd->priv;
+ void __iomem *dest;
+ u32 page_offset, page_size;
+ int ret;
+
+ if (!flash || !dev)
+ return -ENODEV;
+
+ if (flash->bank > dev->num_flashes - 1) {
+ dev_err(&dev->pdev->dev, "Invalid Bank Num");
+ return -EINVAL;
+ }
+
+ /* select address as per bank number */
+ dest = flash->base_addr + to;
+ mutex_lock(&flash->lock);
+
+ page_offset = (u32)to % flash->page_size;
+
+ /* do all the bytes fit onto one page? */
+ if (page_offset + len <= flash->page_size) {
+ ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf, len);
+ if (!ret)
+ *retlen += len;
+ } else {
+ u32 i;
+
+ /* the size of data remaining on the first page */
+ page_size = flash->page_size - page_offset;
+
+ ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf,
+ page_size);
+ if (ret)
+ goto err_write;
+ else
+ *retlen += page_size;
+
+ /* write everything in pagesize chunks */
+ for (i = page_size; i < len; i += page_size) {
+ page_size = len - i;
+ if (page_size > flash->page_size)
+ page_size = flash->page_size;
+
+ ret = spear_smi_cpy_toio(dev, flash->bank, dest + i,
+ buf + i, page_size);
+ if (ret)
+ break;
+ else
+ *retlen += page_size;
+ }
+ }
+
+err_write:
+ mutex_unlock(&flash->lock);
+
+ return ret;
+}
+
+/**
+ * spear_smi_probe_flash - Detects the NOR Flash chip.
+ * @dev: structure of SMI information.
+ * @bank: bank on which flash must be probed
+ *
+ * This routine checks whether a flash chip exists on the given memory bank.
+ * Returns the index of the probed flash in the flash devices array, or a
+ * negative error code if no supported chip is found.
+ */
+static int spear_smi_probe_flash(struct spear_smi *dev, u32 bank)
+{
+ int ret;
+ u32 val = 0;
+
+ ret = spear_smi_wait_till_ready(dev, bank, SMI_PROBE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ dev->status = 0; /* Will be set in interrupt handler */
+ /* put smi in sw mode */
+ val = readl(dev->io_base + SMI_CR1);
+ writel(val | SW_MODE, dev->io_base + SMI_CR1);
+
+ /* send readid command in sw mode */
+ writel(OPCODE_RDID, dev->io_base + SMI_TR);
+
+ val = (bank << BANK_SHIFT) | SEND | (1 << TX_LEN_SHIFT) |
+ (3 << RX_LEN_SHIFT) | TFIE;
+ writel(val, dev->io_base + SMI_CR2);
+
+ /* wait for TFF */
+ ret = wait_event_interruptible_timeout(dev->cmd_complete,
+ dev->status & TFF, SMI_CMD_TIMEOUT);
+ if (ret <= 0) {
+ ret = -ENODEV;
+ goto err_probe;
+ }
+
+ /* get memory chip id */
+ val = readl(dev->io_base + SMI_RR);
+ val &= 0x00ffffff;
+ ret = get_flash_index(val);
+
+err_probe:
+ /* clear sw mode */
+ val = readl(dev->io_base + SMI_CR1);
+ writel(val & ~SW_MODE, dev->io_base + SMI_CR1);
+
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+
+#ifdef CONFIG_OF
+static int spear_smi_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device_node *pp;
+ const __be32 *addr;
+ u32 val;
+ int len;
+ int i = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ of_property_read_u32(np, "clock-rate", &val);
+ pdata->clk_rate = val;
+
+ pdata->board_flash_info = devm_kzalloc(&pdev->dev,
+ sizeof(*pdata->board_flash_info),
+ GFP_KERNEL);
+ if (!pdata->board_flash_info)
+ return -ENOMEM;
+
+ /* Fill structs for each subnode (flash device) */
+ for_each_child_of_node(np, pp) {
+ pdata->np[i] = pp;
+
+ /* Read base-addr and size from DT */
+ addr = of_get_property(pp, "reg", &len);
+ pdata->board_flash_info->mem_base = be32_to_cpup(&addr[0]);
+ pdata->board_flash_info->size = be32_to_cpup(&addr[1]);
+
+ pdata->board_flash_info->fast_mode =
+ of_property_read_bool(pp, "st,smi-fast-mode");
+
+ i++;
+ }
+
+ pdata->num_flashes = i;
+
+ return 0;
+}
+#else
+static int spear_smi_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ return -ENOSYS;
+}
+#endif
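+
+/*
+ * Illustrative device tree fragment matched by the CONFIG_OF path above.
+ * Node names and addresses are hypothetical; only the properties actually
+ * parsed by spear_smi_probe_config_dt() are shown:
+ *
+ *	smi: flash@fc000000 {
+ *		compatible = "st,spear600-smi";
+ *		clock-rate = <50000000>;
+ *
+ *		flash@f8000000 {
+ *			reg = <0xf8000000 0x800000>;
+ *			st,smi-fast-mode;
+ *		};
+ *	};
+ */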
+
+static int spear_smi_setup_banks(struct platform_device *pdev,
+ u32 bank, struct device_node *np)
+{
+ struct spear_smi *dev = platform_get_drvdata(pdev);
+ struct spear_smi_flash_info *flash_info;
+ struct spear_smi_plat_data *pdata;
+ struct spear_snor_flash *flash;
+ struct mtd_partition *parts = NULL;
+ int count = 0;
+ int flash_index;
+ int ret = 0;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (bank > pdata->num_flashes - 1)
+ return -EINVAL;
+
+ flash_info = &pdata->board_flash_info[bank];
+ if (!flash_info)
+ return -ENODEV;
+
+ flash = devm_kzalloc(&pdev->dev, sizeof(*flash), GFP_ATOMIC);
+ if (!flash)
+ return -ENOMEM;
+ flash->bank = bank;
+ flash->fast_mode = flash_info->fast_mode ? 1 : 0;
+ mutex_init(&flash->lock);
+
+ /* verify whether nor flash is really present on board */
+ flash_index = spear_smi_probe_flash(dev, bank);
+ if (flash_index < 0) {
+ dev_info(&dev->pdev->dev, "smi-nor%d not found\n", bank);
+ return flash_index;
+ }
+ /* map the memory for nor flash chip */
+ flash->base_addr = devm_ioremap(&pdev->dev, flash_info->mem_base,
+ flash_info->size);
+ if (!flash->base_addr)
+ return -EIO;
+
+ dev->flash[bank] = flash;
+ flash->mtd.priv = dev;
+
+ if (flash_info->name)
+ flash->mtd.name = flash_info->name;
+ else
+ flash->mtd.name = flash_devices[flash_index].name;
+
+ flash->mtd.dev.parent = &pdev->dev;
+ mtd_set_of_node(&flash->mtd, np);
+ flash->mtd.type = MTD_NORFLASH;
+ flash->mtd.writesize = 1;
+ flash->mtd.flags = MTD_CAP_NORFLASH;
+ flash->mtd.size = flash_info->size;
+ flash->mtd.erasesize = flash_devices[flash_index].sectorsize;
+ flash->page_size = flash_devices[flash_index].pagesize;
+ flash->mtd.writebufsize = flash->page_size;
+ flash->erase_cmd = flash_devices[flash_index].erase_cmd;
+ flash->mtd._erase = spear_mtd_erase;
+ flash->mtd._read = spear_mtd_read;
+ flash->mtd._write = spear_mtd_write;
+ flash->dev_id = flash_devices[flash_index].device_id;
+
+ dev_info(&dev->pdev->dev, "mtd .name=%s .size=%llx(%lluM)\n",
+ flash->mtd.name, flash->mtd.size,
+ flash->mtd.size / (1024 * 1024));
+
+ dev_info(&dev->pdev->dev, ".erasesize = 0x%x(%uK)\n",
+ flash->mtd.erasesize, flash->mtd.erasesize / 1024);
+
+#ifndef CONFIG_OF
+ if (flash_info->partitions) {
+ parts = flash_info->partitions;
+ count = flash_info->nr_partitions;
+ }
+#endif
+
+ ret = mtd_device_register(&flash->mtd, parts, count);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * spear_smi_probe - Entry routine
+ * @pdev: platform device structure
+ *
+ * This is the first routine invoked during booting; it does all the
+ * initialization/allocation work, looks for the available memory banks, and
+ * properly initializes each one it finds.
+ * Returns 0 on success, non zero otherwise
+ */
+static int spear_smi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spear_smi_plat_data *pdata = NULL;
+ struct spear_smi *dev;
+ int irq, ret = 0;
+ int i;
+
+ if (np) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ pdev->dev.platform_data = pdata;
+ ret = spear_smi_probe_config_dt(pdev, np);
+ if (ret) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "no platform data\n");
+ goto err;
+ }
+ } else {
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "no platform data\n");
+ goto err;
+ }
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev->io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dev->io_base)) {
+ ret = PTR_ERR(dev->io_base);
+ goto err;
+ }
+
+ dev->pdev = pdev;
+ dev->clk_rate = pdata->clk_rate;
+
+ if (dev->clk_rate > SMI_MAX_CLOCK_FREQ)
+ dev->clk_rate = SMI_MAX_CLOCK_FREQ;
+
+ dev->num_flashes = pdata->num_flashes;
+
+ if (dev->num_flashes > MAX_NUM_FLASH_CHIP) {
+ dev_err(&pdev->dev, "exceeding max number of flashes\n");
+ dev->num_flashes = MAX_NUM_FLASH_CHIP;
+ }
+
+ dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ ret = PTR_ERR(dev->clk);
+ goto err;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, spear_smi_int_handler, 0,
+ pdev->name, dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n");
+ goto err;
+ }
+
+ mutex_init(&dev->lock);
+ init_waitqueue_head(&dev->cmd_complete);
+ spear_smi_hw_init(dev);
+ platform_set_drvdata(pdev, dev);
+
+ /* loop for each serial nor-flash which is connected to smi */
+ for (i = 0; i < dev->num_flashes; i++) {
+ ret = spear_smi_setup_banks(pdev, i, pdata->np[i]);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "bank setup failed\n");
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+/**
+ * spear_smi_remove - Exit routine
+ * @pdev: platform device structure
+ *
+ * Free all allocations and delete the partitions.
+ */
+static int spear_smi_remove(struct platform_device *pdev)
+{
+ struct spear_smi *dev;
+ struct spear_snor_flash *flash;
+ int i;
+
+ dev = platform_get_drvdata(pdev);
+
+ /* clean up for all nor flash */
+ for (i = 0; i < dev->num_flashes; i++) {
+ flash = dev->flash[i];
+ if (!flash)
+ continue;
+
+ /* clean up mtd stuff */
+ WARN_ON(mtd_device_unregister(&flash->mtd));
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int spear_smi_suspend(struct device *dev)
+{
+ struct spear_smi *sdev = dev_get_drvdata(dev);
+
+ if (sdev && sdev->clk)
+ clk_disable_unprepare(sdev->clk);
+
+ return 0;
+}
+
+static int spear_smi_resume(struct device *dev)
+{
+ struct spear_smi *sdev = dev_get_drvdata(dev);
+ int ret = -EPERM;
+
+ if (sdev && sdev->clk)
+ ret = clk_prepare_enable(sdev->clk);
+
+ if (!ret)
+ spear_smi_hw_init(sdev);
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(spear_smi_pm_ops, spear_smi_suspend, spear_smi_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id spear_smi_id_table[] = {
+ { .compatible = "st,spear600-smi" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spear_smi_id_table);
+#endif
+
+static struct platform_driver spear_smi_driver = {
+ .driver = {
+ .name = "smi",
+ .bus = &platform_bus_type,
+ .of_match_table = of_match_ptr(spear_smi_id_table),
+ .pm = &spear_smi_pm_ops,
+ },
+ .probe = spear_smi_probe,
+ .remove = spear_smi_remove,
+};
+module_platform_driver(spear_smi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.linux.kernel@gmail.com>");
+MODULE_DESCRIPTION("MTD SMI driver for serial nor flash chips");
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
new file mode 100644
index 0000000000..8813994ce9
--- /dev/null
+++ b/drivers/mtd/devices/sst25l.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sst25l.c
+ *
+ * Driver for SST25L SPI Flash chips
+ *
+ * Copyright © 2009 Bluewater Systems Ltd
+ * Author: Andre Renaud <andre@bluewatersys.com>
+ * Author: Ryan Mallon
+ *
+ * Based on m25p80.c
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+
+/* Erases can take up to 3 seconds! */
+#define MAX_READY_WAIT_JIFFIES msecs_to_jiffies(3000)
+
+#define SST25L_CMD_WRSR 0x01 /* Write status register */
+#define SST25L_CMD_WRDI 0x04 /* Write disable */
+#define SST25L_CMD_RDSR 0x05 /* Read status register */
+#define SST25L_CMD_WREN 0x06 /* Write enable */
+#define SST25L_CMD_READ 0x03 /* High speed read */
+
+#define SST25L_CMD_EWSR 0x50 /* Enable write status register */
+#define SST25L_CMD_SECTOR_ERASE 0x20 /* Erase sector */
+#define SST25L_CMD_READ_ID 0x90 /* Read device ID */
+#define SST25L_CMD_AAI_PROGRAM 0xaf /* Auto address increment */
+
+#define SST25L_STATUS_BUSY (1 << 0) /* Chip is busy */
+#define SST25L_STATUS_WREN (1 << 1) /* Write enabled */
+#define SST25L_STATUS_BP0 (1 << 2) /* Block protection 0 */
+#define SST25L_STATUS_BP1 (1 << 3) /* Block protection 1 */
+
+struct sst25l_flash {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct mtd_info mtd;
+};
+
+struct flash_info {
+ const char *name;
+ uint16_t device_id;
+ unsigned page_size;
+ unsigned nr_pages;
+ unsigned erase_size;
+};
+
+#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
+
+static struct flash_info sst25l_flash_info[] = {
+ {"sst25lf020a", 0xbf43, 256, 1024, 4096},
+ {"sst25lf040a", 0xbf44, 256, 2048, 4096},
+};
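+
+/*
+ * Capacity follows from page_size * nr_pages: the sst25lf020a above is
+ * 256 B * 1024 pages = 256 KiB, the sst25lf040a 256 B * 2048 pages = 512 KiB.
+ */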
+
+static int sst25l_status(struct sst25l_flash *flash, int *status)
+{
+ struct spi_message m;
+ struct spi_transfer t;
+ unsigned char cmd_resp[2];
+ int err;
+
+ spi_message_init(&m);
+ memset(&t, 0, sizeof(struct spi_transfer));
+
+ cmd_resp[0] = SST25L_CMD_RDSR;
+ cmd_resp[1] = 0xff;
+ t.tx_buf = cmd_resp;
+ t.rx_buf = cmd_resp;
+ t.len = sizeof(cmd_resp);
+ spi_message_add_tail(&t, &m);
+ err = spi_sync(flash->spi, &m);
+ if (err < 0)
+ return err;
+
+ *status = cmd_resp[1];
+ return 0;
+}
+
+static int sst25l_write_enable(struct sst25l_flash *flash, int enable)
+{
+ unsigned char command[2];
+ int status, err;
+
+ command[0] = enable ? SST25L_CMD_WREN : SST25L_CMD_WRDI;
+ err = spi_write(flash->spi, command, 1);
+ if (err)
+ return err;
+
+ command[0] = SST25L_CMD_EWSR;
+ err = spi_write(flash->spi, command, 1);
+ if (err)
+ return err;
+
+ command[0] = SST25L_CMD_WRSR;
+ command[1] = enable ? 0 : SST25L_STATUS_BP0 | SST25L_STATUS_BP1;
+ err = spi_write(flash->spi, command, 2);
+ if (err)
+ return err;
+
+ if (enable) {
+ err = sst25l_status(flash, &status);
+ if (err)
+ return err;
+ if (!(status & SST25L_STATUS_WREN))
+ return -EROFS;
+ }
+
+ return 0;
+}
+
+static int sst25l_wait_till_ready(struct sst25l_flash *flash)
+{
+ unsigned long deadline;
+ int status, err;
+
+ deadline = jiffies + MAX_READY_WAIT_JIFFIES;
+ do {
+ err = sst25l_status(flash, &status);
+ if (err)
+ return err;
+ if (!(status & SST25L_STATUS_BUSY))
+ return 0;
+
+ cond_resched();
+ } while (!time_after_eq(jiffies, deadline));
+
+ return -ETIMEDOUT;
+}
+
+static int sst25l_erase_sector(struct sst25l_flash *flash, uint32_t offset)
+{
+ unsigned char command[4];
+ int err;
+
+ err = sst25l_write_enable(flash, 1);
+ if (err)
+ return err;
+
+ command[0] = SST25L_CMD_SECTOR_ERASE;
+ command[1] = offset >> 16;
+ command[2] = offset >> 8;
+ command[3] = offset;
+ err = spi_write(flash->spi, command, 4);
+ if (err)
+ return err;
+
+ err = sst25l_wait_till_ready(flash);
+ if (err)
+ return err;
+
+ return sst25l_write_enable(flash, 0);
+}
+
+static int sst25l_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct sst25l_flash *flash = to_sst25l_flash(mtd);
+ uint32_t addr, end;
+ int err;
+
+ /* Sanity checks */
+ if ((uint32_t)instr->len % mtd->erasesize)
+ return -EINVAL;
+
+ if ((uint32_t)instr->addr % mtd->erasesize)
+ return -EINVAL;
+
+ addr = instr->addr;
+ end = addr + instr->len;
+
+ mutex_lock(&flash->lock);
+
+ err = sst25l_wait_till_ready(flash);
+ if (err) {
+ mutex_unlock(&flash->lock);
+ return err;
+ }
+
+ while (addr < end) {
+ err = sst25l_erase_sector(flash, addr);
+ if (err) {
+ mutex_unlock(&flash->lock);
+ dev_err(&flash->spi->dev, "Erase failed\n");
+ return err;
+ }
+
+ addr += mtd->erasesize;
+ }
+
+ mutex_unlock(&flash->lock);
+
+ return 0;
+}
+
+static int sst25l_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, unsigned char *buf)
+{
+ struct sst25l_flash *flash = to_sst25l_flash(mtd);
+ struct spi_transfer transfer[2];
+ struct spi_message message;
+ unsigned char command[4];
+ int ret;
+
+ spi_message_init(&message);
+ memset(&transfer, 0, sizeof(transfer));
+
+ command[0] = SST25L_CMD_READ;
+ command[1] = from >> 16;
+ command[2] = from >> 8;
+ command[3] = from;
+
+ transfer[0].tx_buf = command;
+ transfer[0].len = sizeof(command);
+ spi_message_add_tail(&transfer[0], &message);
+
+ transfer[1].rx_buf = buf;
+ transfer[1].len = len;
+ spi_message_add_tail(&transfer[1], &message);
+
+ mutex_lock(&flash->lock);
+
+ /* Wait for previous write/erase to complete */
+ ret = sst25l_wait_till_ready(flash);
+ if (ret) {
+ mutex_unlock(&flash->lock);
+ return ret;
+ }
+
+ spi_sync(flash->spi, &message);
+
+ if (retlen && message.actual_length > sizeof(command))
+ *retlen += message.actual_length - sizeof(command);
+
+ mutex_unlock(&flash->lock);
+ return 0;
+}
+
+static int sst25l_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const unsigned char *buf)
+{
+ struct sst25l_flash *flash = to_sst25l_flash(mtd);
+ int i, j, ret, bytes, copied = 0;
+ unsigned char command[5];
+
+ if ((uint32_t)to % mtd->writesize)
+ return -EINVAL;
+
+ mutex_lock(&flash->lock);
+
+ ret = sst25l_write_enable(flash, 1);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < len; i += mtd->writesize) {
+ ret = sst25l_wait_till_ready(flash);
+ if (ret)
+ goto out;
+
+ /* Write the first byte of the page */
+ command[0] = SST25L_CMD_AAI_PROGRAM;
+ command[1] = (to + i) >> 16;
+ command[2] = (to + i) >> 8;
+ command[3] = (to + i);
+ command[4] = buf[i];
+ ret = spi_write(flash->spi, command, 5);
+ if (ret < 0)
+ goto out;
+ copied++;
+
+ /*
+ * Write the remaining bytes using auto address
+ * increment mode
+ */
+ bytes = min_t(uint32_t, mtd->writesize, len - i);
+ for (j = 1; j < bytes; j++, copied++) {
+ ret = sst25l_wait_till_ready(flash);
+ if (ret)
+ goto out;
+
+ command[1] = buf[i + j];
+ ret = spi_write(flash->spi, command, 2);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ ret = sst25l_write_enable(flash, 0);
+
+ if (retlen)
+ *retlen = copied;
+
+ mutex_unlock(&flash->lock);
+ return ret;
+}
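+
+/*
+ * AAI programming pattern used above (illustrative summary): the first
+ * transfer sends the 0xaf opcode, a 3-byte address and one data byte; each
+ * subsequent transfer resends just the opcode plus the next data byte, with
+ * the device incrementing the address internally, until the final
+ * write-disable terminates the sequence.
+ */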
+
+static struct flash_info *sst25l_match_device(struct spi_device *spi)
+{
+ struct flash_info *flash_info = NULL;
+ struct spi_message m;
+ struct spi_transfer t;
+ unsigned char cmd_resp[6];
+ int i, err;
+ uint16_t id;
+
+ spi_message_init(&m);
+ memset(&t, 0, sizeof(struct spi_transfer));
+
+ cmd_resp[0] = SST25L_CMD_READ_ID;
+ cmd_resp[1] = 0;
+ cmd_resp[2] = 0;
+ cmd_resp[3] = 0;
+ cmd_resp[4] = 0xff;
+ cmd_resp[5] = 0xff;
+ t.tx_buf = cmd_resp;
+ t.rx_buf = cmd_resp;
+ t.len = sizeof(cmd_resp);
+ spi_message_add_tail(&t, &m);
+ err = spi_sync(spi, &m);
+ if (err < 0) {
+ dev_err(&spi->dev, "error reading device id\n");
+ return NULL;
+ }
+
+ id = (cmd_resp[4] << 8) | cmd_resp[5];
+
+ for (i = 0; i < ARRAY_SIZE(sst25l_flash_info); i++)
+ if (sst25l_flash_info[i].device_id == id)
+ flash_info = &sst25l_flash_info[i];
+
+ if (!flash_info)
+ dev_err(&spi->dev, "unknown id %.4x\n", id);
+
+ return flash_info;
+}
+
+static int sst25l_probe(struct spi_device *spi)
+{
+ struct flash_info *flash_info;
+ struct sst25l_flash *flash;
+ struct flash_platform_data *data;
+ int ret;
+
+ flash_info = sst25l_match_device(spi);
+ if (!flash_info)
+ return -ENODEV;
+
+ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
+ if (!flash)
+ return -ENOMEM;
+
+ flash->spi = spi;
+ mutex_init(&flash->lock);
+ spi_set_drvdata(spi, flash);
+
+ data = dev_get_platdata(&spi->dev);
+ if (data && data->name)
+ flash->mtd.name = data->name;
+
+ flash->mtd.dev.parent = &spi->dev;
+ flash->mtd.type = MTD_NORFLASH;
+ flash->mtd.flags = MTD_CAP_NORFLASH;
+ flash->mtd.erasesize = flash_info->erase_size;
+ flash->mtd.writesize = flash_info->page_size;
+ flash->mtd.writebufsize = flash_info->page_size;
+ flash->mtd.size = flash_info->page_size * flash_info->nr_pages;
+ flash->mtd._erase = sst25l_erase;
+ flash->mtd._read = sst25l_read;
+ flash->mtd._write = sst25l_write;
+
+ dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name,
+ (long long)flash->mtd.size >> 10);
+
+ pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) "
+ ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
+ flash->mtd.name,
+ (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
+ flash->mtd.erasesize, flash->mtd.erasesize / 1024,
+ flash->mtd.numeraseregions);
+
+
+ ret = mtd_device_register(&flash->mtd, data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
+ if (ret)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void sst25l_remove(struct spi_device *spi)
+{
+ struct sst25l_flash *flash = spi_get_drvdata(spi);
+
+ WARN_ON(mtd_device_unregister(&flash->mtd));
+}
+
+static struct spi_driver sst25l_driver = {
+ .driver = {
+ .name = "sst25l",
+ },
+ .probe = sst25l_probe,
+ .remove = sst25l_remove,
+};
+
+module_spi_driver(sst25l_driver);
+
+MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips");
+MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, "
+ "Ryan Mallon");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
new file mode 100644
index 0000000000..95530cbbb1
--- /dev/null
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -0,0 +1,2148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * st_spi_fsm.c - ST Fast Sequence Mode (FSM) Serial Flash Controller
+ *
+ * Author: Angus Clark <angus.clark@st.com>
+ *
+ * Copyright (C) 2010-2014 STMicroelectronics Limited
+ *
+ * JEDEC probe based on drivers/mtd/devices/m25p80.c
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+
+#include "serial_flash_cmds.h"
+
+/*
+ * FSM SPI Controller Registers
+ */
+#define SPI_CLOCKDIV 0x0010
+#define SPI_MODESELECT 0x0018
+#define SPI_CONFIGDATA 0x0020
+#define SPI_STA_MODE_CHANGE 0x0028
+#define SPI_FAST_SEQ_TRANSFER_SIZE 0x0100
+#define SPI_FAST_SEQ_ADD1 0x0104
+#define SPI_FAST_SEQ_ADD2 0x0108
+#define SPI_FAST_SEQ_ADD_CFG 0x010c
+#define SPI_FAST_SEQ_OPC1 0x0110
+#define SPI_FAST_SEQ_OPC2 0x0114
+#define SPI_FAST_SEQ_OPC3 0x0118
+#define SPI_FAST_SEQ_OPC4 0x011c
+#define SPI_FAST_SEQ_OPC5 0x0120
+#define SPI_MODE_BITS 0x0124
+#define SPI_DUMMY_BITS 0x0128
+#define SPI_FAST_SEQ_FLASH_STA_DATA 0x012c
+#define SPI_FAST_SEQ_1 0x0130
+#define SPI_FAST_SEQ_2 0x0134
+#define SPI_FAST_SEQ_3 0x0138
+#define SPI_FAST_SEQ_4 0x013c
+#define SPI_FAST_SEQ_CFG 0x0140
+#define SPI_FAST_SEQ_STA 0x0144
+#define SPI_QUAD_BOOT_SEQ_INIT_1 0x0148
+#define SPI_QUAD_BOOT_SEQ_INIT_2 0x014c
+#define SPI_QUAD_BOOT_READ_SEQ_1 0x0150
+#define SPI_QUAD_BOOT_READ_SEQ_2 0x0154
+#define SPI_PROGRAM_ERASE_TIME 0x0158
+#define SPI_MULT_PAGE_REPEAT_SEQ_1 0x015c
+#define SPI_MULT_PAGE_REPEAT_SEQ_2 0x0160
+#define SPI_STATUS_WR_TIME_REG 0x0164
+#define SPI_FAST_SEQ_DATA_REG 0x0300
+
+/*
+ * Register: SPI_MODESELECT
+ */
+#define SPI_MODESELECT_CONTIG 0x01
+#define SPI_MODESELECT_FASTREAD 0x02
+#define SPI_MODESELECT_DUALIO 0x04
+#define SPI_MODESELECT_FSM 0x08
+#define SPI_MODESELECT_QUADBOOT 0x10
+
+/*
+ * Register: SPI_CONFIGDATA
+ */
+#define SPI_CFG_DEVICE_ST 0x1
+#define SPI_CFG_DEVICE_ATMEL 0x4
+#define SPI_CFG_MIN_CS_HIGH(x) (((x) & 0xfff) << 4)
+#define SPI_CFG_CS_SETUPHOLD(x) (((x) & 0xff) << 16)
+#define SPI_CFG_DATA_HOLD(x) (((x) & 0xff) << 24)
+
+#define SPI_CFG_DEFAULT_MIN_CS_HIGH SPI_CFG_MIN_CS_HIGH(0x0AA)
+#define SPI_CFG_DEFAULT_CS_SETUPHOLD SPI_CFG_CS_SETUPHOLD(0xA0)
+#define SPI_CFG_DEFAULT_DATA_HOLD SPI_CFG_DATA_HOLD(0x00)
+
+/*
+ * Register: SPI_FAST_SEQ_TRANSFER_SIZE
+ */
+#define TRANSFER_SIZE(x) ((x) * 8)
+
+/*
+ * Register: SPI_FAST_SEQ_ADD_CFG
+ */
+#define ADR_CFG_CYCLES_ADD1(x) ((x) << 0)
+#define ADR_CFG_PADS_1_ADD1 (0x0 << 6)
+#define ADR_CFG_PADS_2_ADD1 (0x1 << 6)
+#define ADR_CFG_PADS_4_ADD1 (0x3 << 6)
+#define ADR_CFG_CSDEASSERT_ADD1 (1 << 8)
+#define ADR_CFG_CYCLES_ADD2(x) ((x) << (0+16))
+#define ADR_CFG_PADS_1_ADD2 (0x0 << (6+16))
+#define ADR_CFG_PADS_2_ADD2 (0x1 << (6+16))
+#define ADR_CFG_PADS_4_ADD2 (0x3 << (6+16))
+#define ADR_CFG_CSDEASSERT_ADD2 (1 << (8+16))
+
+/*
+ * Register: SPI_FAST_SEQ_n
+ */
+#define SEQ_OPC_OPCODE(x) ((x) << 0)
+#define SEQ_OPC_CYCLES(x) ((x) << 8)
+#define SEQ_OPC_PADS_1 (0x0 << 14)
+#define SEQ_OPC_PADS_2 (0x1 << 14)
+#define SEQ_OPC_PADS_4 (0x3 << 14)
+#define SEQ_OPC_CSDEASSERT (1 << 16)
+
+/*
+ * Register: SPI_FAST_SEQ_CFG
+ */
+#define SEQ_CFG_STARTSEQ (1 << 0)
+#define SEQ_CFG_SWRESET (1 << 5)
+#define SEQ_CFG_CSDEASSERT (1 << 6)
+#define SEQ_CFG_READNOTWRITE (1 << 7)
+#define SEQ_CFG_ERASE (1 << 8)
+#define SEQ_CFG_PADS_1 (0x0 << 16)
+#define SEQ_CFG_PADS_2 (0x1 << 16)
+#define SEQ_CFG_PADS_4 (0x3 << 16)
+
+/*
+ * Register: SPI_MODE_BITS
+ */
+#define MODE_DATA(x) (x & 0xff)
+#define MODE_CYCLES(x) ((x & 0x3f) << 16)
+#define MODE_PADS_1 (0x0 << 22)
+#define MODE_PADS_2 (0x1 << 22)
+#define MODE_PADS_4 (0x3 << 22)
+#define DUMMY_CSDEASSERT (1 << 24)
+
+/*
+ * Register: SPI_DUMMY_BITS
+ */
+#define DUMMY_CYCLES(x) ((x & 0x3f) << 16)
+#define DUMMY_PADS_1 (0x0 << 22)
+#define DUMMY_PADS_2 (0x1 << 22)
+#define DUMMY_PADS_4 (0x3 << 22)
+#define DUMMY_CSDEASSERT (1 << 24)
+
+/*
+ * Register: SPI_FAST_SEQ_FLASH_STA_DATA
+ */
+#define STA_DATA_BYTE1(x) ((x & 0xff) << 0)
+#define STA_DATA_BYTE2(x) ((x & 0xff) << 8)
+#define STA_PADS_1 (0x0 << 16)
+#define STA_PADS_2 (0x1 << 16)
+#define STA_PADS_4 (0x3 << 16)
+#define STA_CSDEASSERT (0x1 << 20)
+#define STA_RDNOTWR (0x1 << 21)
+
+/*
+ * FSM SPI Instruction Opcodes
+ */
+#define STFSM_OPC_CMD 0x1
+#define STFSM_OPC_ADD 0x2
+#define STFSM_OPC_STA 0x3
+#define STFSM_OPC_MODE 0x4
+#define STFSM_OPC_DUMMY 0x5
+#define STFSM_OPC_DATA 0x6
+#define STFSM_OPC_WAIT 0x7
+#define STFSM_OPC_JUMP 0x8
+#define STFSM_OPC_GOTO 0x9
+#define STFSM_OPC_STOP 0xF
+
+/*
+ * FSM SPI Instructions (== opcode + operand).
+ */
+#define STFSM_INSTR(cmd, op) ((cmd) | ((op) << 4))
+
+#define STFSM_INST_CMD1 STFSM_INSTR(STFSM_OPC_CMD, 1)
+#define STFSM_INST_CMD2 STFSM_INSTR(STFSM_OPC_CMD, 2)
+#define STFSM_INST_CMD3 STFSM_INSTR(STFSM_OPC_CMD, 3)
+#define STFSM_INST_CMD4 STFSM_INSTR(STFSM_OPC_CMD, 4)
+#define STFSM_INST_CMD5 STFSM_INSTR(STFSM_OPC_CMD, 5)
+#define STFSM_INST_ADD1 STFSM_INSTR(STFSM_OPC_ADD, 1)
+#define STFSM_INST_ADD2 STFSM_INSTR(STFSM_OPC_ADD, 2)
+
+#define STFSM_INST_DATA_WRITE STFSM_INSTR(STFSM_OPC_DATA, 1)
+#define STFSM_INST_DATA_READ STFSM_INSTR(STFSM_OPC_DATA, 2)
+
+#define STFSM_INST_STA_RD1 STFSM_INSTR(STFSM_OPC_STA, 0x1)
+#define STFSM_INST_STA_WR1 STFSM_INSTR(STFSM_OPC_STA, 0x1)
+#define STFSM_INST_STA_RD2 STFSM_INSTR(STFSM_OPC_STA, 0x2)
+#define STFSM_INST_STA_WR1_2 STFSM_INSTR(STFSM_OPC_STA, 0x3)
+
+#define STFSM_INST_MODE STFSM_INSTR(STFSM_OPC_MODE, 0)
+#define STFSM_INST_DUMMY STFSM_INSTR(STFSM_OPC_DUMMY, 0)
+#define STFSM_INST_WAIT STFSM_INSTR(STFSM_OPC_WAIT, 0)
+#define STFSM_INST_STOP STFSM_INSTR(STFSM_OPC_STOP, 0)
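+
+/*
+ * Encoding example (illustrative): STFSM_INST_CMD1 expands to
+ * STFSM_INSTR(STFSM_OPC_CMD, 1) = 0x1 | (1 << 4) = 0x11, i.e. the opcode in
+ * the low nibble and the operand in the high nibble of each sequence byte.
+ */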
+
+#define STFSM_DEFAULT_EMI_FREQ 100000000UL /* 100 MHz */
+#define STFSM_DEFAULT_WR_TIME ((STFSM_DEFAULT_EMI_FREQ * 15) / 1000) /* 15ms */
+
+#define STFSM_FLASH_SAFE_FREQ 10000000UL /* 10 MHz */
+
+#define STFSM_MAX_WAIT_SEQ_MS 1000 /* FSM execution time */
+
+/* S25FLxxxS commands */
+#define S25FL_CMD_WRITE4_1_1_4 0x34
+#define S25FL_CMD_SE4 0xdc
+#define S25FL_CMD_CLSR 0x30
+#define S25FL_CMD_DYBWR 0xe1
+#define S25FL_CMD_DYBRD 0xe0
+#define S25FL_CMD_WRITE4 0x12 /* Note, opcode clashes with
+ * 'SPINOR_OP_WRITE_1_4_4'
+ * as found on N25Qxxx devices! */
+
+/* Status register */
+#define FLASH_STATUS_BUSY 0x01
+#define FLASH_STATUS_WEL 0x02
+#define FLASH_STATUS_BP0 0x04
+#define FLASH_STATUS_BP1 0x08
+#define FLASH_STATUS_BP2 0x10
+#define FLASH_STATUS_SRWP0 0x80
+#define FLASH_STATUS_TIMEOUT 0xff
+/* S25FL Error Flags */
+#define S25FL_STATUS_E_ERR 0x20
+#define S25FL_STATUS_P_ERR 0x40
+
+#define N25Q_CMD_WRVCR 0x81
+#define N25Q_CMD_RDVCR 0x85
+#define N25Q_CMD_RDVECR 0x65
+#define N25Q_CMD_RDNVCR 0xb5
+#define N25Q_CMD_WRNVCR 0xb1
+
+#define FLASH_PAGESIZE 256 /* In Bytes */
+#define FLASH_PAGESIZE_32 (FLASH_PAGESIZE / 4) /* In uint32_t */
+#define FLASH_MAX_BUSY_WAIT (300 * HZ) /* Maximum 'CHIPERASE' time */
+
+/*
+ * Flags to tweak operation of default read/write/erase routines
+ */
+#define CFG_READ_TOGGLE_32BIT_ADDR 0x00000001
+#define CFG_WRITE_TOGGLE_32BIT_ADDR 0x00000002
+#define CFG_ERASESEC_TOGGLE_32BIT_ADDR 0x00000008
+#define CFG_S25FL_CHECK_ERROR_FLAGS 0x00000010
+
+struct stfsm_seq {
+ uint32_t data_size;
+ uint32_t addr1;
+ uint32_t addr2;
+ uint32_t addr_cfg;
+ uint32_t seq_opc[5];
+ uint32_t mode;
+ uint32_t dummy;
+ uint32_t status;
+ uint8_t seq[16];
+ uint32_t seq_cfg;
+} __packed __aligned(4);
+
+struct stfsm {
+ struct device *dev;
+ void __iomem *base;
+ struct mtd_info mtd;
+ struct mutex lock;
+ struct flash_info *info;
+ struct clk *clk;
+
+ uint32_t configuration;
+ uint32_t fifo_dir_delay;
+ bool booted_from_spi;
+ bool reset_signal;
+ bool reset_por;
+
+ struct stfsm_seq stfsm_seq_read;
+ struct stfsm_seq stfsm_seq_write;
+ struct stfsm_seq stfsm_seq_en_32bit_addr;
+};
+
+/* Parameters to configure a READ or WRITE FSM sequence */
+struct seq_rw_config {
+ uint32_t flags; /* flags to support config */
+ uint8_t cmd; /* FLASH command */
+ int write; /* Write Sequence */
+ uint8_t addr_pads; /* No. of addr pads (MODE & DUMMY) */
+ uint8_t data_pads; /* No. of data pads */
+ uint8_t mode_data; /* MODE data */
+ uint8_t mode_cycles; /* No. of MODE cycles */
+ uint8_t dummy_cycles; /* No. of DUMMY cycles */
+};
+
+/* SPI Flash Device Table */
+struct flash_info {
+ char *name;
+ /*
+ * JEDEC id zero means "no ID" (most older chips); otherwise it has
+ * a high byte of zero plus three data bytes: the manufacturer id,
+ * then a two byte device id.
+ */
+ u32 jedec_id;
+ u16 ext_id;
+ /*
+ * The size listed here is what works with SPINOR_OP_SE, which isn't
+ * necessarily called a "sector" by the vendor.
+ */
+ unsigned sector_size;
+ u16 n_sectors;
+ u32 flags;
+ /*
+ * Note, where FAST_READ is supported, max_freq specifies the
+ * FAST_READ frequency, not the READ frequency.
+ */
+ u32 max_freq;
+ int (*config)(struct stfsm *);
+};
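+
+/*
+ * Device capacity is sector_size * n_sectors: e.g. the "n25q128" entry in
+ * the table below is 64 KiB * 256 = 16 MiB.
+ */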
+
+static int stfsm_n25q_config(struct stfsm *fsm);
+static int stfsm_mx25_config(struct stfsm *fsm);
+static int stfsm_s25fl_config(struct stfsm *fsm);
+static int stfsm_w25q_config(struct stfsm *fsm);
+
+static struct flash_info flash_types[] = {
+ /*
+ * ST Microelectronics/Numonyx --
+ * (newer production versions may have feature updates,
+ * e.g. a faster operating frequency)
+ */
+#define M25P_FLAG (FLASH_FLAG_READ_WRITE | FLASH_FLAG_READ_FAST)
+ { "m25p40", 0x202013, 0, 64 * 1024, 8, M25P_FLAG, 25, NULL },
+ { "m25p80", 0x202014, 0, 64 * 1024, 16, M25P_FLAG, 25, NULL },
+ { "m25p16", 0x202015, 0, 64 * 1024, 32, M25P_FLAG, 25, NULL },
+ { "m25p32", 0x202016, 0, 64 * 1024, 64, M25P_FLAG, 50, NULL },
+ { "m25p64", 0x202017, 0, 64 * 1024, 128, M25P_FLAG, 50, NULL },
+ { "m25p128", 0x202018, 0, 256 * 1024, 64, M25P_FLAG, 50, NULL },
+
+#define M25PX_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_WRITE_1_1_2)
+ { "m25px32", 0x207116, 0, 64 * 1024, 64, M25PX_FLAG, 75, NULL },
+ { "m25px64", 0x207117, 0, 64 * 1024, 128, M25PX_FLAG, 75, NULL },
+
+ /* Macronix MX25xxx
+ * - Support for 'FLASH_FLAG_WRITE_1_4_4' is omitted for devices
+ * where operating frequency must be reduced.
+ */
+#define MX25_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_SE_4K | \
+ FLASH_FLAG_SE_32K)
+ { "mx25l3255e", 0xc29e16, 0, 64 * 1024, 64,
+ (MX25_FLAG | FLASH_FLAG_WRITE_1_4_4), 86,
+ stfsm_mx25_config},
+ { "mx25l25635e", 0xc22019, 0, 64*1024, 512,
+ (MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70,
+ stfsm_mx25_config },
+ { "mx25l25655e", 0xc22619, 0, 64*1024, 512,
+ (MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70,
+ stfsm_mx25_config},
+
+#define N25Q_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_READ_1_4_4 | \
+ FLASH_FLAG_WRITE_1_1_2 | \
+ FLASH_FLAG_WRITE_1_2_2 | \
+ FLASH_FLAG_WRITE_1_1_4 | \
+ FLASH_FLAG_WRITE_1_4_4)
+ { "n25q128", 0x20ba18, 0, 64 * 1024, 256, N25Q_FLAG, 108,
+ stfsm_n25q_config },
+ { "n25q256", 0x20ba19, 0, 64 * 1024, 512,
+ N25Q_FLAG | FLASH_FLAG_32BIT_ADDR, 108, stfsm_n25q_config },
+
+ /*
+ * Spansion S25FLxxxP
+ * - 256KiB and 64KiB sector variants (identified by ext. JEDEC)
+ */
+#define S25FLXXXP_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_READ_1_4_4 | \
+ FLASH_FLAG_WRITE_1_1_4 | \
+ FLASH_FLAG_READ_FAST)
+ { "s25fl032p", 0x010215, 0x4d00, 64 * 1024, 64, S25FLXXXP_FLAG, 80,
+ stfsm_s25fl_config},
+ { "s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, S25FLXXXP_FLAG, 80,
+ stfsm_s25fl_config },
+ { "s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, S25FLXXXP_FLAG, 80,
+ stfsm_s25fl_config },
+
+ /*
+ * Spansion S25FLxxxS
+ * - 256KiB and 64KiB sector variants (identified by ext. JEDEC)
+ * - RESET# signal supported by die but not brought out on all
+ * package types. The package type is a function of board design,
+ * so this information is captured in the board's flags.
+ * - Supports 'DYB' sector protection. Depending on variant, sectors
+ * may default to locked state on power-on.
+ */
+#define S25FLXXXS_FLAG (S25FLXXXP_FLAG | \
+ FLASH_FLAG_RESET | \
+ FLASH_FLAG_DYB_LOCKING)
+ { "s25fl128s0", 0x012018, 0x0300, 256 * 1024, 64, S25FLXXXS_FLAG, 80,
+ stfsm_s25fl_config },
+ { "s25fl128s1", 0x012018, 0x0301, 64 * 1024, 256, S25FLXXXS_FLAG, 80,
+ stfsm_s25fl_config },
+ { "s25fl256s0", 0x010219, 0x4d00, 256 * 1024, 128,
+ S25FLXXXS_FLAG | FLASH_FLAG_32BIT_ADDR, 80, stfsm_s25fl_config },
+ { "s25fl256s1", 0x010219, 0x4d01, 64 * 1024, 512,
+ S25FLXXXS_FLAG | FLASH_FLAG_32BIT_ADDR, 80, stfsm_s25fl_config },
+
+ /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
+#define W25X_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_WRITE_1_1_2)
+ { "w25x40", 0xef3013, 0, 64 * 1024, 8, W25X_FLAG, 75, NULL },
+ { "w25x80", 0xef3014, 0, 64 * 1024, 16, W25X_FLAG, 75, NULL },
+ { "w25x16", 0xef3015, 0, 64 * 1024, 32, W25X_FLAG, 75, NULL },
+ { "w25x32", 0xef3016, 0, 64 * 1024, 64, W25X_FLAG, 75, NULL },
+ { "w25x64", 0xef3017, 0, 64 * 1024, 128, W25X_FLAG, 75, NULL },
+
+ /* Winbond -- w25q "blocks" are 64K, "sectors" are 4KiB */
+#define W25Q_FLAG (FLASH_FLAG_READ_WRITE | \
+ FLASH_FLAG_READ_FAST | \
+ FLASH_FLAG_READ_1_1_2 | \
+ FLASH_FLAG_READ_1_2_2 | \
+ FLASH_FLAG_READ_1_1_4 | \
+ FLASH_FLAG_READ_1_4_4 | \
+ FLASH_FLAG_WRITE_1_1_4)
+ { "w25q80", 0xef4014, 0, 64 * 1024, 16, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+ { "w25q16", 0xef4015, 0, 64 * 1024, 32, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+ { "w25q32", 0xef4016, 0, 64 * 1024, 64, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+ { "w25q64", 0xef4017, 0, 64 * 1024, 128, W25Q_FLAG, 80,
+ stfsm_w25q_config },
+
+ /* Sentinel */
+ { NULL, 0x000000, 0, 0, 0, 0, 0, NULL },
+};
+
+/*
+ * FSM message sequence configurations:
+ *
+ * All configs are presented in order of preference
+ */
+
+/* Default READ configurations, in order of preference */
+static struct seq_rw_config default_read_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4, 0, 4, 4, 0x00, 2, 4},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4, 0, 1, 4, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2, 0, 2, 2, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
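+
+/*
+ * Reading a row (illustrative): the FLASH_FLAG_READ_1_4_4 entry above
+ * selects the SPINOR_OP_READ_1_4_4 opcode, read direction (write = 0),
+ * 4 address pads, 4 data pads, mode data 0x00, 2 mode cycles and 4 dummy
+ * cycles, following the field order of struct seq_rw_config.
+ */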
+
+/* Default WRITE configurations */
+static struct seq_rw_config default_write_configs[] = {
+ {FLASH_FLAG_WRITE_1_4_4, SPINOR_OP_WRITE_1_4_4, 1, 4, 4, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_1_4, SPINOR_OP_WRITE_1_1_4, 1, 1, 4, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_2_2, SPINOR_OP_WRITE_1_2_2, 1, 2, 2, 0x00, 0, 0},
+ {FLASH_FLAG_WRITE_1_1_2, SPINOR_OP_WRITE_1_1_2, 1, 1, 2, 0x00, 0, 0},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_WRITE, 1, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/*
+ * [N25Qxxx] Configuration
+ */
+#define N25Q_VCR_DUMMY_CYCLES(x) (((x) & 0xf) << 4)
+#define N25Q_VCR_XIP_DISABLED ((uint8_t)0x1 << 3)
+#define N25Q_VCR_WRAP_CONT 0x3
+
+/* N25Q 3-byte Address READ configurations
+ * - 'FAST' variants configured for 8 dummy cycles.
+ *
+ * Note, the number of dummy cycles used for 'FAST' READ operations is
+ * configurable and would normally be tuned according to the READ command and
+ * operating frequency. However, this applies universally to all 'FAST' READ
+ * commands, including those used by the SPIBoot controller, and remains in
+ * force until the device is power-cycled. Since the SPIBoot controller is
+ * hard-wired to use 8 dummy cycles, we must configure the device to also use 8
+ * cycles.
+ */
+static struct seq_rw_config n25q_read3_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4, 0, 4, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2, 0, 2, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/* N25Q 4-byte Address READ configurations
+ * - use special 4-byte address READ commands (reduces overheads, and
+ * reduces risk of hitting watchdog reset issues).
+ * - 'FAST' variants configured for 8 dummy cycles (see note above.)
+ */
+static struct seq_rw_config n25q_read4_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B, 0, 4, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B, 0, 2, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST_4B, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ_4B, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/*
+ * [MX25xxx] Configuration
+ */
+#define MX25_STATUS_QE (0x1 << 6)
+
+static int stfsm_mx25_en_32bit_addr_seq(struct stfsm_seq *seq)
+{
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_EN4B) |
+ SEQ_OPC_CSDEASSERT);
+
+ seq->seq[0] = STFSM_INST_CMD1;
+ seq->seq[1] = STFSM_INST_WAIT;
+ seq->seq[2] = STFSM_INST_STOP;
+
+ seq->seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ);
+
+ return 0;
+}
+
+/*
+ * [S25FLxxx] Configuration
+ */
+#define STFSM_S25FL_CONFIG_QE (0x1 << 1)
+
+/*
+ * S25FLxxxS devices provide three ways of supporting 32-bit addressing: Bank
+ * Register, Extended Address Modes, and a 32-bit address command set. The
+ * 32-bit address command set is used here, since it avoids any problems with
+ * entering a state that is incompatible with the SPIBoot Controller.
+ */
+static struct seq_rw_config stfsm_s25fl_read4_configs[] = {
+ {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B, 0, 4, 4, 0x00, 2, 4},
+ {FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B, 0, 1, 4, 0x00, 0, 8},
+ {FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B, 0, 2, 2, 0x00, 4, 0},
+ {FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B, 0, 1, 2, 0x00, 0, 8},
+ {FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST_4B, 0, 1, 1, 0x00, 0, 8},
+ {FLASH_FLAG_READ_WRITE, SPINOR_OP_READ_4B, 0, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+static struct seq_rw_config stfsm_s25fl_write4_configs[] = {
+ {FLASH_FLAG_WRITE_1_1_4, S25FL_CMD_WRITE4_1_1_4, 1, 1, 4, 0x00, 0, 0},
+ {FLASH_FLAG_READ_WRITE, S25FL_CMD_WRITE4, 1, 1, 1, 0x00, 0, 0},
+ {0x00, 0, 0, 0, 0, 0x00, 0, 0},
+};
+
+/*
+ * [W25Qxxx] Configuration
+ */
+#define W25Q_STATUS_QE (0x1 << 1)
+
+static struct stfsm_seq stfsm_seq_read_jedec = {
+ .data_size = TRANSFER_SIZE(8),
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_RDID)),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_DATA_READ,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_read_status_fifo = {
+ .data_size = TRANSFER_SIZE(4),
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_RDSR)),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_DATA_READ,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_erase_sector = {
+ /* 'addr_cfg' configured during initialisation */
+ .seq_opc = {
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
+
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_SE)),
+ },
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_ADD1,
+ STFSM_INST_ADD2,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_erase_chip = {
+ .seq_opc = {
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
+
+ (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_CHIP_ERASE) | SEQ_OPC_CSDEASSERT),
+ },
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_WAIT,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static struct stfsm_seq stfsm_seq_write_status = {
+ .seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
+ .seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WRSR)),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_STA_WR1,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+/* Dummy sequence to read one byte of data from flash into the FIFO */
+static const struct stfsm_seq stfsm_seq_load_fifo_byte = {
+ .data_size = TRANSFER_SIZE(1),
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_RDID)),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_DATA_READ,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+};
+
+static int stfsm_n25q_en_32bit_addr_seq(struct stfsm_seq *seq)
+{
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_EN4B));
+ seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
+ SEQ_OPC_CSDEASSERT);
+
+ seq->seq[0] = STFSM_INST_CMD2;
+ seq->seq[1] = STFSM_INST_CMD1;
+ seq->seq[2] = STFSM_INST_WAIT;
+ seq->seq[3] = STFSM_INST_STOP;
+
+ seq->seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ);
+
+ return 0;
+}
+
+static inline int stfsm_is_idle(struct stfsm *fsm)
+{
+ return readl(fsm->base + SPI_FAST_SEQ_STA) & 0x10;
+}
+
+static inline uint32_t stfsm_fifo_available(struct stfsm *fsm)
+{
+ return (readl(fsm->base + SPI_FAST_SEQ_STA) >> 5) & 0x7f;
+}
+
+static inline void stfsm_load_seq(struct stfsm *fsm,
+ const struct stfsm_seq *seq)
+{
+ void __iomem *dst = fsm->base + SPI_FAST_SEQ_TRANSFER_SIZE;
+ const uint32_t *src = (const uint32_t *)seq;
+ int words = sizeof(*seq) / sizeof(*src);
+
+ BUG_ON(!stfsm_is_idle(fsm));
+
+ while (words--) {
+ writel(*src, dst);
+ src++;
+ dst += 4;
+ }
+}
+
+static void stfsm_wait_seq(struct stfsm *fsm)
+{
+ unsigned long deadline;
+ int timeout = 0;
+
+ deadline = jiffies + msecs_to_jiffies(STFSM_MAX_WAIT_SEQ_MS);
+
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ if (stfsm_is_idle(fsm))
+ return;
+
+ cond_resched();
+ }
+
+ dev_err(fsm->dev, "timeout on sequence completion\n");
+}
+
+static void stfsm_read_fifo(struct stfsm *fsm, uint32_t *buf, uint32_t size)
+{
+ uint32_t remaining = size >> 2;
+ uint32_t avail;
+ uint32_t words;
+
+ dev_dbg(fsm->dev, "Reading %d bytes from FIFO\n", size);
+
+ BUG_ON((((uintptr_t)buf) & 0x3) || (size & 0x3));
+
+ while (remaining) {
+ for (;;) {
+ avail = stfsm_fifo_available(fsm);
+ if (avail)
+ break;
+ udelay(1);
+ }
+ words = min(avail, remaining);
+ remaining -= words;
+
+ readsl(fsm->base + SPI_FAST_SEQ_DATA_REG, buf, words);
+ buf += words;
+ }
+}
+
+/*
+ * Clear the data FIFO
+ *
+ * Typically, this is only required during driver initialisation, where no
+ * assumptions can be made regarding the state of the FIFO.
+ *
+ * The process of clearing the FIFO is complicated by the fact that while it is
+ * possible for the FIFO to contain an arbitrary number of bytes [1], the
+ * SPI_FAST_SEQ_STA register only reports the number of complete 32-bit words
+ * present. Furthermore, data can only be drained from the FIFO by reading
+ * complete 32-bit words.
+ *
+ * With this in mind, a two-stage process is used to clear the FIFO:
+ *
+ * 1. Read any complete 32-bit words from the FIFO, as reported by the
+ * SPI_FAST_SEQ_STA register.
+ *
+ * 2. Mop up any remaining bytes. At this point, it is not known if there
+ * are 0, 1, 2, or 3 bytes in the FIFO. To handle all cases, a dummy FSM
+ * sequence is used to load one byte at a time, until a complete 32-bit
+ * word is formed; at most, 4 bytes will need to be loaded.
+ *
+ * [1] It is theoretically possible for the FIFO to contain an arbitrary number
+ * of bits. However, since there are no known use-cases that leave
+ * incomplete bytes in the FIFO, only words and bytes are considered here.
+ */
+static void stfsm_clear_fifo(struct stfsm *fsm)
+{
+ const struct stfsm_seq *seq = &stfsm_seq_load_fifo_byte;
+ uint32_t words, i;
+
+ /* 1. Clear any 32-bit words */
+ words = stfsm_fifo_available(fsm);
+ if (words) {
+ for (i = 0; i < words; i++)
+ readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
+ dev_dbg(fsm->dev, "cleared %d words from FIFO\n", words);
+ }
+
+ /*
+ * 2. Clear any remaining bytes
+ * - Load the FIFO, one byte at a time, until a complete 32-bit word
+ * is available.
+ */
+ for (i = 0, words = 0; i < 4 && !words; i++) {
+ stfsm_load_seq(fsm, seq);
+ stfsm_wait_seq(fsm);
+ words = stfsm_fifo_available(fsm);
+ }
+
+ /* - A single word must be available now */
+ if (words != 1) {
+ dev_err(fsm->dev, "failed to clear bytes from the data FIFO\n");
+ return;
+ }
+
+ /* - Read the 32-bit word */
+ readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
+
+ dev_dbg(fsm->dev, "cleared %d byte(s) from the data FIFO\n", 4 - i);
+}
+
+static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf,
+ uint32_t size)
+{
+ uint32_t words = size >> 2;
+
+ dev_dbg(fsm->dev, "writing %d bytes to FIFO\n", size);
+
+ BUG_ON((((uintptr_t)buf) & 0x3) || (size & 0x3));
+
+ writesl(fsm->base + SPI_FAST_SEQ_DATA_REG, buf, words);
+
+ return size;
+}
+
+static int stfsm_enter_32bit_addr(struct stfsm *fsm, int enter)
+{
+ struct stfsm_seq *seq = &fsm->stfsm_seq_en_32bit_addr;
+ uint32_t cmd = enter ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
+
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(cmd) |
+ SEQ_OPC_CSDEASSERT);
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ return 0;
+}
+
+static uint8_t stfsm_wait_busy(struct stfsm *fsm)
+{
+ struct stfsm_seq *seq = &stfsm_seq_read_status_fifo;
+ unsigned long deadline;
+ uint32_t status;
+ int timeout = 0;
+
+ /* Use RDRS1 */
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_RDSR));
+
+ /* Load read_status sequence */
+ stfsm_load_seq(fsm, seq);
+
+ /*
+ * Repeat until busy bit is deasserted, or timeout, or error (S25FLxxxS)
+ */
+ deadline = jiffies + FLASH_MAX_BUSY_WAIT;
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ stfsm_wait_seq(fsm);
+
+ stfsm_read_fifo(fsm, &status, 4);
+
+ if ((status & FLASH_STATUS_BUSY) == 0)
+ return 0;
+
+ if ((fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS) &&
+ ((status & S25FL_STATUS_P_ERR) ||
+ (status & S25FL_STATUS_E_ERR)))
+ return (uint8_t)(status & 0xff);
+
+ if (!timeout)
+ /* Restart */
+ writel(seq->seq_cfg, fsm->base + SPI_FAST_SEQ_CFG);
+
+ cond_resched();
+ }
+
+ dev_err(fsm->dev, "timeout on wait_busy\n");
+
+ return FLASH_STATUS_TIMEOUT;
+}
+
+static int stfsm_read_status(struct stfsm *fsm, uint8_t cmd,
+ uint8_t *data, int bytes)
+{
+ struct stfsm_seq *seq = &stfsm_seq_read_status_fifo;
+ uint32_t tmp;
+ uint8_t *t = (uint8_t *)&tmp;
+ int i;
+
+ dev_dbg(fsm->dev, "read 'status' register [0x%02x], %d byte(s)\n",
+ cmd, bytes);
+
+ BUG_ON(bytes != 1 && bytes != 2);
+
+ seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(cmd));
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_read_fifo(fsm, &tmp, 4);
+
+ for (i = 0; i < bytes; i++)
+ data[i] = t[i];
+
+ stfsm_wait_seq(fsm);
+
+ return 0;
+}
+
+static int stfsm_write_status(struct stfsm *fsm, uint8_t cmd,
+ uint16_t data, int bytes, int wait_busy)
+{
+ struct stfsm_seq *seq = &stfsm_seq_write_status;
+
+ dev_dbg(fsm->dev,
+ "write 'status' register [0x%02x], %d byte(s), 0x%04x\n"
+ " %s wait-busy\n", cmd, bytes, data, wait_busy ? "with" : "no");
+
+ BUG_ON(bytes != 1 && bytes != 2);
+
+ seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(cmd));
+
+ seq->status = (uint32_t)data | STA_PADS_1 | STA_CSDEASSERT;
+ seq->seq[2] = (bytes == 1) ? STFSM_INST_STA_WR1 : STFSM_INST_STA_WR1_2;
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ if (wait_busy)
+ stfsm_wait_busy(fsm);
+
+ return 0;
+}
+
+/*
+ * SoC reset on 'boot-from-spi' systems
+ *
+ * Certain modes of operation cause the Flash device to enter a particular state
+ * for a period of time (e.g. 'Erase Sector', 'Quad Enable', and 'Enter 32-bit
+ * Addr' commands). On boot-from-spi systems, it is important to consider what
+ * happens if a warm reset occurs during this period. The SPIBoot controller
+ * assumes that the Flash device is in its default reset state, 24-bit address mode,
+ * and ready to accept commands. This can be achieved using some form of
+ * on-board logic/controller to force a device POR in response to a SoC-level
+ * reset or by making use of the device reset signal if available (limited
+ * number of devices only).
+ *
+ * Failure to take such precautions can cause problems following a warm reset.
+ * For some operations (e.g. ERASE), there is little that can be done. For
+ * other modes of operation (e.g. 32-bit addressing), options are often
+ * available that can help minimise the window in which a reset could cause a
+ * problem.
+ *
+ */
+static bool stfsm_can_handle_soc_reset(struct stfsm *fsm)
+{
+ /* Reset signal is available on the board and supported by the device */
+ if (fsm->reset_signal && fsm->info->flags & FLASH_FLAG_RESET)
+ return true;
+
+ /* Board-level logic forces a power-on-reset */
+ if (fsm->reset_por)
+ return true;
+
+ /* Reset is not properly handled and may result in failure to reboot */
+ return false;
+}
+
+/* Configure 'addr_cfg' according to addressing mode */
+static void stfsm_prepare_erasesec_seq(struct stfsm *fsm,
+ struct stfsm_seq *seq)
+{
+ int addr1_cycles = fsm->info->flags & FLASH_FLAG_32BIT_ADDR ? 16 : 8;
+
+ seq->addr_cfg = (ADR_CFG_CYCLES_ADD1(addr1_cycles) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2 |
+ ADR_CFG_CSDEASSERT_ADD2);
+}
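+
+/*
+ * With a single pad, one address bit is transferred per cycle, so the
+ * sector address is split across the ADD1/ADD2 fields: in 24-bit mode,
+ * ADD1 carries the top byte in 8 cycles and ADD2 the low half-word in
+ * 16 cycles; in 32-bit mode both halves take 16 cycles (16 + 16 = 32
+ * address bits).
+ */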
+
+/* Search for preferred configuration based on available flags */
+static struct seq_rw_config *
+stfsm_search_seq_rw_configs(struct stfsm *fsm,
+ struct seq_rw_config cfgs[])
+{
+ struct seq_rw_config *config;
+ int flags = fsm->info->flags;
+
+ for (config = cfgs; config->cmd != 0; config++)
+ if ((config->flags & flags) == config->flags)
+ return config;
+
+ return NULL;
+}
+
+/* Prepare a READ/WRITE sequence according to configuration parameters */
+static void stfsm_prepare_rw_seq(struct stfsm *fsm,
+ struct stfsm_seq *seq,
+ struct seq_rw_config *cfg)
+{
+ int addr1_cycles, addr2_cycles;
+ int i = 0;
+
+ memset(seq, 0, sizeof(*seq));
+
+ /* Add READ/WRITE OPC */
+ seq->seq_opc[i++] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(cfg->cmd));
+
+ /* Add WREN OPC for a WRITE sequence */
+ if (cfg->write)
+ seq->seq_opc[i++] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
+ SEQ_OPC_CSDEASSERT);
+
+ /* Address configuration (24 or 32-bit addresses) */
+ addr1_cycles = (fsm->info->flags & FLASH_FLAG_32BIT_ADDR) ? 16 : 8;
+ addr1_cycles /= cfg->addr_pads;
+ addr2_cycles = 16 / cfg->addr_pads;
+ seq->addr_cfg = ((addr1_cycles & 0x3f) << 0 | /* ADD1 cycles */
+ (cfg->addr_pads - 1) << 6 | /* ADD1 pads */
+ (addr2_cycles & 0x3f) << 16 | /* ADD2 cycles */
+ ((cfg->addr_pads - 1) << 22)); /* ADD2 pads */
+
+ /* Data/Sequence configuration */
+ seq->seq_cfg = ((cfg->data_pads - 1) << 16 |
+ SEQ_CFG_STARTSEQ |
+ SEQ_CFG_CSDEASSERT);
+ if (!cfg->write)
+ seq->seq_cfg |= SEQ_CFG_READNOTWRITE;
+
+ /* Mode configuration (no. of pads taken from addr cfg) */
+ seq->mode = ((cfg->mode_data & 0xff) << 0 | /* data */
+ (cfg->mode_cycles & 0x3f) << 16 | /* cycles */
+ (cfg->addr_pads - 1) << 22); /* pads */
+
+ /* Dummy configuration (no. of pads taken from addr cfg) */
+ seq->dummy = ((cfg->dummy_cycles & 0x3f) << 16 | /* cycles */
+ (cfg->addr_pads - 1) << 22); /* pads */
+
+
+ /* Instruction sequence */
+ i = 0;
+ if (cfg->write)
+ seq->seq[i++] = STFSM_INST_CMD2;
+
+ seq->seq[i++] = STFSM_INST_CMD1;
+
+ seq->seq[i++] = STFSM_INST_ADD1;
+ seq->seq[i++] = STFSM_INST_ADD2;
+
+ if (cfg->mode_cycles)
+ seq->seq[i++] = STFSM_INST_MODE;
+
+ if (cfg->dummy_cycles)
+ seq->seq[i++] = STFSM_INST_DUMMY;
+
+ seq->seq[i++] =
+ cfg->write ? STFSM_INST_DATA_WRITE : STFSM_INST_DATA_READ;
+ seq->seq[i++] = STFSM_INST_STOP;
+}
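+
+/*
+ * Example: for a 24-bit address on 4 pads (addr_pads = 4), the address
+ * phase shrinks to 8 / 4 = 2 cycles for ADD1 and 16 / 4 = 4 cycles for
+ * ADD2. The resulting instruction streams are:
+ *
+ *	read:  CMD1, ADD1, ADD2, [MODE], [DUMMY], DATA_READ, STOP
+ *	write: CMD2, CMD1, ADD1, ADD2, [MODE], [DUMMY], DATA_WRITE, STOP
+ *
+ * For writes, CMD2 runs first: seq_opc[1] holds WREN with a chip-select
+ * deassert, so write-enable is issued as a discrete command before the
+ * programming opcode held in seq_opc[0].
+ */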
+
+static int stfsm_search_prepare_rw_seq(struct stfsm *fsm,
+ struct stfsm_seq *seq,
+ struct seq_rw_config *cfgs)
+{
+ struct seq_rw_config *config;
+
+ config = stfsm_search_seq_rw_configs(fsm, cfgs);
+ if (!config) {
+ dev_err(fsm->dev, "failed to find suitable config\n");
+ return -EINVAL;
+ }
+
+ stfsm_prepare_rw_seq(fsm, seq, config);
+
+ return 0;
+}
+
+/* Prepare the READ/WRITE/ERASE 'default' sequences */
+static int stfsm_prepare_rwe_seqs_default(struct stfsm *fsm)
+{
+ uint32_t flags = fsm->info->flags;
+ int ret;
+
+ /* Configure 'READ' sequence */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ default_read_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+ "failed to prep READ sequence with flags [0x%08x]\n",
+ flags);
+ return ret;
+ }
+
+ /* Configure 'WRITE' sequence */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write,
+ default_write_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+ "failed to prep WRITE sequence with flags [0x%08x]\n",
+ flags);
+ return ret;
+ }
+
+ /* Configure 'ERASE_SECTOR' sequence */
+ stfsm_prepare_erasesec_seq(fsm, &stfsm_seq_erase_sector);
+
+ return 0;
+}
+
+static int stfsm_mx25_config(struct stfsm *fsm)
+{
+ uint32_t flags = fsm->info->flags;
+ uint32_t data_pads;
+ uint8_t sta;
+ int ret;
+ bool soc_reset;
+
+ /*
+ * Use default READ/WRITE sequences
+ */
+ ret = stfsm_prepare_rwe_seqs_default(fsm);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure 32-bit Address Support
+ */
+ if (flags & FLASH_FLAG_32BIT_ADDR) {
+ /* Configure 'enter_32bitaddr' FSM sequence */
+ stfsm_mx25_en_32bit_addr_seq(&fsm->stfsm_seq_en_32bit_addr);
+
+ soc_reset = stfsm_can_handle_soc_reset(fsm);
+ if (soc_reset || !fsm->booted_from_spi)
+ /* If we can handle SoC resets, we enable 32-bit address
+ * mode pervasively */
+ stfsm_enter_32bit_addr(fsm, 1);
+ else
+ /* Else, enable/disable 32-bit addressing before/after
+ * each operation */
+ fsm->configuration = (CFG_READ_TOGGLE_32BIT_ADDR |
+ CFG_WRITE_TOGGLE_32BIT_ADDR |
+ CFG_ERASESEC_TOGGLE_32BIT_ADDR);
+ }
+
+ /* Check status of 'QE' bit, update if required. */
+ stfsm_read_status(fsm, SPINOR_OP_RDSR, &sta, 1);
+ data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
+ if (data_pads == 4) {
+ if (!(sta & MX25_STATUS_QE)) {
+ /* Set 'QE' */
+ sta |= MX25_STATUS_QE;
+
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sta, 1, 1);
+ }
+ } else {
+ if (sta & MX25_STATUS_QE) {
+ /* Clear 'QE' */
+ sta &= ~MX25_STATUS_QE;
+
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sta, 1, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int stfsm_n25q_config(struct stfsm *fsm)
+{
+ uint32_t flags = fsm->info->flags;
+ uint8_t vcr;
+ int ret = 0;
+ bool soc_reset;
+
+ /* Configure 'READ' sequence */
+ if (flags & FLASH_FLAG_32BIT_ADDR)
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ n25q_read4_configs);
+ else
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ n25q_read3_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+ "failed to prepare READ sequence with flags [0x%08x]\n",
+ flags);
+ return ret;
+ }
+
+ /* Configure 'WRITE' sequence (default configs) */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write,
+ default_write_configs);
+ if (ret) {
+ dev_err(fsm->dev,
+ "preparing WRITE sequence using flags [0x%08x] failed\n",
+ flags);
+ return ret;
+ }
+
+ /* Configure 'ERASE_SECTOR' sequence */
+ stfsm_prepare_erasesec_seq(fsm, &stfsm_seq_erase_sector);
+
+ /* Configure 32-bit address support */
+ if (flags & FLASH_FLAG_32BIT_ADDR) {
+ stfsm_n25q_en_32bit_addr_seq(&fsm->stfsm_seq_en_32bit_addr);
+
+ soc_reset = stfsm_can_handle_soc_reset(fsm);
+ if (soc_reset || !fsm->booted_from_spi) {
+ /*
+ * If we can handle SoC resets, we enable 32-bit
+ * address mode pervasively
+ */
+ stfsm_enter_32bit_addr(fsm, 1);
+ } else {
+ /*
+ * If not, enable/disable for WRITE and ERASE
+ * operations (READ uses special commands)
+ */
+ fsm->configuration = (CFG_WRITE_TOGGLE_32BIT_ADDR |
+ CFG_ERASESEC_TOGGLE_32BIT_ADDR);
+ }
+ }
+
+ /*
+ * Configure device to use 8 dummy cycles
+ */
+ vcr = (N25Q_VCR_DUMMY_CYCLES(8) | N25Q_VCR_XIP_DISABLED |
+ N25Q_VCR_WRAP_CONT);
+ stfsm_write_status(fsm, N25Q_CMD_WRVCR, vcr, 1, 0);
+
+ return 0;
+}
+
+static void stfsm_s25fl_prepare_erasesec_seq_32(struct stfsm_seq *seq)
+{
+ seq->seq_opc[1] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_SE4));
+
+ seq->addr_cfg = (ADR_CFG_CYCLES_ADD1(16) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2 |
+ ADR_CFG_CSDEASSERT_ADD2);
+}
+
+static void stfsm_s25fl_read_dyb(struct stfsm *fsm, uint32_t offs, uint8_t *dby)
+{
+ uint32_t tmp;
+ struct stfsm_seq seq = {
+ .data_size = TRANSFER_SIZE(4),
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_DYBRD)),
+ .addr_cfg = (ADR_CFG_CYCLES_ADD1(16) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2),
+ .addr1 = (offs >> 16) & 0xffff,
+ .addr2 = offs & 0xffff,
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_ADD1,
+ STFSM_INST_ADD2,
+ STFSM_INST_DATA_READ,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+ };
+
+ stfsm_load_seq(fsm, &seq);
+
+ stfsm_read_fifo(fsm, &tmp, 4);
+
+ *dby = (uint8_t)(tmp >> 24);
+
+ stfsm_wait_seq(fsm);
+}
+
+static void stfsm_s25fl_write_dyb(struct stfsm *fsm, uint32_t offs, uint8_t dby)
+{
+ struct stfsm_seq seq = {
+ .seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
+ SEQ_OPC_CSDEASSERT),
+ .seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_DYBWR)),
+ .addr_cfg = (ADR_CFG_CYCLES_ADD1(16) |
+ ADR_CFG_PADS_1_ADD1 |
+ ADR_CFG_CYCLES_ADD2(16) |
+ ADR_CFG_PADS_1_ADD2),
+ .status = (uint32_t)dby | STA_PADS_1 | STA_CSDEASSERT,
+ .addr1 = (offs >> 16) & 0xffff,
+ .addr2 = offs & 0xffff,
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_ADD1,
+ STFSM_INST_ADD2,
+ STFSM_INST_STA_WR1,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+ };
+
+ stfsm_load_seq(fsm, &seq);
+ stfsm_wait_seq(fsm);
+
+ stfsm_wait_busy(fsm);
+}
+
+static int stfsm_s25fl_clear_status_reg(struct stfsm *fsm)
+{
+ struct stfsm_seq seq = {
+ .seq_opc[0] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(S25FL_CMD_CLSR) |
+ SEQ_OPC_CSDEASSERT),
+ .seq_opc[1] = (SEQ_OPC_PADS_1 |
+ SEQ_OPC_CYCLES(8) |
+ SEQ_OPC_OPCODE(SPINOR_OP_WRDI) |
+ SEQ_OPC_CSDEASSERT),
+ .seq = {
+ STFSM_INST_CMD1,
+ STFSM_INST_CMD2,
+ STFSM_INST_WAIT,
+ STFSM_INST_STOP,
+ },
+ .seq_cfg = (SEQ_CFG_PADS_1 |
+ SEQ_CFG_ERASE |
+ SEQ_CFG_READNOTWRITE |
+ SEQ_CFG_CSDEASSERT |
+ SEQ_CFG_STARTSEQ),
+ };
+
+ stfsm_load_seq(fsm, &seq);
+
+ stfsm_wait_seq(fsm);
+
+ return 0;
+}
+
+static int stfsm_s25fl_config(struct stfsm *fsm)
+{
+ struct flash_info *info = fsm->info;
+ uint32_t flags = info->flags;
+ uint32_t data_pads;
+ uint32_t offs;
+ uint16_t sta_wr;
+ uint8_t sr1, cr1, dyb;
+ int update_sr = 0;
+ int ret;
+
+ if (flags & FLASH_FLAG_32BIT_ADDR) {
+ /*
+ * Prepare Read/Write/Erase sequences according to S25FLxxx
+ * 32-bit address command set
+ */
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
+ stfsm_s25fl_read4_configs);
+ if (ret)
+ return ret;
+
+ ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write,
+ stfsm_s25fl_write4_configs);
+ if (ret)
+ return ret;
+
+ stfsm_s25fl_prepare_erasesec_seq_32(&stfsm_seq_erase_sector);
+
+ } else {
+ /* Use default configurations for 24-bit addressing */
+ ret = stfsm_prepare_rwe_seqs_default(fsm);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * For devices that support 'DYB' sector locking, check lock status and
+ * unlock sectors if necessary (some variants power-on with sectors
+ * locked by default)
+ */
+ if (flags & FLASH_FLAG_DYB_LOCKING) {
+ for (offs = 0; offs < info->sector_size * info->n_sectors;) {
+ stfsm_s25fl_read_dyb(fsm, offs, &dyb);
+ if (dyb == 0x00)
+ stfsm_s25fl_write_dyb(fsm, offs, 0xff);
+
+ /* Handle bottom/top 4KiB parameter sectors */
+ if ((offs < info->sector_size * 2) ||
+ (offs >= (info->sector_size * info->n_sectors - info->sector_size * 2)))
+ offs += 0x1000;
+ else
+ offs += 0x10000;
+ }
+ }
+
+ /* Check status of 'QE' bit, update if required. */
+ stfsm_read_status(fsm, SPINOR_OP_RDCR, &cr1, 1);
+ data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
+ if (data_pads == 4) {
+ if (!(cr1 & STFSM_S25FL_CONFIG_QE)) {
+ /* Set 'QE' */
+ cr1 |= STFSM_S25FL_CONFIG_QE;
+
+ update_sr = 1;
+ }
+ } else {
+ if (cr1 & STFSM_S25FL_CONFIG_QE) {
+ /* Clear 'QE' */
+ cr1 &= ~STFSM_S25FL_CONFIG_QE;
+
+ update_sr = 1;
+ }
+ }
+ if (update_sr) {
+ stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1);
+ sta_wr = ((uint16_t)cr1 << 8) | sr1;
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sta_wr, 2, 1);
+ }
+
+ /*
+ * S25FLxxx devices support Program and Erase error flags.
+ * Configure driver to check flags and clear if necessary.
+ */
+ fsm->configuration |= CFG_S25FL_CHECK_ERROR_FLAGS;
+
+ return 0;
+}
+
+static int stfsm_w25q_config(struct stfsm *fsm)
+{
+ uint32_t data_pads;
+ uint8_t sr1, sr2;
+ uint16_t sr_wr;
+ int update_sr = 0;
+ int ret;
+
+ ret = stfsm_prepare_rwe_seqs_default(fsm);
+ if (ret)
+ return ret;
+
+ /* Check status of 'QE' bit, update if required. */
+ stfsm_read_status(fsm, SPINOR_OP_RDCR, &sr2, 1);
+ data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
+ if (data_pads == 4) {
+ if (!(sr2 & W25Q_STATUS_QE)) {
+ /* Set 'QE' */
+ sr2 |= W25Q_STATUS_QE;
+ update_sr = 1;
+ }
+ } else {
+ if (sr2 & W25Q_STATUS_QE) {
+ /* Clear 'QE' */
+ sr2 &= ~W25Q_STATUS_QE;
+ update_sr = 1;
+ }
+ }
+ if (update_sr) {
+ /* Write status register */
+ stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1);
+ sr_wr = ((uint16_t)sr2 << 8) | sr1;
+ stfsm_write_status(fsm, SPINOR_OP_WRSR, sr_wr, 2, 1);
+ }
+
+ return 0;
+}
+
+static int stfsm_read(struct stfsm *fsm, uint8_t *buf, uint32_t size,
+ uint32_t offset)
+{
+ struct stfsm_seq *seq = &fsm->stfsm_seq_read;
+ uint32_t data_pads;
+ uint32_t read_mask;
+ uint32_t size_ub;
+ uint32_t size_lb;
+ uint32_t size_mop;
+ uint32_t tmp[4];
+ uint32_t page_buf[FLASH_PAGESIZE_32];
+ uint8_t *p;
+
+ dev_dbg(fsm->dev, "reading %d bytes from 0x%08x\n", size, offset);
+
+ /* Enter 32-bit address mode, if required */
+ if (fsm->configuration & CFG_READ_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 1);
+
+ /* Must read in multiples of 32 cycles (or 32*pads/8 Bytes) */
+ data_pads = ((seq->seq_cfg >> 16) & 0x3) + 1;
+ read_mask = (data_pads << 2) - 1;
+
+ /* Handle non-aligned buf */
+ p = ((uintptr_t)buf & 0x3) ? (uint8_t *)page_buf : buf;
+
+ /* Handle non-aligned size */
+ size_ub = (size + read_mask) & ~read_mask;
+ size_lb = size & ~read_mask;
+ size_mop = size & read_mask;
+
+ seq->data_size = TRANSFER_SIZE(size_ub);
+ seq->addr1 = (offset >> 16) & 0xffff;
+ seq->addr2 = offset & 0xffff;
+
+ stfsm_load_seq(fsm, seq);
+
+ if (size_lb)
+ stfsm_read_fifo(fsm, (uint32_t *)p, size_lb);
+
+ if (size_mop) {
+ stfsm_read_fifo(fsm, tmp, read_mask + 1);
+ memcpy(p + size_lb, &tmp, size_mop);
+ }
+
+ /* Handle non-aligned buf */
+ if ((uintptr_t)buf & 0x3)
+ memcpy(buf, page_buf, size);
+
+ /* Wait for sequence to finish */
+ stfsm_wait_seq(fsm);
+
+ stfsm_clear_fifo(fsm);
+
+ /* Exit 32-bit address mode, if required */
+ if (fsm->configuration & CFG_READ_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 0);
+
+ return 0;
+}
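+
+/*
+ * Alignment example: with data_pads = 1, read_mask = (1 << 2) - 1 = 3,
+ * so transfers are rounded up to 4-byte multiples. A 10-byte read
+ * becomes size_ub = 12 (programmed into the sequence), size_lb = 8
+ * (drained straight into the caller's buffer) and size_mop = 2
+ * (bounced through the 'tmp' words). All 12 bytes are drained from the
+ * FIFO, so the final stfsm_clear_fifo() is purely defensive.
+ */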
+
+static int stfsm_write(struct stfsm *fsm, const uint8_t *buf,
+ uint32_t size, uint32_t offset)
+{
+ struct stfsm_seq *seq = &fsm->stfsm_seq_write;
+ uint32_t data_pads;
+ uint32_t write_mask;
+ uint32_t size_ub;
+ uint32_t size_lb;
+ uint32_t size_mop;
+ uint32_t tmp[4];
+ uint32_t i;
+ uint32_t page_buf[FLASH_PAGESIZE_32];
+ uint8_t *t = (uint8_t *)&tmp;
+ const uint8_t *p;
+ int ret;
+
+ dev_dbg(fsm->dev, "writing %d bytes to 0x%08x\n", size, offset);
+
+ /* Enter 32-bit address mode, if required */
+ if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 1);
+
+ /* Must write in multiples of 32 cycles (or 32*pads/8 bytes) */
+ data_pads = ((seq->seq_cfg >> 16) & 0x3) + 1;
+ write_mask = (data_pads << 2) - 1;
+
+ /* Handle non-aligned buf */
+ if ((uintptr_t)buf & 0x3) {
+ memcpy(page_buf, buf, size);
+ p = (uint8_t *)page_buf;
+ } else {
+ p = buf;
+ }
+
+ /* Handle non-aligned size */
+ size_ub = (size + write_mask) & ~write_mask;
+ size_lb = size & ~write_mask;
+ size_mop = size & write_mask;
+
+ seq->data_size = TRANSFER_SIZE(size_ub);
+ seq->addr1 = (offset >> 16) & 0xffff;
+ seq->addr2 = offset & 0xffff;
+
+ /* Need to set FIFO to write mode, before writing data to FIFO (see
+ * GNBvb79594)
+ */
+ writel(0x00040000, fsm->base + SPI_FAST_SEQ_CFG);
+
+ /*
+ * Before writing data to the FIFO, apply a small delay to allow a
+ * potential change of FIFO direction to complete.
+ */
+ if (fsm->fifo_dir_delay == 0)
+ readl(fsm->base + SPI_FAST_SEQ_CFG);
+ else
+ udelay(fsm->fifo_dir_delay);
+
+
+ /* Write data to FIFO, before starting sequence (see GNBvd79593) */
+ if (size_lb) {
+ stfsm_write_fifo(fsm, (uint32_t *)p, size_lb);
+ p += size_lb;
+ }
+
+ /* Handle non-aligned size */
+ if (size_mop) {
+ memset(t, 0xff, write_mask + 1); /* fill with 0xff's */
+ for (i = 0; i < size_mop; i++)
+ t[i] = *p++;
+
+ stfsm_write_fifo(fsm, tmp, write_mask + 1);
+ }
+
+ /* Start sequence */
+ stfsm_load_seq(fsm, seq);
+
+ /* Wait for sequence to finish */
+ stfsm_wait_seq(fsm);
+
+ /* Wait for completion */
+ ret = stfsm_wait_busy(fsm);
+ if (ret && fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS)
+ stfsm_s25fl_clear_status_reg(fsm);
+
+ /* Exit 32-bit address mode, if required */
+ if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 0);
+
+ return 0;
+}
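+
+/*
+ * The trailing partial word is padded with 0xff before being queued:
+ * NOR programming can only clear bits (1 -> 0), so the padding leaves
+ * the cells beyond the requested range untouched. For example, a
+ * 10-byte write on a single pad pushes 8 bytes directly, then one
+ * 4-byte word holding the last two data bytes followed by 0xff 0xff.
+ */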
+
+/*
+ * Read an address range from the flash chip. The address range
+ * may be any size provided it is within the physical boundaries.
+ */
+static int stfsm_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent);
+ uint32_t bytes;
+
+ dev_dbg(fsm->dev, "%s from 0x%08x, len %zd\n",
+ __func__, (u32)from, len);
+
+ mutex_lock(&fsm->lock);
+
+ while (len > 0) {
+ bytes = min_t(size_t, len, FLASH_PAGESIZE);
+
+ stfsm_read(fsm, buf, bytes, from);
+
+ buf += bytes;
+ from += bytes;
+ len -= bytes;
+
+ *retlen += bytes;
+ }
+
+ mutex_unlock(&fsm->lock);
+
+ return 0;
+}
+
+static int stfsm_erase_sector(struct stfsm *fsm, uint32_t offset)
+{
+ struct stfsm_seq *seq = &stfsm_seq_erase_sector;
+ int ret;
+
+ dev_dbg(fsm->dev, "erasing sector at 0x%08x\n", offset);
+
+ /* Enter 32-bit address mode, if required */
+ if (fsm->configuration & CFG_ERASESEC_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 1);
+
+ seq->addr1 = (offset >> 16) & 0xffff;
+ seq->addr2 = offset & 0xffff;
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ /* Wait for completion */
+ ret = stfsm_wait_busy(fsm);
+ if (ret && fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS)
+ stfsm_s25fl_clear_status_reg(fsm);
+
+ /* Exit 32-bit address mode, if required */
+ if (fsm->configuration & CFG_ERASESEC_TOGGLE_32BIT_ADDR)
+ stfsm_enter_32bit_addr(fsm, 0);
+
+ return ret;
+}
+
+static int stfsm_erase_chip(struct stfsm *fsm)
+{
+ const struct stfsm_seq *seq = &stfsm_seq_erase_chip;
+
+ dev_dbg(fsm->dev, "erasing chip\n");
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_wait_seq(fsm);
+
+ return stfsm_wait_busy(fsm);
+}
+
+/*
+ * Write an address range to the flash chip. Data must be written in
+ * FLASH_PAGESIZE chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ */
+static int stfsm_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent);
+
+ u32 page_offs;
+ u32 bytes;
+ uint8_t *b = (uint8_t *)buf;
+ int ret = 0;
+
+ dev_dbg(fsm->dev, "%s to 0x%08x, len %zd\n", __func__, (u32)to, len);
+
+ /* Offset within page */
+ page_offs = to % FLASH_PAGESIZE;
+
+ mutex_lock(&fsm->lock);
+
+ while (len) {
+ /* Write up to page boundary */
+ bytes = min_t(size_t, FLASH_PAGESIZE - page_offs, len);
+
+ ret = stfsm_write(fsm, b, bytes, to);
+ if (ret)
+ goto out1;
+
+ b += bytes;
+ len -= bytes;
+ to += bytes;
+
+ /* We are now page-aligned */
+ page_offs = 0;
+
+ *retlen += bytes;
+
+ }
+
+out1:
+ mutex_unlock(&fsm->lock);
+
+ return ret;
+}
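+
+/*
+ * Chunking example, with the 256-byte FLASH_PAGESIZE used here: a
+ * 20-byte write to offset 0x10fa (page_offs = 250) is split into a
+ * 6-byte write up to the page boundary, followed by a 14-byte,
+ * page-aligned write.
+ */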
+
+/*
+ * Erase an address range on the flash chip. The address range may span
+ * one or more erase sectors. Return an error if there is a problem erasing.
+ */
+static int stfsm_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent);
+ u32 addr, len;
+ int ret;
+
+ dev_dbg(fsm->dev, "%s at 0x%llx, len %lld\n", __func__,
+ (long long)instr->addr, (long long)instr->len);
+
+ addr = instr->addr;
+ len = instr->len;
+
+ mutex_lock(&fsm->lock);
+
+ /* Whole-chip erase? */
+ if (len == mtd->size) {
+ ret = stfsm_erase_chip(fsm);
+ if (ret)
+ goto out1;
+ } else {
+ while (len) {
+ ret = stfsm_erase_sector(fsm, addr);
+ if (ret)
+ goto out1;
+
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+ }
+ }
+
+ mutex_unlock(&fsm->lock);
+
+ return 0;
+
+out1:
+ mutex_unlock(&fsm->lock);
+
+ return ret;
+}
+
+static void stfsm_read_jedec(struct stfsm *fsm, uint8_t *jedec)
+{
+ const struct stfsm_seq *seq = &stfsm_seq_read_jedec;
+ uint32_t tmp[2];
+
+ stfsm_load_seq(fsm, seq);
+
+ stfsm_read_fifo(fsm, tmp, 8);
+
+ memcpy(jedec, tmp, 5);
+
+ stfsm_wait_seq(fsm);
+}
+
+static struct flash_info *stfsm_jedec_probe(struct stfsm *fsm)
+{
+ struct flash_info *info;
+ u16 ext_jedec;
+ u32 jedec;
+ u8 id[5];
+
+ stfsm_read_jedec(fsm, id);
+
+ jedec = id[0] << 16 | id[1] << 8 | id[2];
+ /*
+ * JEDEC also defines an optional "extended device information"
+ * string that follows any vendor-specific data, beyond the three
+ * bytes we use here. Supporting some chips might require using it.
+ */
+ ext_jedec = id[3] << 8 | id[4];
+
+ dev_dbg(fsm->dev, "JEDEC = 0x%08x [%5ph]\n", jedec, id);
+
+ for (info = flash_types; info->name; info++) {
+ if (info->jedec_id == jedec) {
+ if (info->ext_id && info->ext_id != ext_jedec)
+ continue;
+ return info;
+ }
+ }
+ dev_err(fsm->dev, "Unrecognized JEDEC id %06x\n", jedec);
+
+ return NULL;
+}
+
+static int stfsm_set_mode(struct stfsm *fsm, uint32_t mode)
+{
+ int ret, timeout = 10;
+
+ /* Wait for controller to accept mode change */
+ while (--timeout) {
+ ret = readl(fsm->base + SPI_STA_MODE_CHANGE);
+ if (ret & 0x1)
+ break;
+ udelay(1);
+ }
+
+ if (!timeout)
+ return -EBUSY;
+
+ writel(mode, fsm->base + SPI_MODESELECT);
+
+ return 0;
+}
+
+static void stfsm_set_freq(struct stfsm *fsm, uint32_t spi_freq)
+{
+ uint32_t emi_freq;
+ uint32_t clk_div;
+
+ emi_freq = clk_get_rate(fsm->clk);
+
+ /*
+ * Calculate clk_div - values between 2 and 128
+ * Multiple of 2, rounded up
+ */
+ clk_div = 2 * DIV_ROUND_UP(emi_freq, 2 * spi_freq);
+ if (clk_div < 2)
+ clk_div = 2;
+ else if (clk_div > 128)
+ clk_div = 128;
+
+ /*
+ * Determine a suitable delay for the IP to complete a change of
+ * direction of the FIFO. The required delay is related to the clock
+ * divider used. The following heuristics are based on empirical tests,
+ * using a 100MHz EMI clock.
+ */
+ if (clk_div <= 4)
+ fsm->fifo_dir_delay = 0;
+ else if (clk_div <= 10)
+ fsm->fifo_dir_delay = 1;
+ else
+ fsm->fifo_dir_delay = DIV_ROUND_UP(clk_div, 10);
+
+ dev_dbg(fsm->dev, "emi_clk = %uHZ, spi_freq = %uHZ, clk_div = %u\n",
+ emi_freq, spi_freq, clk_div);
+
+ writel(clk_div, fsm->base + SPI_CLOCKDIV);
+}
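+
+/*
+ * Example: with a 100MHz EMI clock and a requested spi_freq of 30MHz,
+ * clk_div = 2 * DIV_ROUND_UP(100, 2 * 30) = 4, giving an actual SPI
+ * clock of 25MHz; the divider always rounds the frequency down, so the
+ * device is never overclocked. As clk_div <= 4, no FIFO
+ * direction-change delay is required.
+ */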
+
+static int stfsm_init(struct stfsm *fsm)
+{
+ int ret;
+
+ /* Perform a soft reset of the FSM controller */
+ writel(SEQ_CFG_SWRESET, fsm->base + SPI_FAST_SEQ_CFG);
+ udelay(1);
+ writel(0, fsm->base + SPI_FAST_SEQ_CFG);
+
+ /* Set clock to 'safe' frequency initially */
+ stfsm_set_freq(fsm, STFSM_FLASH_SAFE_FREQ);
+
+ /* Switch to FSM */
+ ret = stfsm_set_mode(fsm, SPI_MODESELECT_FSM);
+ if (ret)
+ return ret;
+
+ /* Set timing parameters */
+ writel(SPI_CFG_DEVICE_ST |
+ SPI_CFG_DEFAULT_MIN_CS_HIGH |
+ SPI_CFG_DEFAULT_CS_SETUPHOLD |
+ SPI_CFG_DEFAULT_DATA_HOLD,
+ fsm->base + SPI_CONFIGDATA);
+ writel(STFSM_DEFAULT_WR_TIME, fsm->base + SPI_STATUS_WR_TIME_REG);
+
+ /*
+ * Set the FSM 'WAIT' delay to the minimum workable value. Note, for
+ * our purposes, the WAIT instruction is used purely to achieve
+ * "sequence validity" rather than actually implement a delay.
+ */
+ writel(0x00000001, fsm->base + SPI_PROGRAM_ERASE_TIME);
+
+ /* Clear FIFO, just in case */
+ stfsm_clear_fifo(fsm);
+
+ return 0;
+}
+
+static void stfsm_fetch_platform_configs(struct platform_device *pdev)
+{
+ struct stfsm *fsm = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ struct regmap *regmap;
+ uint32_t boot_device_reg;
+ uint32_t boot_device_spi;
+ uint32_t boot_device; /* Value we read from *boot_device_reg */
+ int ret;
+
+ /* Booting from SPI NOR Flash is the default */
+ fsm->booted_from_spi = true;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ if (IS_ERR(regmap))
+ goto boot_device_fail;
+
+ fsm->reset_signal = of_property_read_bool(np, "st,reset-signal");
+
+ fsm->reset_por = of_property_read_bool(np, "st,reset-por");
+
+ /* Where in the syscon the boot device information lives */
+ ret = of_property_read_u32(np, "st,boot-device-reg", &boot_device_reg);
+ if (ret)
+ goto boot_device_fail;
+
+ /* Boot device value when booted from SPI NOR */
+ ret = of_property_read_u32(np, "st,boot-device-spi", &boot_device_spi);
+ if (ret)
+ goto boot_device_fail;
+
+ ret = regmap_read(regmap, boot_device_reg, &boot_device);
+ if (ret)
+ goto boot_device_fail;
+
+ if (boot_device != boot_device_spi)
+ fsm->booted_from_spi = false;
+
+ return;
+
+boot_device_fail:
+ dev_warn(&pdev->dev,
+ "failed to fetch boot device, assuming boot from SPI\n");
+}
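+
+/*
+ * Illustrative device-tree fragment (node name, unit address and
+ * register values are hypothetical and board-specific):
+ *
+ *	spifsm@9022000 {
+ *		compatible = "st,spi-fsm";
+ *		reg = <0x9022000 0x1000>;
+ *		st,syscfg = <&syscfg_core>;
+ *		st,boot-device-reg = <0x958>;
+ *		st,boot-device-spi = <0x1a>;
+ *		st,reset-signal;
+ *	};
+ *
+ * If the syscon lookup or either st,boot-device-* property is missing,
+ * the driver conservatively assumes it booted from SPI.
+ */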
+
+static int stfsm_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct flash_info *info;
+ struct stfsm *fsm;
+ int ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "No DT found\n");
+ return -EINVAL;
+ }
+
+ fsm = devm_kzalloc(&pdev->dev, sizeof(*fsm), GFP_KERNEL);
+ if (!fsm)
+ return -ENOMEM;
+
+ fsm->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, fsm);
+
+ fsm->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(fsm->base))
+ return PTR_ERR(fsm->base);
+
+ fsm->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(fsm->clk)) {
+ dev_err(fsm->dev, "Couldn't find EMI clock.\n");
+ return PTR_ERR(fsm->clk);
+ }
+
+ mutex_init(&fsm->lock);
+
+ ret = stfsm_init(fsm);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialise FSM Controller\n");
+ return ret;
+ }
+
+ stfsm_fetch_platform_configs(pdev);
+
+ /* Detect SPI FLASH device */
+ info = stfsm_jedec_probe(fsm);
+ if (!info)
+ return -ENODEV;
+ fsm->info = info;
+
+ /* Use device size to determine address width */
+ if (info->sector_size * info->n_sectors > 0x1000000)
+ info->flags |= FLASH_FLAG_32BIT_ADDR;
+
+ /*
+ * Configure READ/WRITE/ERASE sequences according to platform and
+ * device flags.
+ */
+ if (info->config)
+ ret = info->config(fsm);
+ else
+ ret = stfsm_prepare_rwe_seqs_default(fsm);
+ if (ret)
+ return ret;
+
+ fsm->mtd.name = info->name;
+ fsm->mtd.dev.parent = &pdev->dev;
+ mtd_set_of_node(&fsm->mtd, np);
+ fsm->mtd.type = MTD_NORFLASH;
+ fsm->mtd.writesize = 4;
+ fsm->mtd.writebufsize = fsm->mtd.writesize;
+ fsm->mtd.flags = MTD_CAP_NORFLASH;
+ fsm->mtd.size = info->sector_size * info->n_sectors;
+ fsm->mtd.erasesize = info->sector_size;
+
+ fsm->mtd._read = stfsm_mtd_read;
+ fsm->mtd._write = stfsm_mtd_write;
+ fsm->mtd._erase = stfsm_mtd_erase;
+
+ dev_info(&pdev->dev,
+ "Found serial flash device: %s\n"
+ " size = %llx (%lldMiB) erasesize = 0x%08x (%uKiB)\n",
+ info->name,
+ (long long)fsm->mtd.size, (long long)(fsm->mtd.size >> 20),
+ fsm->mtd.erasesize, (fsm->mtd.erasesize >> 10));
+
+ return mtd_device_register(&fsm->mtd, NULL, 0);
+}
+
+static int stfsm_remove(struct platform_device *pdev)
+{
+ struct stfsm *fsm = platform_get_drvdata(pdev);
+
+ WARN_ON(mtd_device_unregister(&fsm->mtd));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stfsm_suspend(struct device *dev)
+{
+ struct stfsm *fsm = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(fsm->clk);
+
+ return 0;
+}
+
+static int stfsm_resume(struct device *dev)
+{
+ struct stfsm *fsm = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(fsm->clk);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(stfsm_pm_ops, stfsm_suspend, stfsm_resume);
+
+static const struct of_device_id stfsm_match[] = {
+ { .compatible = "st,spi-fsm", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stfsm_match);
+
+static struct platform_driver stfsm_driver = {
+ .probe = stfsm_probe,
+ .remove = stfsm_remove,
+ .driver = {
+ .name = "st-spi-fsm",
+ .of_match_table = stfsm_match,
+ .pm = &stfsm_pm_ops,
+ },
+};
+module_platform_driver(stfsm_driver);
+
+MODULE_AUTHOR("Angus Clark <angus.clark@st.com>");
+MODULE_DESCRIPTION("ST SPI FSM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
new file mode 100644
index 0000000000..8c22064ead
--- /dev/null
+++ b/drivers/mtd/ftl.c
@@ -0,0 +1,1063 @@
+/* This version ported to the Linux-MTD system by dwmw2@infradead.org
+ *
+ * Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ * - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups
+ *
+ * Based on:
+ */
+/*======================================================================
+
+ A Flash Translation Layer memory card driver
+
+ This driver implements a disk-like block device driver with an
+ apparent block size of 512 bytes for flash memory cards.
+
+ ftl_cs.c 1.62 2000/02/01 00:59:04
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright © 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+ LEGAL NOTE: The FTL format is patented by M-Systems. They have
+ granted a license for its use with PCMCIA devices:
+
+ "M-Systems grants a royalty-free, non-exclusive license under
+ any presently existing M-Systems intellectual property rights
+ necessary for the design and development of FTL-compatible
+ drivers, file systems and utilities using the data formats with
+ PCMCIA PC Cards as described in the PCMCIA Flash Translation
+ Layer (FTL) Specification."
+
+ Use of the FTL format for non-PCMCIA applications may be an
+ infringement of these patents. For additional information,
+ contact M-Systems directly. M-Systems has since been acquired by Sandisk.
+
+======================================================================*/
+#include <linux/mtd/blktrans.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+/*#define PSYCHO_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/hdreg.h>
+#include <linux/vmalloc.h>
+#include <linux/blkpg.h>
+#include <linux/uaccess.h>
+
+#include <linux/mtd/ftl.h>
+
+/*====================================================================*/
+
+/* Parameters that can be set with 'insmod' */
+static int shuffle_freq = 50;
+module_param(shuffle_freq, int, 0);
+
+/*====================================================================*/
+
+/* Major device # for FTL device */
+#ifndef FTL_MAJOR
+#define FTL_MAJOR 44
+#endif
+
+
+/*====================================================================*/
+
+/* Maximum number of separate memory devices we'll allow */
+#define MAX_DEV 4
+
+/* Maximum number of regions per device */
+#define MAX_REGION 4
+
+/* Maximum number of partitions in an FTL region */
+#define PART_BITS 4
+
+/* Maximum number of outstanding erase requests per socket */
+#define MAX_ERASE 8
+
+/* Sector size -- shouldn't need to change */
+#define SECTOR_SIZE 512
+
+
+/* Each memory region corresponds to a minor device */
+typedef struct partition_t {
+ struct mtd_blktrans_dev mbd;
+ uint32_t state;
+ uint32_t *VirtualBlockMap;
+ uint32_t FreeTotal;
+ struct eun_info_t {
+ uint32_t Offset;
+ uint32_t EraseCount;
+ uint32_t Free;
+ uint32_t Deleted;
+ } *EUNInfo;
+ struct xfer_info_t {
+ uint32_t Offset;
+ uint32_t EraseCount;
+ uint16_t state;
+ } *XferInfo;
+ uint16_t bam_index;
+ uint32_t *bam_cache;
+ uint16_t DataUnits;
+ uint32_t BlocksPerUnit;
+ erase_unit_header_t header;
+} partition_t;
+
+/* Partition state flags */
+#define FTL_FORMATTED 0x01
+
+/* Transfer unit states */
+#define XFER_UNKNOWN 0x00
+#define XFER_ERASING 0x01
+#define XFER_ERASED 0x02
+#define XFER_PREPARED 0x03
+#define XFER_FAILED 0x04
+
+/*======================================================================
+
+ Scan_header() checks to see if a memory region contains an FTL
+ partition. build_maps() reads all the erase unit headers, builds
+ the erase unit map, and then builds the virtual page map.
+
+======================================================================*/
+
+static int scan_header(partition_t *part)
+{
+ erase_unit_header_t header;
+ loff_t offset, max_offset;
+ size_t ret;
+ int err;
+
+ part->header.FormattedSize = 0;
+ max_offset = (0x100000 < part->mbd.mtd->size) ? 0x100000 : part->mbd.mtd->size;
+ /* Search first megabyte for a valid FTL header */
+ for (offset = 0;
+ (offset + sizeof(header)) < max_offset;
+ offset += part->mbd.mtd->erasesize ? : 0x2000) {
+
+ err = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret,
+ (unsigned char *)&header);
+
+ if (err)
+ return err;
+
+ if (strcmp(header.DataOrgTuple+3, "FTL100") == 0) break;
+ }
+
+ if (offset == max_offset) {
+ printk(KERN_NOTICE "ftl_cs: FTL header not found.\n");
+ return -ENOENT;
+ }
+ if (header.BlockSize != 9 ||
+ (header.EraseUnitSize < 10) || (header.EraseUnitSize > 31) ||
+ (header.NumTransferUnits >= le16_to_cpu(header.NumEraseUnits))) {
+ printk(KERN_NOTICE "ftl_cs: FTL header corrupt!\n");
+ return -1;
+ }
+ if ((1 << header.EraseUnitSize) != part->mbd.mtd->erasesize) {
+ printk(KERN_NOTICE "ftl: FTL EraseUnitSize %x != MTD erasesize %x\n",
+ 1 << header.EraseUnitSize,part->mbd.mtd->erasesize);
+ return -1;
+ }
+ part->header = header;
+ return 0;
+}
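+
+/*
+ * Note that header.EraseUnitSize is a log2 value (valid range 10..31):
+ * a region with 128KiB erase blocks, for example, must report
+ * EraseUnitSize == 17, since 1 << 17 == 0x20000 matches the MTD
+ * erasesize.
+ */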
+
+static int build_maps(partition_t *part)
+{
+ erase_unit_header_t header;
+ uint16_t xvalid, xtrans, i;
+ unsigned blocks, j;
+ int hdr_ok, ret = -1;
+ ssize_t retval;
+ loff_t offset;
+
+ /* Set up erase unit maps */
+ part->DataUnits = le16_to_cpu(part->header.NumEraseUnits) -
+ part->header.NumTransferUnits;
+ part->EUNInfo = kmalloc_array(part->DataUnits, sizeof(struct eun_info_t),
+ GFP_KERNEL);
+ if (!part->EUNInfo)
+ goto out;
+ for (i = 0; i < part->DataUnits; i++)
+ part->EUNInfo[i].Offset = 0xffffffff;
+ part->XferInfo =
+ kmalloc_array(part->header.NumTransferUnits,
+ sizeof(struct xfer_info_t),
+ GFP_KERNEL);
+ if (!part->XferInfo)
+ goto out_EUNInfo;
+
+ xvalid = xtrans = 0;
+ for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
+ offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
+ << part->header.EraseUnitSize);
+ ret = mtd_read(part->mbd.mtd, offset, sizeof(header), &retval,
+ (unsigned char *)&header);
+
+ if (ret)
+ goto out_XferInfo;
+
+ ret = -1;
+ /* Is this a transfer partition? */
+ hdr_ok = (strcmp(header.DataOrgTuple+3, "FTL100") == 0);
+ if (hdr_ok && (le16_to_cpu(header.LogicalEUN) < part->DataUnits) &&
+ (part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset == 0xffffffff)) {
+ part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset = offset;
+ part->EUNInfo[le16_to_cpu(header.LogicalEUN)].EraseCount =
+ le32_to_cpu(header.EraseCount);
+ xvalid++;
+ } else {
+ if (xtrans == part->header.NumTransferUnits) {
+ printk(KERN_NOTICE "ftl_cs: format error: too many "
+ "transfer units!\n");
+ goto out_XferInfo;
+ }
+ if (hdr_ok && (le16_to_cpu(header.LogicalEUN) == 0xffff)) {
+ part->XferInfo[xtrans].state = XFER_PREPARED;
+ part->XferInfo[xtrans].EraseCount = le32_to_cpu(header.EraseCount);
+ } else {
+ part->XferInfo[xtrans].state = XFER_UNKNOWN;
+ /* Pick anything reasonable for the erase count */
+ part->XferInfo[xtrans].EraseCount =
+ le32_to_cpu(part->header.EraseCount);
+ }
+ part->XferInfo[xtrans].Offset = offset;
+ xtrans++;
+ }
+ }
+ /* Check for format trouble */
+ header = part->header;
+ if ((xtrans != header.NumTransferUnits) ||
+ (xvalid+xtrans != le16_to_cpu(header.NumEraseUnits))) {
+ printk(KERN_NOTICE "ftl_cs: format error: erase units "
+ "don't add up!\n");
+ goto out_XferInfo;
+ }
+
+ /* Set up virtual page map */
+ blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
+ part->VirtualBlockMap = vmalloc(array_size(blocks, sizeof(uint32_t)));
+ if (!part->VirtualBlockMap)
+ goto out_XferInfo;
+
+ memset(part->VirtualBlockMap, 0xff, blocks * sizeof(uint32_t));
+ part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize;
+
+ part->bam_cache = kmalloc_array(part->BlocksPerUnit, sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!part->bam_cache)
+ goto out_VirtualBlockMap;
+
+ part->bam_index = 0xffff;
+ part->FreeTotal = 0;
+
+ for (i = 0; i < part->DataUnits; i++) {
+ part->EUNInfo[i].Free = 0;
+ part->EUNInfo[i].Deleted = 0;
+ offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
+
+ ret = mtd_read(part->mbd.mtd, offset,
+ part->BlocksPerUnit * sizeof(uint32_t), &retval,
+ (unsigned char *)part->bam_cache);
+
+ if (ret)
+ goto out_bam_cache;
+
+ for (j = 0; j < part->BlocksPerUnit; j++) {
+ if (BLOCK_FREE(le32_to_cpu(part->bam_cache[j]))) {
+ part->EUNInfo[i].Free++;
+ part->FreeTotal++;
+ } else if ((BLOCK_TYPE(le32_to_cpu(part->bam_cache[j])) == BLOCK_DATA) &&
+ (BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j])) < blocks))
+ part->VirtualBlockMap[BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j]))] =
+ (i << header.EraseUnitSize) + (j << header.BlockSize);
+ else if (BLOCK_DELETED(le32_to_cpu(part->bam_cache[j])))
+ part->EUNInfo[i].Deleted++;
+ }
+ }
+
+ ret = 0;
+ goto out;
+
+out_bam_cache:
+ kfree(part->bam_cache);
+out_VirtualBlockMap:
+ vfree(part->VirtualBlockMap);
+out_XferInfo:
+ kfree(part->XferInfo);
+out_EUNInfo:
+ kfree(part->EUNInfo);
+out:
+ return ret;
+} /* build_maps */
+
+/*======================================================================
+
+ Erase_xfer() schedules an asynchronous erase operation for a
+ transfer unit.
+
+======================================================================*/
+
+static int erase_xfer(partition_t *part,
+ uint16_t xfernum)
+{
+ int ret;
+ struct xfer_info_t *xfer;
+ struct erase_info *erase;
+
+ xfer = &part->XferInfo[xfernum];
+ pr_debug("ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset);
+ xfer->state = XFER_ERASING;
+
+ /* Is there a free erase slot? Always in MTD. */
+ erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
+ if (!erase)
+ return -ENOMEM;
+
+ erase->addr = xfer->Offset;
+ erase->len = 1 << part->header.EraseUnitSize;
+
+ ret = mtd_erase(part->mbd.mtd, erase);
+ if (!ret) {
+ xfer->state = XFER_ERASED;
+ xfer->EraseCount++;
+ } else {
+ xfer->state = XFER_FAILED;
+ pr_notice("ftl_cs: erase failed: err = %d\n", ret);
+ }
+
+ kfree(erase);
+
+ return ret;
+} /* erase_xfer */
+
+/*======================================================================
+
+ Prepare_xfer() takes a freshly erased transfer unit and gives
+ it an appropriate header.
+
+======================================================================*/
+
+static int prepare_xfer(partition_t *part, int i)
+{
+ erase_unit_header_t header;
+ struct xfer_info_t *xfer;
+ int nbam, ret;
+ uint32_t ctl;
+ ssize_t retlen;
+ loff_t offset;
+
+ xfer = &part->XferInfo[i];
+ xfer->state = XFER_FAILED;
+
+ pr_debug("ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);
+
+ /* Write the transfer unit header */
+ header = part->header;
+ header.LogicalEUN = cpu_to_le16(0xffff);
+ header.EraseCount = cpu_to_le32(xfer->EraseCount);
+
+ ret = mtd_write(part->mbd.mtd, xfer->Offset, sizeof(header), &retlen,
+ (u_char *)&header);
+
+ if (ret) {
+ return ret;
+ }
+
+ /* Write the BAM stub */
+ nbam = DIV_ROUND_UP(part->BlocksPerUnit * sizeof(uint32_t) +
+ le32_to_cpu(part->header.BAMOffset), SECTOR_SIZE);
+
+ offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset);
+ ctl = cpu_to_le32(BLOCK_CONTROL);
+
+ for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {
+
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&ctl);
+
+ if (ret)
+ return ret;
+ }
+ xfer->state = XFER_PREPARED;
+ return 0;
+
+} /* prepare_xfer */
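+
+/*
+ * The BAM-stub loop above marks every 512-byte block occupied by the
+ * unit header and the BAM itself as BLOCK_CONTROL. As a hypothetical
+ * example, with BlocksPerUnit = 256 the BAM occupies 256 * 4 = 1024
+ * bytes; if BAMOffset were 64, then nbam = DIV_ROUND_UP(1024 + 64, 512)
+ * = 3, so the first three BAM entries are written as control blocks.
+ */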
+
+/*======================================================================
+
+ Copy_erase_unit() takes a full erase block and a transfer unit,
+ copies everything to the transfer unit, then swaps the block
+ pointers.
+
+ All data blocks are copied to the corresponding blocks in the
+ target unit, so the virtual block map does not need to be
+ updated.
+
+======================================================================*/
+
+static int copy_erase_unit(partition_t *part, uint16_t srcunit,
+ uint16_t xferunit)
+{
+ u_char buf[SECTOR_SIZE];
+ struct eun_info_t *eun;
+ struct xfer_info_t *xfer;
+ uint32_t src, dest, free, i;
+ uint16_t unit;
+ int ret;
+ ssize_t retlen;
+ loff_t offset;
+ uint16_t srcunitswap = cpu_to_le16(srcunit);
+
+ eun = &part->EUNInfo[srcunit];
+ xfer = &part->XferInfo[xferunit];
+ pr_debug("ftl_cs: copying block 0x%x to 0x%x\n",
+ eun->Offset, xfer->Offset);
+
+
+ /* Read current BAM */
+ if (part->bam_index != srcunit) {
+
+ offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
+
+ ret = mtd_read(part->mbd.mtd, offset,
+ part->BlocksPerUnit * sizeof(uint32_t), &retlen,
+ (u_char *)(part->bam_cache));
+
+ /* mark the cache bad, in case we get an error later */
+ part->bam_index = 0xffff;
+
+ if (ret) {
+ printk( KERN_WARNING "ftl: Failed to read BAM cache in copy_erase_unit()!\n");
+ return ret;
+ }
+ }
+
+ /* Write the LogicalEUN for the transfer unit */
+ xfer->state = XFER_UNKNOWN;
+ offset = xfer->Offset + 20; /* Bad! */
+ unit = cpu_to_le16(0x7fff);
+
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint16_t), &retlen,
+ (u_char *)&unit);
+
+ if (ret) {
+ printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
+ return ret;
+ }
+
+ /* Copy all data blocks from source unit to transfer unit */
+ src = eun->Offset; dest = xfer->Offset;
+
+ free = 0;
+ ret = 0;
+ for (i = 0; i < part->BlocksPerUnit; i++) {
+ switch (BLOCK_TYPE(le32_to_cpu(part->bam_cache[i]))) {
+ case BLOCK_CONTROL:
+ /* This gets updated later */
+ break;
+ case BLOCK_DATA:
+ case BLOCK_REPLACEMENT:
+ ret = mtd_read(part->mbd.mtd, src, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
+ if (ret) {
+ printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
+ return ret;
+ }
+
+
+ ret = mtd_write(part->mbd.mtd, dest, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
+ if (ret) {
+ printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
+ return ret;
+ }
+
+ break;
+ default:
+ /* All other blocks must be free */
+ part->bam_cache[i] = cpu_to_le32(0xffffffff);
+ free++;
+ break;
+ }
+ src += SECTOR_SIZE;
+ dest += SECTOR_SIZE;
+ }
+
+ /* Write the BAM to the transfer unit */
+ ret = mtd_write(part->mbd.mtd,
+ xfer->Offset + le32_to_cpu(part->header.BAMOffset),
+ part->BlocksPerUnit * sizeof(int32_t),
+ &retlen,
+ (u_char *)part->bam_cache);
+ if (ret) {
+ printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
+ return ret;
+ }
+
+
+ /* All clear? Then update the LogicalEUN again */
+ ret = mtd_write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
+ &retlen, (u_char *)&srcunitswap);
+
+ if (ret) {
+ printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
+ return ret;
+ }
+
+
+ /* Update the maps and usage stats*/
+ swap(xfer->EraseCount, eun->EraseCount);
+ swap(xfer->Offset, eun->Offset);
+ part->FreeTotal -= eun->Free;
+ part->FreeTotal += free;
+ eun->Free = free;
+ eun->Deleted = 0;
+
+ /* Now, the cache should be valid for the new block */
+ part->bam_index = srcunit;
+
+ return 0;
+} /* copy_erase_unit */
+
+/*======================================================================
+
+ reclaim_block() picks a full erase unit and a transfer unit and
+ then calls copy_erase_unit() to copy one to the other. Then, it
+ schedules an erase on the expired block.
+
+ What's a good way to decide which transfer unit and which erase
+ unit to use? Beats me. My way is to always pick the transfer
+ unit with the fewest erases, and usually pick the data unit with
+ the most deleted blocks. But with a small probability, pick the
+ oldest data unit instead. This means that we generally postpone
+ the next reclamation as long as possible, but shuffle static
+ stuff around a bit for wear leveling.
+
+======================================================================*/
+
+static int reclaim_block(partition_t *part)
+{
+ uint16_t i, eun, xfer;
+ uint32_t best;
+ int queued, ret;
+
+ pr_debug("ftl_cs: reclaiming space...\n");
+ pr_debug("NumTransferUnits == %x\n", part->header.NumTransferUnits);
+ /* Pick the least erased transfer unit */
+ best = 0xffffffff; xfer = 0xffff;
+ do {
+ queued = 0;
+ for (i = 0; i < part->header.NumTransferUnits; i++) {
+ int n=0;
+ if (part->XferInfo[i].state == XFER_UNKNOWN) {
+ pr_debug("XferInfo[%d].state == XFER_UNKNOWN\n",i);
+ n=1;
+ erase_xfer(part, i);
+ }
+ if (part->XferInfo[i].state == XFER_ERASING) {
+ pr_debug("XferInfo[%d].state == XFER_ERASING\n",i);
+ n=1;
+ queued = 1;
+ }
+ else if (part->XferInfo[i].state == XFER_ERASED) {
+ pr_debug("XferInfo[%d].state == XFER_ERASED\n",i);
+ n=1;
+ prepare_xfer(part, i);
+ }
+ if (part->XferInfo[i].state == XFER_PREPARED) {
+ pr_debug("XferInfo[%d].state == XFER_PREPARED\n",i);
+ n=1;
+ if (part->XferInfo[i].EraseCount <= best) {
+ best = part->XferInfo[i].EraseCount;
+ xfer = i;
+ }
+ }
+ if (!n)
+ pr_debug("XferInfo[%d].state == %x\n",i, part->XferInfo[i].state);
+
+ }
+ if (xfer == 0xffff) {
+ if (queued) {
+ pr_debug("ftl_cs: waiting for transfer "
+ "unit to be prepared...\n");
+ mtd_sync(part->mbd.mtd);
+ } else {
+ static int ne = 0;
+ if (++ne < 5)
+ printk(KERN_NOTICE "ftl_cs: reclaim failed: no "
+ "suitable transfer units!\n");
+ else
+ pr_debug("ftl_cs: reclaim failed: no "
+ "suitable transfer units!\n");
+
+ return -EIO;
+ }
+ }
+ } while (xfer == 0xffff);
+
+ eun = 0;
+ if ((jiffies % shuffle_freq) == 0) {
+ pr_debug("ftl_cs: recycling freshest block...\n");
+ best = 0xffffffff;
+ for (i = 0; i < part->DataUnits; i++)
+ if (part->EUNInfo[i].EraseCount <= best) {
+ best = part->EUNInfo[i].EraseCount;
+ eun = i;
+ }
+ } else {
+ best = 0;
+ for (i = 0; i < part->DataUnits; i++)
+ if (part->EUNInfo[i].Deleted >= best) {
+ best = part->EUNInfo[i].Deleted;
+ eun = i;
+ }
+ if (best == 0) {
+ static int ne = 0;
+ if (++ne < 5)
+ printk(KERN_NOTICE "ftl_cs: reclaim failed: "
+ "no free blocks!\n");
+ else
+ pr_debug("ftl_cs: reclaim failed: "
+ "no free blocks!\n");
+
+ return -EIO;
+ }
+ }
+ ret = copy_erase_unit(part, eun, xfer);
+ if (!ret)
+ erase_xfer(part, xfer);
+ else
+ printk(KERN_NOTICE "ftl_cs: copy_erase_unit failed!\n");
+ return ret;
+} /* reclaim_block */
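+
+/*
+ * With the default shuffle_freq of 50, roughly one reclaim in fifty
+ * (whenever jiffies happens to be a multiple of 50) recycles the
+ * least-erased data unit rather than the most-deleted one; this is the
+ * "small probability" shuffle that slowly migrates static data for
+ * wear leveling.
+ */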
+
+/*======================================================================
+
+ Find_free() searches for a free block. If necessary, it updates
+ the BAM cache for the erase unit containing the free block. It
+ returns the block index -- the erase unit is just the currently
+ cached unit. If there are no free blocks, it returns 0 -- this
+ is never a valid data block because it contains the header.
+
+======================================================================*/
+
+#ifdef PSYCHO_DEBUG
+static void dump_lists(partition_t *part)
+{
+ int i;
+ printk(KERN_DEBUG "ftl_cs: Free total = %d\n", part->FreeTotal);
+ for (i = 0; i < part->DataUnits; i++)
+ printk(KERN_DEBUG "ftl_cs: unit %d: %d phys, %d free, "
+ "%d deleted\n", i,
+ part->EUNInfo[i].Offset >> part->header.EraseUnitSize,
+ part->EUNInfo[i].Free, part->EUNInfo[i].Deleted);
+}
+#endif
+
+static uint32_t find_free(partition_t *part)
+{
+ uint16_t stop, eun;
+ uint32_t blk;
+ size_t retlen;
+ int ret;
+
+ /* Find an erase unit with some free space */
+ stop = (part->bam_index == 0xffff) ? 0 : part->bam_index;
+ eun = stop;
+ do {
+ if (part->EUNInfo[eun].Free != 0) break;
+ /* Wrap around at end of table */
+ if (++eun == part->DataUnits) eun = 0;
+ } while (eun != stop);
+
+ if (part->EUNInfo[eun].Free == 0)
+ return 0;
+
+ /* Is this unit's BAM cached? */
+ if (eun != part->bam_index) {
+ /* Invalidate cache */
+ part->bam_index = 0xffff;
+
+ ret = mtd_read(part->mbd.mtd,
+ part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
+ part->BlocksPerUnit * sizeof(uint32_t),
+ &retlen,
+ (u_char *)(part->bam_cache));
+
+ if (ret) {
+ printk(KERN_WARNING"ftl: Error reading BAM in find_free\n");
+ return 0;
+ }
+ part->bam_index = eun;
+ }
+
+ /* Find a free block */
+ for (blk = 0; blk < part->BlocksPerUnit; blk++)
+ if (BLOCK_FREE(le32_to_cpu(part->bam_cache[blk]))) break;
+ if (blk == part->BlocksPerUnit) {
+#ifdef PSYCHO_DEBUG
+ static int ne = 0;
+ if (++ne == 1)
+ dump_lists(part);
+#endif
+ printk(KERN_NOTICE "ftl_cs: bad free list!\n");
+ return 0;
+ }
+ pr_debug("ftl_cs: found free block at %d in %d\n", blk, eun);
+ return blk;
+
+} /* find_free */
+
+
+/*======================================================================
+
+ Read a series of sectors from an FTL partition.
+
+======================================================================*/
+
+static int ftl_read(partition_t *part, caddr_t buffer,
+ u_long sector, u_long nblocks)
+{
+ uint32_t log_addr, bsize;
+ u_long i;
+ int ret;
+ size_t offset, retlen;
+
+ pr_debug("ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
+ part, sector, nblocks);
+ if (!(part->state & FTL_FORMATTED)) {
+ printk(KERN_NOTICE "ftl_cs: bad partition\n");
+ return -EIO;
+ }
+ bsize = 1 << part->header.EraseUnitSize;
+
+ for (i = 0; i < nblocks; i++) {
+ if (((sector+i) * SECTOR_SIZE) >= le32_to_cpu(part->header.FormattedSize)) {
+ printk(KERN_NOTICE "ftl_cs: bad read offset\n");
+ return -EIO;
+ }
+ log_addr = part->VirtualBlockMap[sector+i];
+ if (log_addr == 0xffffffff)
+ memset(buffer, 0, SECTOR_SIZE);
+ else {
+ offset = (part->EUNInfo[log_addr / bsize].Offset
+ + (log_addr % bsize));
+ ret = mtd_read(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
+ (u_char *)buffer);
+
+ if (ret) {
+ printk(KERN_WARNING "Error reading MTD device in ftl_read()\n");
+ return ret;
+ }
+ }
+ buffer += SECTOR_SIZE;
+ }
+ return 0;
+} /* ftl_read */
+
+/*======================================================================
+
+ Write a series of sectors to an FTL partition
+
+======================================================================*/
+
+static int set_bam_entry(partition_t *part, uint32_t log_addr,
+ uint32_t virt_addr)
+{
+ uint32_t bsize, blk, le_virt_addr;
+#ifdef PSYCHO_DEBUG
+ uint32_t old_addr;
+#endif
+ uint16_t eun;
+ int ret;
+ size_t retlen, offset;
+
+ pr_debug("ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n",
+ part, log_addr, virt_addr);
+ bsize = 1 << part->header.EraseUnitSize;
+ eun = log_addr / bsize;
+ blk = (log_addr % bsize) / SECTOR_SIZE;
+ offset = (part->EUNInfo[eun].Offset + blk * sizeof(uint32_t) +
+ le32_to_cpu(part->header.BAMOffset));
+
+#ifdef PSYCHO_DEBUG
+ ret = mtd_read(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&old_addr);
+ if (ret) {
+ printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
+ return ret;
+ }
+ old_addr = le32_to_cpu(old_addr);
+
+ if (((virt_addr == 0xfffffffe) && !BLOCK_FREE(old_addr)) ||
+ ((virt_addr == 0) && (BLOCK_TYPE(old_addr) != BLOCK_DATA)) ||
+ (!BLOCK_DELETED(virt_addr) && (old_addr != 0xfffffffe))) {
+ static int ne = 0;
+ if (++ne < 5) {
+ printk(KERN_NOTICE "ftl_cs: set_bam_entry() inconsistency!\n");
+ printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, old = 0x%x"
+ ", new = 0x%x\n", log_addr, old_addr, virt_addr);
+ }
+ return -EIO;
+ }
+#endif
+ le_virt_addr = cpu_to_le32(virt_addr);
+ if (part->bam_index == eun) {
+#ifdef PSYCHO_DEBUG
+ if (le32_to_cpu(part->bam_cache[blk]) != old_addr) {
+ static int ne = 0;
+ if (++ne < 5) {
+ printk(KERN_NOTICE "ftl_cs: set_bam_entry() "
+ "inconsistency!\n");
+ printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, cache"
+ " = 0x%x\n",
+ le32_to_cpu(part->bam_cache[blk]), old_addr);
+ }
+ return -EIO;
+ }
+#endif
+ part->bam_cache[blk] = le_virt_addr;
+ }
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&le_virt_addr);
+
+ if (ret) {
+ printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n");
+ printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, new = 0x%x\n",
+ log_addr, virt_addr);
+ }
+ return ret;
+} /* set_bam_entry */
+
+static int ftl_write(partition_t *part, caddr_t buffer,
+ u_long sector, u_long nblocks)
+{
+ uint32_t bsize, log_addr, virt_addr, old_addr, blk;
+ u_long i;
+ int ret;
+ size_t retlen, offset;
+
+ pr_debug("ftl_cs: ftl_write(0x%p, %ld, %ld)\n",
+ part, sector, nblocks);
+ if (!(part->state & FTL_FORMATTED)) {
+ printk(KERN_NOTICE "ftl_cs: bad partition\n");
+ return -EIO;
+ }
+ /* See if we need to reclaim space, before we start */
+ while (part->FreeTotal < nblocks) {
+ ret = reclaim_block(part);
+ if (ret)
+ return ret;
+ }
+
+ bsize = 1 << part->header.EraseUnitSize;
+
+ virt_addr = sector * SECTOR_SIZE | BLOCK_DATA;
+ for (i = 0; i < nblocks; i++) {
+ if (virt_addr >= le32_to_cpu(part->header.FormattedSize)) {
+ printk(KERN_NOTICE "ftl_cs: bad write offset\n");
+ return -EIO;
+ }
+
+ /* Grab a free block */
+ blk = find_free(part);
+ if (blk == 0) {
+ static int ne = 0;
+ if (++ne < 5)
+ printk(KERN_NOTICE "ftl_cs: internal error: "
+ "no free blocks!\n");
+ return -ENOSPC;
+ }
+
+ /* Tag the BAM entry, and write the new block */
+ log_addr = part->bam_index * bsize + blk * SECTOR_SIZE;
+ part->EUNInfo[part->bam_index].Free--;
+ part->FreeTotal--;
+ if (set_bam_entry(part, log_addr, 0xfffffffe))
+ return -EIO;
+ part->EUNInfo[part->bam_index].Deleted++;
+ offset = (part->EUNInfo[part->bam_index].Offset +
+ blk * SECTOR_SIZE);
+ ret = mtd_write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, buffer);
+
+ if (ret) {
+ printk(KERN_NOTICE "ftl_cs: block write failed!\n");
+ printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, virt_addr"
+ " = 0x%x, Offset = 0x%zx\n", log_addr, virt_addr,
+ offset);
+ return -EIO;
+ }
+
+ /* Only delete the old entry when the new entry is ready */
+ old_addr = part->VirtualBlockMap[sector+i];
+ if (old_addr != 0xffffffff) {
+ part->VirtualBlockMap[sector+i] = 0xffffffff;
+ part->EUNInfo[old_addr/bsize].Deleted++;
+ if (set_bam_entry(part, old_addr, 0))
+ return -EIO;
+ }
+
+ /* Finally, set up the new pointers */
+ if (set_bam_entry(part, log_addr, virt_addr))
+ return -EIO;
+ part->VirtualBlockMap[sector+i] = log_addr;
+ part->EUNInfo[part->bam_index].Deleted--;
+
+ buffer += SECTOR_SIZE;
+ virt_addr += SECTOR_SIZE;
+ }
+ return 0;
+} /* ftl_write */
+
+static int ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ partition_t *part = container_of(dev, struct partition_t, mbd);
+ u_long sect;
+
+ /* Sort of arbitrary: round size down to 4KiB boundary */
+ sect = le32_to_cpu(part->header.FormattedSize)/SECTOR_SIZE;
+
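+	/* Fake a 1-head, 8-sector geometry, so each cylinder covers
+	   8 * 512 = 4 KiB. */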
+ geo->heads = 1;
+ geo->sectors = 8;
+ geo->cylinders = sect >> 3;
+
+ return 0;
+}
+
+static int ftl_readsect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ return ftl_read((void *)dev, buf, block, 1);
+}
+
+static int ftl_writesect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ return ftl_write((void *)dev, buf, block, 1);
+}
+
+static int ftl_discardsect(struct mtd_blktrans_dev *dev,
+ unsigned long sector, unsigned nr_sects)
+{
+ partition_t *part = container_of(dev, struct partition_t, mbd);
+ uint32_t bsize = 1 << part->header.EraseUnitSize;
+
+ pr_debug("FTL erase sector %ld for %d sectors\n",
+ sector, nr_sects);
+
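+	/* Discarding just marks each mapped sector deleted in the BAM;
+	   the space is recovered later when reclaim_block() folds
+	   erase units. */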
+ while (nr_sects) {
+ uint32_t old_addr = part->VirtualBlockMap[sector];
+ if (old_addr != 0xffffffff) {
+ part->VirtualBlockMap[sector] = 0xffffffff;
+ part->EUNInfo[old_addr/bsize].Deleted++;
+ if (set_bam_entry(part, old_addr, 0))
+ return -EIO;
+ }
+ nr_sects--;
+ sector++;
+ }
+
+ return 0;
+}
+/*====================================================================*/
+
+static void ftl_freepart(partition_t *part)
+{
+ vfree(part->VirtualBlockMap);
+ part->VirtualBlockMap = NULL;
+ kfree(part->EUNInfo);
+ part->EUNInfo = NULL;
+ kfree(part->XferInfo);
+ part->XferInfo = NULL;
+ kfree(part->bam_cache);
+ part->bam_cache = NULL;
+} /* ftl_freepart */
+
+static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ partition_t *partition;
+
+ partition = kzalloc(sizeof(partition_t), GFP_KERNEL);
+
+ if (!partition) {
+ printk(KERN_WARNING "No memory to scan for FTL on %s\n",
+ mtd->name);
+ return;
+ }
+
+ partition->mbd.mtd = mtd;
+
+ if ((scan_header(partition) == 0) &&
+ (build_maps(partition) == 0)) {
+
+ partition->state = FTL_FORMATTED;
+#ifdef PCMCIA_DEBUG
+ printk(KERN_INFO "ftl_cs: opening %d KiB FTL partition\n",
+ le32_to_cpu(partition->header.FormattedSize) >> 10);
+#endif
+ partition->mbd.size = le32_to_cpu(partition->header.FormattedSize) >> 9;
+
+ partition->mbd.tr = tr;
+ partition->mbd.devnum = -1;
+ if (!add_mtd_blktrans_dev(&partition->mbd))
+ return;
+ }
+
+ kfree(partition);
+}
+
+static void ftl_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ del_mtd_blktrans_dev(dev);
+ ftl_freepart((partition_t *)dev);
+}
+
+static struct mtd_blktrans_ops ftl_tr = {
+ .name = "ftl",
+ .major = FTL_MAJOR,
+ .part_bits = PART_BITS,
+ .blksize = SECTOR_SIZE,
+ .readsect = ftl_readsect,
+ .writesect = ftl_writesect,
+ .discard = ftl_discardsect,
+ .getgeo = ftl_getgeo,
+ .add_mtd = ftl_add_mtd,
+ .remove_dev = ftl_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+module_mtd_blktrans(ftl_tr);
+
+MODULE_LICENSE("Dual MPL/GPL");
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("Support code for Flash Translation Layer, used on PCMCIA devices");
diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig
new file mode 100644
index 0000000000..30ffc4c16e
--- /dev/null
+++ b/drivers/mtd/hyperbus/Kconfig
@@ -0,0 +1,32 @@
+menuconfig MTD_HYPERBUS
+ tristate "HyperBus support"
+ depends on HAS_IOMEM
+ select MTD_CFI
+ select MTD_MAP_BANK_WIDTH_2
+ select MTD_CFI_AMDSTD
+ select MTD_COMPLEX_MAPPINGS
+ help
+ This is the framework for the HyperBus which can be used by
+ the HyperBus Controller driver to communicate with
+ HyperFlash. See Cypress HyperBus specification for more
+ details
+
+if MTD_HYPERBUS
+
+config HBMC_AM654
+ tristate "HyperBus controller driver for AM65x SoC"
+ depends on ARCH_K3 || COMPILE_TEST
+ select MULTIPLEXER
+ imply MUX_MMIO
+ help
+ This is the driver for HyperBus controller on TI's AM65x and
+ other SoCs
+
+config RPCIF_HYPERBUS
+ tristate "Renesas RPC-IF HyperBus driver"
+ depends on RENESAS_RPCIF
+ depends on MTD_CFI_BE_BYTE_SWAP
+ help
+ This option includes Renesas RPC-IF HyperBus support.
+
+endif # MTD_HYPERBUS
diff --git a/drivers/mtd/hyperbus/Makefile b/drivers/mtd/hyperbus/Makefile
new file mode 100644
index 0000000000..5fc2b51245
--- /dev/null
+++ b/drivers/mtd/hyperbus/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_MTD_HYPERBUS) += hyperbus-core.o
+obj-$(CONFIG_HBMC_AM654) += hbmc-am654.o
+obj-$(CONFIG_RPCIF_HYPERBUS) += rpc-if.o
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
new file mode 100644
index 0000000000..a6161ce340
--- /dev/null
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
+// Author: Vignesh Raghavendra <vigneshr@ti.com>
+
+#include <linux/completion.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/hyperbus.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/sched/task_stack.h>
+#include <linux/types.h>
+
+#define AM654_HBMC_CALIB_COUNT 25
+
+struct am654_hbmc_device_priv {
+ struct completion rx_dma_complete;
+ phys_addr_t device_base;
+ struct hyperbus_ctlr *ctlr;
+ struct dma_chan *rx_chan;
+};
+
+struct am654_hbmc_priv {
+ struct hyperbus_ctlr ctlr;
+ struct hyperbus_device hbdev;
+ struct mux_control *mux_ctrl;
+};
+
+static int am654_hbmc_calibrate(struct hyperbus_device *hbdev)
+{
+ struct map_info *map = &hbdev->map;
+ struct cfi_private cfi;
+ int count = AM654_HBMC_CALIB_COUNT;
+ int pass_count = 0;
+ int ret;
+
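+	/* Reset the chip, enter CFI query mode, and poll until the QRY
+	 * pattern reads back on several consecutive attempts. */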
+ cfi.interleave = 1;
+ cfi.device_type = CFI_DEVICETYPE_X16;
+ cfi_send_gen_cmd(0xF0, 0, 0, map, &cfi, cfi.device_type, NULL);
+ cfi_send_gen_cmd(0x98, 0x55, 0, map, &cfi, cfi.device_type, NULL);
+
+ while (count--) {
+ ret = cfi_qry_present(map, 0, &cfi);
+ if (ret)
+ pass_count++;
+ else
+ pass_count = 0;
+ if (pass_count == 5)
+ break;
+ }
+
+ cfi_qry_mode_off(0, map, &cfi);
+
+ return ret;
+}
+
+static void am654_hbmc_dma_callback(void *param)
+{
+ struct am654_hbmc_device_priv *priv = param;
+
+ complete(&priv->rx_dma_complete);
+}
+
+static int am654_hbmc_dma_read(struct am654_hbmc_device_priv *priv, void *to,
+ unsigned long from, ssize_t len)
+
+{
+ enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ struct dma_chan *rx_chan = priv->rx_chan;
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t dma_dst, dma_src;
+ dma_cookie_t cookie;
+ int ret;
+
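+	/* DMA needs a linearly-mapped, non-stack buffer; returning an
+	 * error here makes the caller fall back to PIO. */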
+ if (!priv->rx_chan || !virt_addr_valid(to) || object_is_on_stack(to))
+ return -EINVAL;
+
+ dma_dst = dma_map_single(rx_chan->device->dev, to, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_chan->device->dev, dma_dst)) {
+ dev_dbg(priv->ctlr->dev, "DMA mapping failed\n");
+ return -EIO;
+ }
+
+ dma_src = priv->device_base + from;
+ tx = dmaengine_prep_dma_memcpy(rx_chan, dma_dst, dma_src, len, flags);
+ if (!tx) {
+ dev_err(priv->ctlr->dev, "device_prep_dma_memcpy error\n");
+ ret = -EIO;
+ goto unmap_dma;
+ }
+
+ reinit_completion(&priv->rx_dma_complete);
+ tx->callback = am654_hbmc_dma_callback;
+ tx->callback_param = priv;
+ cookie = dmaengine_submit(tx);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(priv->ctlr->dev, "dma_submit_error %d\n", cookie);
+ goto unmap_dma;
+ }
+
+ dma_async_issue_pending(rx_chan);
+ if (!wait_for_completion_timeout(&priv->rx_dma_complete, msecs_to_jiffies(len + 1000))) {
+ dmaengine_terminate_sync(rx_chan);
+ dev_err(priv->ctlr->dev, "DMA wait_for_completion_timeout\n");
+ ret = -ETIMEDOUT;
+ }
+
+unmap_dma:
+ dma_unmap_single(rx_chan->device->dev, dma_dst, len, DMA_FROM_DEVICE);
+ return ret;
+}
+
+static void am654_hbmc_read(struct hyperbus_device *hbdev, void *to,
+ unsigned long from, ssize_t len)
+{
+ struct am654_hbmc_device_priv *priv = hbdev->priv;
+
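+	/* Use PIO for small reads, and as the fallback whenever DMA is
+	 * unavailable or fails. */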
+ if (len < SZ_1K || am654_hbmc_dma_read(priv, to, from, len))
+ memcpy_fromio(to, hbdev->map.virt + from, len);
+}
+
+static const struct hyperbus_ops am654_hbmc_ops = {
+ .calibrate = am654_hbmc_calibrate,
+ .copy_from = am654_hbmc_read,
+};
+
+static int am654_hbmc_request_mmap_dma(struct am654_hbmc_device_priv *priv)
+{
+ struct dma_chan *rx_chan;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ rx_chan = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(rx_chan)) {
+ if (PTR_ERR(rx_chan) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_dbg(priv->ctlr->dev, "No DMA channel available\n");
+ return 0;
+ }
+ priv->rx_chan = rx_chan;
+ init_completion(&priv->rx_dma_complete);
+
+ return 0;
+}
+
+static int am654_hbmc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct am654_hbmc_device_priv *dev_priv;
+ struct device *dev = &pdev->dev;
+ struct am654_hbmc_priv *priv;
+ struct resource res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->hbdev.np = of_get_next_child(np, NULL);
+ ret = of_address_to_resource(priv->hbdev.np, 0, &res);
+ if (ret)
+ return ret;
+
+ if (of_property_read_bool(dev->of_node, "mux-controls")) {
+ struct mux_control *control = devm_mux_control_get(dev, NULL);
+
+ if (IS_ERR(control))
+ return PTR_ERR(control);
+
+ ret = mux_control_select(control, 1);
+ if (ret) {
+ dev_err(dev, "Failed to select HBMC mux\n");
+ return ret;
+ }
+ priv->mux_ctrl = control;
+ }
+
+ priv->hbdev.map.size = resource_size(&res);
+ priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(priv->hbdev.map.virt))
+ return PTR_ERR(priv->hbdev.map.virt);
+
+ priv->ctlr.dev = dev;
+ priv->ctlr.ops = &am654_hbmc_ops;
+ priv->hbdev.ctlr = &priv->ctlr;
+
+ dev_priv = devm_kzalloc(dev, sizeof(*dev_priv), GFP_KERNEL);
+ if (!dev_priv) {
+ ret = -ENOMEM;
+ goto disable_mux;
+ }
+
+ priv->hbdev.priv = dev_priv;
+ dev_priv->device_base = res.start;
+ dev_priv->ctlr = &priv->ctlr;
+
+ ret = am654_hbmc_request_mmap_dma(dev_priv);
+ if (ret)
+ goto disable_mux;
+
+ ret = hyperbus_register_device(&priv->hbdev);
+ if (ret) {
+ dev_err(dev, "failed to register controller\n");
+ goto release_dma;
+ }
+
+ return 0;
+release_dma:
+ if (dev_priv->rx_chan)
+ dma_release_channel(dev_priv->rx_chan);
+disable_mux:
+ if (priv->mux_ctrl)
+ mux_control_deselect(priv->mux_ctrl);
+ return ret;
+}
+
+static int am654_hbmc_remove(struct platform_device *pdev)
+{
+ struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
+ struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;
+
+ hyperbus_unregister_device(&priv->hbdev);
+
+ if (priv->mux_ctrl)
+ mux_control_deselect(priv->mux_ctrl);
+
+ if (dev_priv->rx_chan)
+ dma_release_channel(dev_priv->rx_chan);
+
+ return 0;
+}
+
+static const struct of_device_id am654_hbmc_dt_ids[] = {
+ {
+ .compatible = "ti,am654-hbmc",
+ },
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, am654_hbmc_dt_ids);
+
+static struct platform_driver am654_hbmc_platform_driver = {
+ .probe = am654_hbmc_probe,
+ .remove = am654_hbmc_remove,
+ .driver = {
+ .name = "hbmc-am654",
+ .of_match_table = am654_hbmc_dt_ids,
+ },
+};
+
+module_platform_driver(am654_hbmc_platform_driver);
+
+MODULE_DESCRIPTION("HBMC driver for AM654 SoC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hbmc-am654");
+MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
diff --git a/drivers/mtd/hyperbus/hyperbus-core.c b/drivers/mtd/hyperbus/hyperbus-core.c
new file mode 100644
index 0000000000..4d8047d43e
--- /dev/null
+++ b/drivers/mtd/hyperbus/hyperbus-core.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
+// Author: Vignesh Raghavendra <vigneshr@ti.com>
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/hyperbus.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of.h>
+#include <linux/types.h>
+
+static struct hyperbus_device *map_to_hbdev(struct map_info *map)
+{
+ return container_of(map, struct hyperbus_device, map);
+}
+
+static map_word hyperbus_read16(struct map_info *map, unsigned long addr)
+{
+ struct hyperbus_device *hbdev = map_to_hbdev(map);
+ struct hyperbus_ctlr *ctlr = hbdev->ctlr;
+ map_word read_data;
+
+ read_data.x[0] = ctlr->ops->read16(hbdev, addr);
+
+ return read_data;
+}
+
+static void hyperbus_write16(struct map_info *map, map_word d,
+ unsigned long addr)
+{
+ struct hyperbus_device *hbdev = map_to_hbdev(map);
+ struct hyperbus_ctlr *ctlr = hbdev->ctlr;
+
+ ctlr->ops->write16(hbdev, addr, d.x[0]);
+}
+
+static void hyperbus_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ struct hyperbus_device *hbdev = map_to_hbdev(map);
+ struct hyperbus_ctlr *ctlr = hbdev->ctlr;
+
+ ctlr->ops->copy_from(hbdev, to, from, len);
+}
+
+static void hyperbus_copy_to(struct map_info *map, unsigned long to,
+ const void *from, ssize_t len)
+{
+ struct hyperbus_device *hbdev = map_to_hbdev(map);
+ struct hyperbus_ctlr *ctlr = hbdev->ctlr;
+
+ ctlr->ops->copy_to(hbdev, to, from, len);
+}
+
+int hyperbus_register_device(struct hyperbus_device *hbdev)
+{
+ const struct hyperbus_ops *ops;
+ struct hyperbus_ctlr *ctlr;
+ struct device_node *np;
+ struct map_info *map;
+ struct device *dev;
+ int ret;
+
+ if (!hbdev || !hbdev->np || !hbdev->ctlr || !hbdev->ctlr->dev) {
+ pr_err("hyperbus: please fill all the necessary fields!\n");
+ return -EINVAL;
+ }
+
+ np = hbdev->np;
+ ctlr = hbdev->ctlr;
+ if (!of_device_is_compatible(np, "cypress,hyperflash")) {
+ dev_err(ctlr->dev, "\"cypress,hyperflash\" compatible missing\n");
+ return -ENODEV;
+ }
+
+ hbdev->memtype = HYPERFLASH;
+
+ dev = ctlr->dev;
+ map = &hbdev->map;
+ map->name = dev_name(dev);
+ map->bankwidth = 2;
+ map->device_node = np;
+
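+	/* Install the simple_map_init() defaults first, then override
+	 * individual accessors with whatever the controller provides. */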
+ simple_map_init(map);
+ ops = ctlr->ops;
+ if (ops) {
+ if (ops->read16)
+ map->read = hyperbus_read16;
+ if (ops->write16)
+ map->write = hyperbus_write16;
+ if (ops->copy_to)
+ map->copy_to = hyperbus_copy_to;
+ if (ops->copy_from)
+ map->copy_from = hyperbus_copy_from;
+
+ if (ops->calibrate && !ctlr->calibrated) {
+ ret = ops->calibrate(hbdev);
+ if (!ret) {
+ dev_err(dev, "Calibration failed\n");
+ return -ENODEV;
+ }
+ ctlr->calibrated = true;
+ }
+ }
+
+ hbdev->mtd = do_map_probe("cfi_probe", map);
+ if (!hbdev->mtd) {
+ dev_err(dev, "probing of hyperbus device failed\n");
+ return -ENODEV;
+ }
+
+ hbdev->mtd->dev.parent = dev;
+ mtd_set_of_node(hbdev->mtd, np);
+
+ ret = mtd_device_register(hbdev->mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register mtd device\n");
+ map_destroy(hbdev->mtd);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hyperbus_register_device);
+
+void hyperbus_unregister_device(struct hyperbus_device *hbdev)
+{
+ if (hbdev && hbdev->mtd) {
+ WARN_ON(mtd_device_unregister(hbdev->mtd));
+ map_destroy(hbdev->mtd);
+ }
+}
+EXPORT_SYMBOL_GPL(hyperbus_unregister_device);
+
+MODULE_DESCRIPTION("HyperBus Framework");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c
new file mode 100644
index 0000000000..ef32fca5f7
--- /dev/null
+++ b/drivers/mtd/hyperbus/rpc-if.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux driver for RPC-IF HyperFlash
+ *
+ * Copyright (C) 2019-2020 Cogent Embedded, Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/hyperbus.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <memory/renesas-rpc-if.h>
+
+struct rpcif_hyperbus {
+ struct rpcif rpc;
+ struct hyperbus_ctlr ctlr;
+ struct hyperbus_device hbdev;
+};
+
+static const struct rpcif_op rpcif_op_tmpl = {
+ .cmd = {
+ .buswidth = 8,
+ .ddr = true,
+ },
+ .ocmd = {
+ .buswidth = 8,
+ .ddr = true,
+ },
+ .addr = {
+ .nbytes = 1,
+ .buswidth = 8,
+ .ddr = true,
+ },
+ .data = {
+ .buswidth = 8,
+ .ddr = true,
+ },
+};
+
+static void rpcif_hb_prepare_read(struct rpcif *rpc, void *to,
+ unsigned long from, ssize_t len)
+{
+ struct rpcif_op op = rpcif_op_tmpl;
+
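+	/* HyperBus devices are 16-bit word addressed, hence the byte
+	 * address >> 1; the dummy cycles presumably cover the device's
+	 * initial read latency. */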
+ op.cmd.opcode = HYPERBUS_RW_READ | HYPERBUS_AS_MEM;
+ op.addr.val = from >> 1;
+ op.dummy.buswidth = 1;
+ op.dummy.ncycles = 15;
+ op.data.dir = RPCIF_DATA_IN;
+ op.data.nbytes = len;
+ op.data.buf.in = to;
+
+ rpcif_prepare(rpc->dev, &op, NULL, NULL);
+}
+
+static void rpcif_hb_prepare_write(struct rpcif *rpc, unsigned long to,
+ void *from, ssize_t len)
+{
+ struct rpcif_op op = rpcif_op_tmpl;
+
+ op.cmd.opcode = HYPERBUS_RW_WRITE | HYPERBUS_AS_MEM;
+ op.addr.val = to >> 1;
+ op.data.dir = RPCIF_DATA_OUT;
+ op.data.nbytes = len;
+ op.data.buf.out = from;
+
+ rpcif_prepare(rpc->dev, &op, NULL, NULL);
+}
+
+static u16 rpcif_hb_read16(struct hyperbus_device *hbdev, unsigned long addr)
+{
+ struct rpcif_hyperbus *hyperbus =
+ container_of(hbdev, struct rpcif_hyperbus, hbdev);
+ map_word data;
+
+ rpcif_hb_prepare_read(&hyperbus->rpc, &data, addr, 2);
+
+ rpcif_manual_xfer(hyperbus->rpc.dev);
+
+ return data.x[0];
+}
+
+static void rpcif_hb_write16(struct hyperbus_device *hbdev, unsigned long addr,
+ u16 data)
+{
+ struct rpcif_hyperbus *hyperbus =
+ container_of(hbdev, struct rpcif_hyperbus, hbdev);
+
+ rpcif_hb_prepare_write(&hyperbus->rpc, addr, &data, 2);
+
+ rpcif_manual_xfer(hyperbus->rpc.dev);
+}
+
+static void rpcif_hb_copy_from(struct hyperbus_device *hbdev, void *to,
+ unsigned long from, ssize_t len)
+{
+ struct rpcif_hyperbus *hyperbus =
+ container_of(hbdev, struct rpcif_hyperbus, hbdev);
+
+ rpcif_hb_prepare_read(&hyperbus->rpc, to, from, len);
+
+ rpcif_dirmap_read(hyperbus->rpc.dev, from, len, to);
+}
+
+static const struct hyperbus_ops rpcif_hb_ops = {
+ .read16 = rpcif_hb_read16,
+ .write16 = rpcif_hb_write16,
+ .copy_from = rpcif_hb_copy_from,
+};
+
+static int rpcif_hb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpcif_hyperbus *hyperbus;
+ int error;
+
+ hyperbus = devm_kzalloc(dev, sizeof(*hyperbus), GFP_KERNEL);
+ if (!hyperbus)
+ return -ENOMEM;
+
+ error = rpcif_sw_init(&hyperbus->rpc, pdev->dev.parent);
+ if (error)
+ return error;
+
+ platform_set_drvdata(pdev, hyperbus);
+
+ pm_runtime_enable(hyperbus->rpc.dev);
+
+ error = rpcif_hw_init(hyperbus->rpc.dev, true);
+ if (error)
+ goto out_disable_rpm;
+
+ hyperbus->hbdev.map.size = hyperbus->rpc.size;
+ hyperbus->hbdev.map.virt = hyperbus->rpc.dirmap;
+
+ hyperbus->ctlr.dev = dev;
+ hyperbus->ctlr.ops = &rpcif_hb_ops;
+ hyperbus->hbdev.ctlr = &hyperbus->ctlr;
+ hyperbus->hbdev.np = of_get_next_child(pdev->dev.parent->of_node, NULL);
+ error = hyperbus_register_device(&hyperbus->hbdev);
+ if (error)
+ goto out_disable_rpm;
+
+ return 0;
+
+out_disable_rpm:
+ pm_runtime_disable(hyperbus->rpc.dev);
+ return error;
+}
+
+static int rpcif_hb_remove(struct platform_device *pdev)
+{
+ struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev);
+
+ hyperbus_unregister_device(&hyperbus->hbdev);
+
+ pm_runtime_disable(hyperbus->rpc.dev);
+
+ return 0;
+}
+
+static struct platform_driver rpcif_platform_driver = {
+ .probe = rpcif_hb_probe,
+ .remove = rpcif_hb_remove,
+ .driver = {
+ .name = "rpc-if-hyperflash",
+ },
+};
+
+module_platform_driver(rpcif_platform_driver);
+
+MODULE_DESCRIPTION("Renesas RPC-IF HyperFlash driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
new file mode 100644
index 0000000000..9739387cff
--- /dev/null
+++ b/drivers/mtd/inftlcore.c
@@ -0,0 +1,944 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * inftlcore.c -- Linux driver for Inverse Flash Translation Layer (INFTL)
+ *
+ * Copyright © 2002, Greg Ungerer (gerg@snapgear.com)
+ *
+ * Based heavily on the nftlcore.c code which is:
+ * Copyright © 1999 Machine Vision Holdings, Inc.
+ * Copyright © 1999 David Woodhouse <dwmw2@infradead.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/hdreg.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nftl.h>
+#include <linux/mtd/inftl.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+
+/*
+ * Maximum number of loops while examining the next block, to have a
+ * chance to detect consistency problems (they should never happen,
+ * because of the checks done during mounting).
+ */
+#define MAX_LOOPS 10000
+
+static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct INFTLrecord *inftl;
+ unsigned long temp;
+
+ if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
+ return;
+ /* OK, this is moderately ugly. But probably safe. Alternatives? */
+ if (memcmp(mtd->name, "DiskOnChip", 10))
+ return;
+
+ if (!mtd->_block_isbad) {
+ printk(KERN_ERR
+"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
+"Please use the new diskonchip driver under the NAND subsystem.\n");
+ return;
+ }
+
+ pr_debug("INFTL: add_mtd for %s\n", mtd->name);
+
+ inftl = kzalloc(sizeof(*inftl), GFP_KERNEL);
+
+ if (!inftl)
+ return;
+
+ inftl->mbd.mtd = mtd;
+ inftl->mbd.devnum = -1;
+
+ inftl->mbd.tr = tr;
+
+ if (INFTL_mount(inftl) < 0) {
+ printk(KERN_WARNING "INFTL: could not mount device\n");
+ kfree(inftl);
+ return;
+ }
+
+ /* OK, it's a new one. Set up all the data structures. */
+
+ /* Calculate geometry */
+ inftl->cylinders = 1024;
+ inftl->heads = 16;
+
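+	/* Start from CHS 1024/16 and refine sectors, heads, then
+	 * cylinders in turn until C*H*S covers mbd.size. */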
+ temp = inftl->cylinders * inftl->heads;
+ inftl->sectors = inftl->mbd.size / temp;
+ if (inftl->mbd.size % temp) {
+ inftl->sectors++;
+ temp = inftl->cylinders * inftl->sectors;
+ inftl->heads = inftl->mbd.size / temp;
+
+ if (inftl->mbd.size % temp) {
+ inftl->heads++;
+ temp = inftl->heads * inftl->sectors;
+ inftl->cylinders = inftl->mbd.size / temp;
+ }
+ }
+
+ if (inftl->mbd.size != inftl->heads * inftl->cylinders * inftl->sectors) {
+		/*
+		 * Oh no, we don't have
+		 * mbd.size == heads * cylinders * sectors
+		 */
+ printk(KERN_WARNING "INFTL: cannot calculate a geometry to "
+ "match size of 0x%lx.\n", inftl->mbd.size);
+ printk(KERN_WARNING "INFTL: using C:%d H:%d S:%d "
+ "(== 0x%lx sects)\n",
+			inftl->cylinders, inftl->heads, inftl->sectors,
+			(long)inftl->cylinders * (long)inftl->heads *
+			(long)inftl->sectors);
+ }
+
+ if (add_mtd_blktrans_dev(&inftl->mbd)) {
+ kfree(inftl->PUtable);
+ kfree(inftl->VUtable);
+ kfree(inftl);
+ return;
+ }
+#ifdef PSYCHO_DEBUG
+ printk(KERN_INFO "INFTL: Found new inftl%c\n", inftl->mbd.devnum + 'a');
+#endif
+ return;
+}
+
+static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct INFTLrecord *inftl = (void *)dev;
+
+ pr_debug("INFTL: remove_dev (i=%d)\n", dev->devnum);
+
+ del_mtd_blktrans_dev(dev);
+
+ kfree(inftl->PUtable);
+ kfree(inftl->VUtable);
+}
+
+/*
+ * Actual INFTL access routines.
+ */
+
+/*
+ * Read oob data from flash
+ */
+int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf)
+{
+ struct mtd_oob_ops ops = { };
+ int res;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = offs & (mtd->writesize - 1);
+ ops.ooblen = len;
+ ops.oobbuf = buf;
+ ops.datbuf = NULL;
+
+ res = mtd_read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ *retlen = ops.oobretlen;
+ return res;
+}
+
+/*
+ * Write oob data to flash
+ */
+int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf)
+{
+ struct mtd_oob_ops ops = { };
+ int res;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = offs & (mtd->writesize - 1);
+ ops.ooblen = len;
+ ops.oobbuf = buf;
+ ops.datbuf = NULL;
+
+ res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ *retlen = ops.oobretlen;
+ return res;
+}
+
+/*
+ * Write data and oob to flash
+ */
+static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf, uint8_t *oob)
+{
+ struct mtd_oob_ops ops = { };
+ int res;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = offs;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = oob;
+ ops.datbuf = buf;
+ ops.len = len;
+
+ res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ *retlen = ops.retlen;
+ return res;
+}
+
+/*
+ * INFTL_findfreeblock: Find a free Erase Unit on the INFTL partition.
+ *	This function is used when a free Erase Unit is needed for
+ *	the given Virtual Unit Chain.
+ */
+static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
+{
+ u16 pot = inftl->LastFreeEUN;
+ int silly = inftl->nb_blocks;
+
+ pr_debug("INFTL: INFTL_findfreeblock(inftl=%p,desperate=%d)\n",
+ inftl, desperate);
+
+ /*
+ * Normally, we force a fold to happen before we run out of free
+ * blocks completely.
+ */
+ if (!desperate && inftl->numfreeEUNs < 2) {
+ pr_debug("INFTL: there are too few free EUNs (%d)\n",
+ inftl->numfreeEUNs);
+ return BLOCK_NIL;
+ }
+
+ /* Scan for a free block */
+ do {
+ if (inftl->PUtable[pot] == BLOCK_FREE) {
+ inftl->LastFreeEUN = pot;
+ return pot;
+ }
+
+ if (++pot > inftl->lastEUN)
+ pot = 0;
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: no free blocks found! "
+ "EUN range = %d - %d\n", 0, inftl->LastFreeEUN);
+ return BLOCK_NIL;
+ }
+ } while (pot != inftl->LastFreeEUN);
+
+ return BLOCK_NIL;
+}
+
+static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned pendingblock)
+{
+ u16 BlockMap[MAX_SECTORS_PER_UNIT];
+ unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
+ unsigned int thisEUN, prevEUN, status;
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ int block, silly;
+ unsigned int targetEUN;
+ struct inftl_oob oob;
+ size_t retlen;
+
+ pr_debug("INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,pending=%d)\n",
+ inftl, thisVUC, pendingblock);
+
+ memset(BlockMap, 0xff, sizeof(BlockMap));
+ memset(BlockDeleted, 0, sizeof(BlockDeleted));
+
+ thisEUN = targetEUN = inftl->VUtable[thisVUC];
+
+ if (thisEUN == BLOCK_NIL) {
+ printk(KERN_WARNING "INFTL: trying to fold non-existent "
+ "Virtual Unit Chain %d!\n", thisVUC);
+ return BLOCK_NIL;
+ }
+
+ /*
+ * Scan to find the Erase Unit which holds the actual data for each
+ * 512-byte block within the Chain.
+ */
+ silly = MAX_LOOPS;
+ while (thisEUN < inftl->nb_blocks) {
+ for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
+ if ((BlockMap[block] != BLOCK_NIL) ||
+ BlockDeleted[block])
+ continue;
+
+ if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
+ + (block * SECTORSIZE), 16, &retlen,
+ (char *)&oob) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = oob.b.Status | oob.b.Status1;
+
+ switch(status) {
+ case SECTOR_FREE:
+ case SECTOR_IGNORE:
+ break;
+ case SECTOR_USED:
+ BlockMap[block] = thisEUN;
+ continue;
+ case SECTOR_DELETED:
+ BlockDeleted[block] = 1;
+ continue;
+ default:
+ printk(KERN_WARNING "INFTL: unknown status "
+ "for block %d in EUN %d: %x\n",
+ block, thisEUN, status);
+ break;
+ }
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: infinite loop in Virtual "
+ "Unit Chain 0x%x\n", thisVUC);
+ return BLOCK_NIL;
+ }
+
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+ /*
+ * OK. We now know the location of every block in the Virtual Unit
+ * Chain, and the Erase Unit into which we are supposed to be copying.
+ * Go for it.
+ */
+ pr_debug("INFTL: folding chain %d into unit %d\n", thisVUC, targetEUN);
+
+ for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) {
+ unsigned char movebuf[SECTORSIZE];
+ int ret;
+
+ /*
+ * If it's in the target EUN already, or if it's pending write,
+ * do nothing.
+ */
+ if (BlockMap[block] == targetEUN || (pendingblock ==
+ (thisVUC * (inftl->EraseSize / SECTORSIZE) + block))) {
+ continue;
+ }
+
+ /*
+ * Copy only in non free block (free blocks can only
+ * happen in case of media errors or deleted blocks).
+ */
+ if (BlockMap[block] == BLOCK_NIL)
+ continue;
+
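+		/* Retry a failed read once; correctable bitflips are
+		 * accepted as-is. */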
+ ret = mtd_read(mtd,
+ (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+ SECTORSIZE,
+ &retlen,
+ movebuf);
+ if (ret < 0 && !mtd_is_bitflip(ret)) {
+ ret = mtd_read(mtd,
+ (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+ SECTORSIZE,
+ &retlen,
+ movebuf);
+ if (ret != -EIO)
+ pr_debug("INFTL: error went away on retry?\n");
+ }
+ memset(&oob, 0xff, sizeof(struct inftl_oob));
+ oob.b.Status = oob.b.Status1 = SECTOR_USED;
+
+ inftl_write(inftl->mbd.mtd, (inftl->EraseSize * targetEUN) +
+ (block * SECTORSIZE), SECTORSIZE, &retlen,
+ movebuf, (char *)&oob);
+ }
+
+ /*
+ * Newest unit in chain now contains data from _all_ older units.
+ * So go through and erase each unit in chain, oldest first. (This
+ * is important, by doing oldest first if we crash/reboot then it
+ * is relatively simple to clean up the mess).
+ */
+ pr_debug("INFTL: want to erase virtual chain %d\n", thisVUC);
+
+ for (;;) {
+ /* Find oldest unit in chain. */
+ thisEUN = inftl->VUtable[thisVUC];
+ prevEUN = BLOCK_NIL;
+ while (inftl->PUtable[thisEUN] != BLOCK_NIL) {
+ prevEUN = thisEUN;
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+ /* Check if we are all done */
+ if (thisEUN == targetEUN)
+ break;
+
+ /* Unlink the last block from the chain. */
+ inftl->PUtable[prevEUN] = BLOCK_NIL;
+
+ /* Now try to erase it. */
+ if (INFTL_formatblock(inftl, thisEUN) < 0) {
+ /*
+ * Could not erase : mark block as reserved.
+ */
+ inftl->PUtable[thisEUN] = BLOCK_RESERVED;
+ } else {
+ /* Correctly erased : mark it as free */
+ inftl->PUtable[thisEUN] = BLOCK_FREE;
+ inftl->numfreeEUNs++;
+ }
+ }
+
+ return targetEUN;
+}
+
+static u16 INFTL_makefreeblock(struct INFTLrecord *inftl, unsigned pendingblock)
+{
+ /*
+ * This is the part that needs some cleverness applied.
+ * For now, I'm doing the minimum applicable to actually
+ * get the thing to work.
+ * Wear-levelling and other clever stuff needs to be implemented
+ * and we also need to do some assessment of the results when
+ * the system loses power half-way through the routine.
+ */
+ u16 LongestChain = 0;
+ u16 ChainLength = 0, thislen;
+ u16 chain, EUN;
+
+ pr_debug("INFTL: INFTL_makefreeblock(inftl=%p,"
+ "pending=%d)\n", inftl, pendingblock);
+
+ for (chain = 0; chain < inftl->nb_blocks; chain++) {
+ EUN = inftl->VUtable[chain];
+ thislen = 0;
+
+ while (EUN <= inftl->lastEUN) {
+ thislen++;
+ EUN = inftl->PUtable[EUN];
+ if (thislen > 0xff00) {
+ printk(KERN_WARNING "INFTL: endless loop in "
+ "Virtual Chain %d: Unit %x\n",
+ chain, EUN);
+ /*
+ * Actually, don't return failure.
+ * Just ignore this chain and get on with it.
+ */
+ thislen = 0;
+ break;
+ }
+ }
+
+ if (thislen > ChainLength) {
+ ChainLength = thislen;
+ LongestChain = chain;
+ }
+ }
+
+ if (ChainLength < 2) {
+ printk(KERN_WARNING "INFTL: no Virtual Unit Chains available "
+ "for folding. Failing request\n");
+ return BLOCK_NIL;
+ }
+
+ return INFTL_foldchain(inftl, LongestChain, pendingblock);
+}
+
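+/* Count the set bits in the low 'bitcount' bits of 'val'; used below to
+ * compute the per-field parity. */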
+static int nrbits(unsigned int val, int bitcount)
+{
+ int i, total = 0;
+
+ for (i = 0; (i < bitcount); i++)
+ total += (((0x1 << i) & val) ? 1 : 0);
+ return total;
+}
+
+/*
+ * INFTL_findwriteunit: Return the unit number into which we can write
+ * for this block. Make it available if it isn't already.
+ */
+static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
+{
+ unsigned int thisVUC = block / (inftl->EraseSize / SECTORSIZE);
+ unsigned int thisEUN, writeEUN, prev_block, status;
+ unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize -1);
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ struct inftl_oob oob;
+ struct inftl_bci bci;
+ unsigned char anac, nacs, parity;
+ size_t retlen;
+ int silly, silly2 = 3;
+
+ pr_debug("INFTL: INFTL_findwriteunit(inftl=%p,block=%d)\n",
+ inftl, block);
+
+ do {
+ /*
+		 * Scan the media to find a unit in the VUC which has
+		 * free space for the block in question.
+ */
+ writeEUN = BLOCK_NIL;
+ thisEUN = inftl->VUtable[thisVUC];
+ silly = MAX_LOOPS;
+
+ while (thisEUN <= inftl->lastEUN) {
+ inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
+ blockofs, 8, &retlen, (char *)&bci);
+
+ status = bci.Status | bci.Status1;
+ pr_debug("INFTL: status of block %d in EUN %d is %x\n",
+ block , writeEUN, status);
+
+ switch(status) {
+ case SECTOR_FREE:
+ writeEUN = thisEUN;
+ break;
+ case SECTOR_DELETED:
+ case SECTOR_USED:
+ /* Can't go any further */
+ goto hitused;
+ case SECTOR_IGNORE:
+ break;
+ default:
+ /*
+ * Invalid block. Don't use it any more.
+ * Must implement.
+ */
+ break;
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: infinite loop in "
+ "Virtual Unit Chain 0x%x\n", thisVUC);
+ return BLOCK_NIL;
+ }
+
+ /* Skip to next block in chain */
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+hitused:
+ if (writeEUN != BLOCK_NIL)
+ return writeEUN;
+
+
+ /*
+ * OK. We didn't find one in the existing chain, or there
+ * is no existing chain. Allocate a new one.
+ */
+ writeEUN = INFTL_findfreeblock(inftl, 0);
+
+ if (writeEUN == BLOCK_NIL) {
+ /*
+ * That didn't work - there were no free blocks just
+ * waiting to be picked up. We're going to have to fold
+ * a chain to make room.
+ */
+ thisEUN = INFTL_makefreeblock(inftl, block);
+
+ /*
+			 * Hopefully we freed something; let's try again.
+ * This time we are desperate...
+ */
+ pr_debug("INFTL: using desperate==1 to find free EUN "
+ "to accommodate write to VUC %d\n",
+ thisVUC);
+ writeEUN = INFTL_findfreeblock(inftl, 1);
+ if (writeEUN == BLOCK_NIL) {
+ /*
+ * Ouch. This should never happen - we should
+ * always be able to make some room somehow.
+ * If we get here, we've allocated more storage
+ * space than actual media, or our makefreeblock
+ * routine is missing something.
+ */
+ printk(KERN_WARNING "INFTL: cannot make free "
+ "space.\n");
+#ifdef DEBUG
+ INFTL_dumptables(inftl);
+ INFTL_dumpVUchains(inftl);
+#endif
+ return BLOCK_NIL;
+ }
+ }
+
+ /*
+ * Insert new block into virtual chain. Firstly update the
+ * block headers in flash...
+ */
+ anac = 0;
+ nacs = 0;
+ thisEUN = inftl->VUtable[thisVUC];
+ if (thisEUN != BLOCK_NIL) {
+ inftl_read_oob(mtd, thisEUN * inftl->EraseSize
+ + 8, 8, &retlen, (char *)&oob.u);
+ anac = oob.u.a.ANAC + 1;
+ nacs = oob.u.a.NACs + 1;
+ }
+
+ prev_block = inftl->VUtable[thisVUC];
+ if (prev_block < inftl->nb_blocks)
+ prev_block -= inftl->firstEUN;
+
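+		/* One parity bit per header field: VUC number, previous
+		 * unit, ANAC and NACs. */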
+ parity = (nrbits(thisVUC, 16) & 0x1) ? 0x1 : 0;
+ parity |= (nrbits(prev_block, 16) & 0x1) ? 0x2 : 0;
+ parity |= (nrbits(anac, 8) & 0x1) ? 0x4 : 0;
+ parity |= (nrbits(nacs, 8) & 0x1) ? 0x8 : 0;
+
+ oob.u.a.virtualUnitNo = cpu_to_le16(thisVUC);
+ oob.u.a.prevUnitNo = cpu_to_le16(prev_block);
+ oob.u.a.ANAC = anac;
+ oob.u.a.NACs = nacs;
+ oob.u.a.parityPerField = parity;
+ oob.u.a.discarded = 0xaa;
+
+ inftl_write_oob(mtd, writeEUN * inftl->EraseSize + 8, 8,
+ &retlen, (char *)&oob.u);
+
+ /* Also back up header... */
+ oob.u.b.virtualUnitNo = cpu_to_le16(thisVUC);
+ oob.u.b.prevUnitNo = cpu_to_le16(prev_block);
+ oob.u.b.ANAC = anac;
+ oob.u.b.NACs = nacs;
+ oob.u.b.parityPerField = parity;
+ oob.u.b.discarded = 0xaa;
+
+ inftl_write_oob(mtd, writeEUN * inftl->EraseSize +
+ SECTORSIZE * 4 + 8, 8, &retlen, (char *)&oob.u);
+
+ inftl->PUtable[writeEUN] = inftl->VUtable[thisVUC];
+ inftl->VUtable[thisVUC] = writeEUN;
+
+ inftl->numfreeEUNs--;
+ return writeEUN;
+
+ } while (silly2--);
+
+ printk(KERN_WARNING "INFTL: error folding to make room for Virtual "
+ "Unit Chain 0x%x\n", thisVUC);
+ return BLOCK_NIL;
+}
+
+/*
+ * Given a Virtual Unit Chain, see if it can be deleted, and if so do it.
+ */
+static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
+{
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ unsigned char BlockUsed[MAX_SECTORS_PER_UNIT];
+ unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
+ unsigned int thisEUN, status;
+ int block, silly;
+ struct inftl_bci bci;
+ size_t retlen;
+
+ pr_debug("INFTL: INFTL_trydeletechain(inftl=%p,"
+ "thisVUC=%d)\n", inftl, thisVUC);
+
+ memset(BlockUsed, 0, sizeof(BlockUsed));
+ memset(BlockDeleted, 0, sizeof(BlockDeleted));
+
+ thisEUN = inftl->VUtable[thisVUC];
+ if (thisEUN == BLOCK_NIL) {
+ printk(KERN_WARNING "INFTL: trying to delete non-existent "
+ "Virtual Unit Chain %d!\n", thisVUC);
+ return;
+ }
+
+ /*
+ * Scan through the Erase Units to determine whether any data is in
+ * each of the 512-byte blocks within the Chain.
+ */
+ silly = MAX_LOOPS;
+ while (thisEUN < inftl->nb_blocks) {
+ for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++) {
+ if (BlockUsed[block] || BlockDeleted[block])
+ continue;
+
+ if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
+					   + (block * SECTORSIZE), 8, &retlen,
+ (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
+
+ switch(status) {
+ case SECTOR_FREE:
+ case SECTOR_IGNORE:
+ break;
+ case SECTOR_USED:
+ BlockUsed[block] = 1;
+ continue;
+ case SECTOR_DELETED:
+ BlockDeleted[block] = 1;
+ continue;
+ default:
+ printk(KERN_WARNING "INFTL: unknown status "
+ "for block %d in EUN %d: 0x%x\n",
+ block, thisEUN, status);
+ }
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: infinite loop in Virtual "
+ "Unit Chain 0x%x\n", thisVUC);
+ return;
+ }
+
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+ for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++)
+ if (BlockUsed[block])
+ return;
+
+ /*
+ * For each block in the chain free it and make it available
+ * for future use. Erase from the oldest unit first.
+ */
+ pr_debug("INFTL: deleting empty VUC %d\n", thisVUC);
+
+ for (;;) {
+ u16 *prevEUN = &inftl->VUtable[thisVUC];
+ thisEUN = *prevEUN;
+
+ /* If the chain is all gone already, we're done */
+ if (thisEUN == BLOCK_NIL) {
+ pr_debug("INFTL: Empty VUC %d for deletion was already absent\n", thisEUN);
+ return;
+ }
+
+ /* Find oldest unit in chain. */
+ while (inftl->PUtable[thisEUN] != BLOCK_NIL) {
+ BUG_ON(thisEUN >= inftl->nb_blocks);
+
+ prevEUN = &inftl->PUtable[thisEUN];
+ thisEUN = *prevEUN;
+ }
+
+ pr_debug("Deleting EUN %d from VUC %d\n",
+ thisEUN, thisVUC);
+
+ if (INFTL_formatblock(inftl, thisEUN) < 0) {
+ /*
+ * Could not erase : mark block as reserved.
+ */
+ inftl->PUtable[thisEUN] = BLOCK_RESERVED;
+ } else {
+ /* Correctly erased : mark it as free */
+ inftl->PUtable[thisEUN] = BLOCK_FREE;
+ inftl->numfreeEUNs++;
+ }
+
+ /* Now sort out whatever was pointing to it... */
+ *prevEUN = BLOCK_NIL;
+
+ /* Ideally we'd actually be responsive to new
+ requests while we're doing this -- if there's
+ free space why should others be made to wait? */
+ cond_resched();
+ }
+
+ inftl->VUtable[thisVUC] = BLOCK_NIL;
+}
+
+static int INFTL_deleteblock(struct INFTLrecord *inftl, unsigned block)
+{
+ unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
+ unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ unsigned int status;
+ int silly = MAX_LOOPS;
+ size_t retlen;
+ struct inftl_bci bci;
+
+ pr_debug("INFTL: INFTL_deleteblock(inftl=%p,"
+ "block=%d)\n", inftl, block);
+
+ while (thisEUN < inftl->nb_blocks) {
+ if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
+ blockofs, 8, &retlen, (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
+
+ switch (status) {
+ case SECTOR_FREE:
+ case SECTOR_IGNORE:
+ break;
+ case SECTOR_DELETED:
+ thisEUN = BLOCK_NIL;
+ goto foundit;
+ case SECTOR_USED:
+ goto foundit;
+ default:
+ printk(KERN_WARNING "INFTL: unknown status for "
+ "block %d in EUN %d: 0x%x\n",
+ block, thisEUN, status);
+ break;
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: infinite loop in Virtual "
+ "Unit Chain 0x%x\n",
+ block / (inftl->EraseSize / SECTORSIZE));
+ return 1;
+ }
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+foundit:
+ if (thisEUN != BLOCK_NIL) {
+ loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
+
+ if (inftl_read_oob(mtd, ptr, 8, &retlen, (char *)&bci) < 0)
+ return -EIO;
+ bci.Status = bci.Status1 = SECTOR_DELETED;
+ if (inftl_write_oob(mtd, ptr, 8, &retlen, (char *)&bci) < 0)
+ return -EIO;
+ INFTL_trydeletechain(inftl, block / (inftl->EraseSize / SECTORSIZE));
+ }
+ return 0;
+}
+
+static int inftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
+ char *buffer)
+{
+ struct INFTLrecord *inftl = (void *)mbd;
+ unsigned int writeEUN;
+ unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
+ size_t retlen;
+ struct inftl_oob oob;
+ char *p, *pend;
+
+ pr_debug("INFTL: inftl_writeblock(inftl=%p,block=%ld,"
+ "buffer=%p)\n", inftl, block, buffer);
+
+ /* Is block all zero? */
+ pend = buffer + SECTORSIZE;
+ for (p = buffer; p < pend && !*p; p++)
+ ;
+
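+	/* All-zero blocks are stored implicitly: the mapping is deleted,
+	 * and reads of unmapped blocks return zeroes. */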
+ if (p < pend) {
+ writeEUN = INFTL_findwriteunit(inftl, block);
+
+ if (writeEUN == BLOCK_NIL) {
+ printk(KERN_WARNING "inftl_writeblock(): cannot find "
+ "block to write to\n");
+ /*
+ * If we _still_ haven't got a block to use,
+ * we're screwed.
+ */
+ return 1;
+ }
+
+ memset(&oob, 0xff, sizeof(struct inftl_oob));
+ oob.b.Status = oob.b.Status1 = SECTOR_USED;
+
+ inftl_write(inftl->mbd.mtd, (writeEUN * inftl->EraseSize) +
+ blockofs, SECTORSIZE, &retlen, (char *)buffer,
+ (char *)&oob);
+ /*
+ * need to write SECTOR_USED flags since they are not written
+ * in mtd_writeecc
+ */
+ } else {
+ INFTL_deleteblock(inftl, block);
+ }
+
+ return 0;
+}
+
+static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
+ char *buffer)
+{
+ struct INFTLrecord *inftl = (void *)mbd;
+ unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
+ unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ unsigned int status;
+ int silly = MAX_LOOPS;
+ struct inftl_bci bci;
+ size_t retlen;
+
+ pr_debug("INFTL: inftl_readblock(inftl=%p,block=%ld,"
+ "buffer=%p)\n", inftl, block, buffer);
+
+ while (thisEUN < inftl->nb_blocks) {
+ if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
+ blockofs, 8, &retlen, (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
+
+ switch (status) {
+ case SECTOR_DELETED:
+ thisEUN = BLOCK_NIL;
+ goto foundit;
+ case SECTOR_USED:
+ goto foundit;
+ case SECTOR_FREE:
+ case SECTOR_IGNORE:
+ break;
+ default:
+ printk(KERN_WARNING "INFTL: unknown status for "
+ "block %ld in EUN %d: 0x%04x\n",
+ block, thisEUN, status);
+ break;
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: infinite loop in "
+ "Virtual Unit Chain 0x%lx\n",
+ block / (inftl->EraseSize / SECTORSIZE));
+ return 1;
+ }
+
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+foundit:
+ if (thisEUN == BLOCK_NIL) {
+ /* The requested block is not on the media, return all 0x00 */
+ memset(buffer, 0, SECTORSIZE);
+ } else {
+ size_t retlen;
+ loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
+ int ret = mtd_read(mtd, ptr, SECTORSIZE, &retlen, buffer);
+
+ /* Handle corrected bit flips gracefully */
+ if (ret < 0 && !mtd_is_bitflip(ret))
+ return -EIO;
+ }
+ return 0;
+}
+
+static int inftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct INFTLrecord *inftl = (void *)dev;
+
+ geo->heads = inftl->heads;
+ geo->sectors = inftl->sectors;
+ geo->cylinders = inftl->cylinders;
+
+ return 0;
+}
+
+static struct mtd_blktrans_ops inftl_tr = {
+ .name = "inftl",
+ .major = INFTL_MAJOR,
+ .part_bits = INFTL_PARTN_BITS,
+ .blksize = 512,
+ .getgeo = inftl_getgeo,
+ .readsect = inftl_readblock,
+ .writesect = inftl_writeblock,
+ .add_mtd = inftl_add_mtd,
+ .remove_dev = inftl_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+module_mtd_blktrans(inftl_tr);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>, David Woodhouse <dwmw2@infradead.org>, Fabrice Bellard <fabrice.bellard@netgem.com> et al.");
+MODULE_DESCRIPTION("Support code for Inverse Flash Translation Layer, used on M-Systems DiskOnChip 2000, Millennium and Millennium Plus");
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
new file mode 100644
index 0000000000..6276daa296
--- /dev/null
+++ b/drivers/mtd/inftlmount.c
@@ -0,0 +1,776 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * inftlmount.c -- INFTL mount code with extensive checks.
+ *
+ * Author: Greg Ungerer (gerg@snapgear.com)
+ * Copyright © 2002-2003, Greg Ungerer (gerg@snapgear.com)
+ *
+ * Based heavily on the nftlmount.c code which is:
+ * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
+ * Copyright © 2000 Netgem S.A.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nftl.h>
+#include <linux/mtd/inftl.h>
+
+/*
+ * find_boot_record: Find the INFTL Media Header and its Spare copy which
+ * contains the various device information of the INFTL partition and
+ * Bad Unit Table. Update the PUtable[] table according to the Bad
+ * Unit Table. PUtable[] is used for management of Erase Unit in
+ * other routines in inftlcore.c and inftlmount.c.
+ */
+static int find_boot_record(struct INFTLrecord *inftl)
+{
+ struct inftl_unittail h1;
+ unsigned int i, block;
+ u8 buf[SECTORSIZE];
+ struct INFTLMediaHeader *mh = &inftl->MediaHdr;
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ struct INFTLPartition *ip;
+ size_t retlen;
+
+ pr_debug("INFTL: find_boot_record(inftl=%p)\n", inftl);
+
+ /*
+ * Assume logical EraseSize == physical erasesize for starting the
+ * scan. We'll sort it out later if we find a MediaHeader which says
+ * otherwise.
+ */
+ inftl->EraseSize = inftl->mbd.mtd->erasesize;
+ inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize;
+
+ inftl->MediaUnit = BLOCK_NIL;
+
+ /* Search for a valid boot record */
+ for (block = 0; block < inftl->nb_blocks; block++) {
+ int ret;
+
+ /*
+ * Check for BNAND header first. Then whinge if it's found
+ * but later checks fail.
+ */
+ ret = mtd_read(mtd, block * inftl->EraseSize, SECTORSIZE,
+ &retlen, buf);
+ /* We ignore ret in case the ECC of the MediaHeader is invalid
+ (which is apparently acceptable) */
+ if (retlen != SECTORSIZE) {
+ static int warncount = 5;
+
+ if (warncount) {
+ printk(KERN_WARNING "INFTL: block read at 0x%x "
+ "of mtd%d failed: %d\n",
+ block * inftl->EraseSize,
+ inftl->mbd.mtd->index, ret);
+ if (!--warncount)
+ printk(KERN_WARNING "INFTL: further "
+ "failures for this block will "
+ "not be printed\n");
+ }
+ continue;
+ }
+
+ if (retlen < 6 || memcmp(buf, "BNAND", 6)) {
+ /* BNAND\0 not found. Continue */
+ continue;
+ }
+
+ /* To be safer with BIOS, also use erase mark as discriminant */
+ ret = inftl_read_oob(mtd,
+ block * inftl->EraseSize + SECTORSIZE + 8,
+				     8, &retlen, (char *)&h1);
+ if (ret < 0) {
+ printk(KERN_WARNING "INFTL: ANAND header found at "
+ "0x%x in mtd%d, but OOB data read failed "
+ "(err %d)\n", block * inftl->EraseSize,
+ inftl->mbd.mtd->index, ret);
+ continue;
+ }
+
+ /*
+ * This is the first we've seen.
+ * Copy the media header structure into place.
+ */
+ memcpy(mh, buf, sizeof(struct INFTLMediaHeader));
+
+ /* Read the spare media header at offset 4096 */
+ mtd_read(mtd, block * inftl->EraseSize + 4096, SECTORSIZE,
+ &retlen, buf);
+ if (retlen != SECTORSIZE) {
+ printk(KERN_WARNING "INFTL: Unable to read spare "
+ "Media Header\n");
+ return -1;
+ }
+ /* Check if this one is the same as the first one we found. */
+ if (memcmp(mh, buf, sizeof(struct INFTLMediaHeader))) {
+ printk(KERN_WARNING "INFTL: Primary and spare Media "
+ "Headers disagree.\n");
+ return -1;
+ }
+
+ mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks);
+ mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions);
+ mh->NoOfBDTLPartitions = le32_to_cpu(mh->NoOfBDTLPartitions);
+ mh->BlockMultiplierBits = le32_to_cpu(mh->BlockMultiplierBits);
+ mh->FormatFlags = le32_to_cpu(mh->FormatFlags);
+ mh->PercentUsed = le32_to_cpu(mh->PercentUsed);
+
+ pr_debug("INFTL: Media Header ->\n"
+ " bootRecordID = %s\n"
+ " NoOfBootImageBlocks = %d\n"
+ " NoOfBinaryPartitions = %d\n"
+ " NoOfBDTLPartitions = %d\n"
+ " BlockMultiplierBits = %d\n"
+ " FormatFlgs = %d\n"
+ " OsakVersion = 0x%x\n"
+ " PercentUsed = %d\n",
+ mh->bootRecordID, mh->NoOfBootImageBlocks,
+ mh->NoOfBinaryPartitions,
+ mh->NoOfBDTLPartitions,
+ mh->BlockMultiplierBits, mh->FormatFlags,
+ mh->OsakVersion, mh->PercentUsed);
+
+ if (mh->NoOfBDTLPartitions == 0) {
+ printk(KERN_WARNING "INFTL: Media Header sanity check "
+ "failed: NoOfBDTLPartitions (%d) == 0, "
+ "must be at least 1\n", mh->NoOfBDTLPartitions);
+ return -1;
+ }
+
+ if ((mh->NoOfBDTLPartitions + mh->NoOfBinaryPartitions) > 4) {
+ printk(KERN_WARNING "INFTL: Media Header sanity check "
+ "failed: Total Partitions (%d) > 4, "
+ "BDTL=%d Binary=%d\n", mh->NoOfBDTLPartitions +
+ mh->NoOfBinaryPartitions,
+ mh->NoOfBDTLPartitions,
+ mh->NoOfBinaryPartitions);
+ return -1;
+ }
+
+ if (mh->BlockMultiplierBits > 1) {
+ printk(KERN_WARNING "INFTL: sorry, we don't support "
+ "UnitSizeFactor 0x%02x\n",
+ mh->BlockMultiplierBits);
+ return -1;
+ } else if (mh->BlockMultiplierBits == 1) {
+ printk(KERN_WARNING "INFTL: support for INFTL with "
+ "UnitSizeFactor 0x%02x is experimental\n",
+ mh->BlockMultiplierBits);
+ inftl->EraseSize = inftl->mbd.mtd->erasesize <<
+ mh->BlockMultiplierBits;
+ inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize;
+ block >>= mh->BlockMultiplierBits;
+ }
+
+ /* Scan the partitions */
+ for (i = 0; (i < 4); i++) {
+ ip = &mh->Partitions[i];
+ ip->virtualUnits = le32_to_cpu(ip->virtualUnits);
+ ip->firstUnit = le32_to_cpu(ip->firstUnit);
+ ip->lastUnit = le32_to_cpu(ip->lastUnit);
+ ip->flags = le32_to_cpu(ip->flags);
+ ip->spareUnits = le32_to_cpu(ip->spareUnits);
+ ip->Reserved0 = le32_to_cpu(ip->Reserved0);
+
+ pr_debug(" PARTITION[%d] ->\n"
+ " virtualUnits = %d\n"
+ " firstUnit = %d\n"
+ " lastUnit = %d\n"
+ " flags = 0x%x\n"
+ " spareUnits = %d\n",
+ i, ip->virtualUnits, ip->firstUnit,
+ ip->lastUnit, ip->flags,
+ ip->spareUnits);
+
+ if (ip->Reserved0 != ip->firstUnit) {
+ struct erase_info *instr = &inftl->instr;
+
+ /*
+ * Most likely this is using the
+				 * undocumented quick mount feature.
+				 * We don't support that; we will need
+ * to erase the hidden block for full
+ * compatibility.
+ */
+ instr->addr = ip->Reserved0 * inftl->EraseSize;
+ instr->len = inftl->EraseSize;
+ mtd_erase(mtd, instr);
+ }
+ if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) {
+ printk(KERN_WARNING "INFTL: Media Header "
+ "Partition %d sanity check failed\n"
+ " firstUnit %d : lastUnit %d > "
+ "virtualUnits %d\n", i, ip->lastUnit,
+ ip->firstUnit, ip->Reserved0);
+ return -1;
+ }
+ if (ip->Reserved1 != 0) {
+ printk(KERN_WARNING "INFTL: Media Header "
+ "Partition %d sanity check failed: "
+ "Reserved1 %d != 0\n",
+ i, ip->Reserved1);
+ return -1;
+ }
+
+ if (ip->flags & INFTL_BDTL)
+ break;
+ }
+
+ if (i >= 4) {
+ printk(KERN_WARNING "INFTL: Media Header Partition "
+ "sanity check failed:\n No partition "
+ "marked as Disk Partition\n");
+ return -1;
+ }
+
+ inftl->nb_boot_blocks = ip->firstUnit;
+ inftl->numvunits = ip->virtualUnits;
+ if (inftl->numvunits > (inftl->nb_blocks -
+ inftl->nb_boot_blocks - 2)) {
+ printk(KERN_WARNING "INFTL: Media Header sanity check "
+ "failed:\n numvunits (%d) > nb_blocks "
+ "(%d) - nb_boot_blocks(%d) - 2\n",
+ inftl->numvunits, inftl->nb_blocks,
+ inftl->nb_boot_blocks);
+ return -1;
+ }
+
+ inftl->mbd.size = inftl->numvunits *
+ (inftl->EraseSize / SECTORSIZE);
+
+ /*
+ * Block count is set to last used EUN (we won't need to keep
+ * any meta-data past that point).
+ */
+ inftl->firstEUN = ip->firstUnit;
+ inftl->lastEUN = ip->lastUnit;
+ inftl->nb_blocks = ip->lastUnit + 1;
+
+ /* Memory alloc */
+ inftl->PUtable = kmalloc_array(inftl->nb_blocks, sizeof(u16),
+ GFP_KERNEL);
+ if (!inftl->PUtable)
+ return -ENOMEM;
+
+ inftl->VUtable = kmalloc_array(inftl->nb_blocks, sizeof(u16),
+ GFP_KERNEL);
+ if (!inftl->VUtable) {
+ kfree(inftl->PUtable);
+ return -ENOMEM;
+ }
+
+ /* Mark the blocks before INFTL MediaHeader as reserved */
+ for (i = 0; i < inftl->nb_boot_blocks; i++)
+ inftl->PUtable[i] = BLOCK_RESERVED;
+ /* Mark all remaining blocks as potentially containing data */
+ for (; i < inftl->nb_blocks; i++)
+ inftl->PUtable[i] = BLOCK_NOTEXPLORED;
+
+ /* Mark this boot record (NFTL MediaHeader) block as reserved */
+ inftl->PUtable[block] = BLOCK_RESERVED;
+
+ /* Read Bad Erase Unit Table and modify PUtable[] accordingly */
+ for (i = 0; i < inftl->nb_blocks; i++) {
+ int physblock;
+ /* If any of the physical eraseblocks are bad, don't
+ use the unit. */
+ for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) {
+ if (mtd_block_isbad(inftl->mbd.mtd,
+ i * inftl->EraseSize + physblock))
+ inftl->PUtable[i] = BLOCK_RESERVED;
+ }
+ }
+
+ inftl->MediaUnit = block;
+ return 0;
+ }
+
+ /* Not found. */
+ return -1;
+}
+
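+/* Return 0 if all n bytes at 'a' equal the byte value 'c', nonzero
+ * otherwise. */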
+static int memcmpb(void *a, int c, int n)
+{
+ int i;
+ for (i = 0; i < n; i++) {
+ if (c != ((unsigned char *)a)[i])
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * check_free_sectors: check if a free sector is actually FREE,
+ *	i.e. all 0xff in the data and oob areas.
+ */
+static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
+ int len, int check_oob)
+{
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ size_t retlen;
+ int i, ret;
+ u8 *buf;
+
+ buf = kmalloc(SECTORSIZE + mtd->oobsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = -1;
+ for (i = 0; i < len; i += SECTORSIZE) {
+ if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
+ goto out;
+ if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
+ goto out;
+
+ if (check_oob) {
+			if (inftl_read_oob(mtd, address, mtd->oobsize,
+ &retlen, &buf[SECTORSIZE]) < 0)
+ goto out;
+ if (memcmpb(buf + SECTORSIZE, 0xff, mtd->oobsize) != 0)
+ goto out;
+ }
+ address += SECTORSIZE;
+ }
+
+ ret = 0;
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+/*
+ * INFTL_formatblock: format an Erase Unit by erasing ALL Erase Zones in
+ *		 the Erase Unit and update the INFTL metadata. Each erase
+ *		 operation is checked with check_free_sectors.
+ *
+ * Return: 0 on success, -1 on error.
+ *
+ * ToDo: 1. Is it necessary to run check_free_sectors after erasing?
+ */
+int INFTL_formatblock(struct INFTLrecord *inftl, int block)
+{
+ size_t retlen;
+ struct inftl_unittail uci;
+ struct erase_info *instr = &inftl->instr;
+ struct mtd_info *mtd = inftl->mbd.mtd;
+ int physblock;
+
+ pr_debug("INFTL: INFTL_formatblock(inftl=%p,block=%d)\n", inftl, block);
+
+ memset(instr, 0, sizeof(struct erase_info));
+
+ /* FIXME: Shouldn't we be setting the 'discarded' flag to zero
+ _first_? */
+
+ /* Use async erase interface, test return code */
+ instr->addr = block * inftl->EraseSize;
+ instr->len = inftl->mbd.mtd->erasesize;
+	/* Erase one physical eraseblock at a time, even though the NAND api
+	   allows us to group them. This way, if we have a failure, we can
+	   mark only the failed block in the bbt. */
+ for (physblock = 0; physblock < inftl->EraseSize;
+ physblock += instr->len, instr->addr += instr->len) {
+ int ret;
+
+ ret = mtd_erase(inftl->mbd.mtd, instr);
+ if (ret) {
+ printk(KERN_WARNING "INFTL: error while formatting block %d\n",
+ block);
+ goto fail;
+ }
+
+ /*
+ * Check the "freeness" of Erase Unit before updating metadata.
+ * FixMe: is this check really necessary? Since we have check
+ * the return code after the erase operation.
+ */
+ if (check_free_sectors(inftl, instr->addr, instr->len, 1) != 0)
+ goto fail;
+ }
+
+ uci.EraseMark = cpu_to_le16(ERASE_MARK);
+ uci.EraseMark1 = cpu_to_le16(ERASE_MARK);
+ uci.Reserved[0] = 0;
+ uci.Reserved[1] = 0;
+ uci.Reserved[2] = 0;
+ uci.Reserved[3] = 0;
+ instr->addr = block * inftl->EraseSize + SECTORSIZE * 2;
+ if (inftl_write_oob(mtd, instr->addr + 8, 8, &retlen, (char *)&uci) < 0)
+ goto fail;
+ return 0;
+fail:
+ /* could not format, update the bad block table (caller is responsible
+ for setting the PUtable to BLOCK_RESERVED on failure) */
+ mtd_block_markbad(inftl->mbd.mtd, instr->addr);
+ return -1;
+}
+
+/*
+ * format_chain: Format an invalid Virtual Unit chain. It frees all the Erase
+ * Units in a Virtual Unit Chain, i.e. all the units are disconnected.
+ *
+ * Since the chain is invalid, we will have to erase it from its
+ * head (normally for INFTL we go from the oldest). But if it has a
+ * loop then there is no oldest...
+ */
+static void format_chain(struct INFTLrecord *inftl, unsigned int first_block)
+{
+ unsigned int block = first_block, block1;
+
+ printk(KERN_WARNING "INFTL: formatting chain at block %d\n",
+ first_block);
+
+ for (;;) {
+ block1 = inftl->PUtable[block];
+
+ printk(KERN_WARNING "INFTL: formatting block %d\n", block);
+ if (INFTL_formatblock(inftl, block) < 0) {
+ /*
+			 * Cannot format! Mark it as a Bad Unit.
+ */
+ inftl->PUtable[block] = BLOCK_RESERVED;
+ } else {
+ inftl->PUtable[block] = BLOCK_FREE;
+ }
+
+ /* Goto next block on the chain */
+ block = block1;
+
+ if (block == BLOCK_NIL || block >= inftl->lastEUN)
+ break;
+ }
+}
+
+void INFTL_dumptables(struct INFTLrecord *s)
+{
+ int i;
+
+ pr_debug("-------------------------------------------"
+ "----------------------------------\n");
+
+ pr_debug("VUtable[%d] ->", s->nb_blocks);
+ for (i = 0; i < s->nb_blocks; i++) {
+ if ((i % 8) == 0)
+ pr_debug("\n%04x: ", i);
+ pr_debug("%04x ", s->VUtable[i]);
+ }
+
+ pr_debug("\n-------------------------------------------"
+ "----------------------------------\n");
+
+ pr_debug("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks);
+ for (i = 0; i <= s->lastEUN; i++) {
+ if ((i % 8) == 0)
+ pr_debug("\n%04x: ", i);
+ pr_debug("%04x ", s->PUtable[i]);
+ }
+
+ pr_debug("\n-------------------------------------------"
+ "----------------------------------\n");
+
+ pr_debug("INFTL ->\n"
+ " EraseSize = %d\n"
+ " h/s/c = %d/%d/%d\n"
+ " numvunits = %d\n"
+ " firstEUN = %d\n"
+ " lastEUN = %d\n"
+ " numfreeEUNs = %d\n"
+ " LastFreeEUN = %d\n"
+ " nb_blocks = %d\n"
+ " nb_boot_blocks = %d",
+ s->EraseSize, s->heads, s->sectors, s->cylinders,
+ s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs,
+ s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks);
+
+ pr_debug("\n-------------------------------------------"
+ "----------------------------------\n");
+}
+
+void INFTL_dumpVUchains(struct INFTLrecord *s)
+{
+ int logical, block, i;
+
+ pr_debug("-------------------------------------------"
+ "----------------------------------\n");
+
+ pr_debug("INFTL Virtual Unit Chains:\n");
+ for (logical = 0; logical < s->nb_blocks; logical++) {
+ block = s->VUtable[logical];
+ if (block >= s->nb_blocks)
+ continue;
+ pr_debug(" LOGICAL %d --> %d ", logical, block);
+ for (i = 0; i < s->nb_blocks; i++) {
+ if (s->PUtable[block] == BLOCK_NIL)
+ break;
+ block = s->PUtable[block];
+ pr_debug("%d ", block);
+ }
+ pr_debug("\n");
+ }
+
+ pr_debug("-------------------------------------------"
+ "----------------------------------\n");
+}
+
+int INFTL_mount(struct INFTLrecord *s)
+{
+ struct mtd_info *mtd = s->mbd.mtd;
+ unsigned int block, first_block, prev_block, last_block;
+ unsigned int first_logical_block, logical_block, erase_mark;
+ int chain_length, do_format_chain;
+ struct inftl_unithead1 h0;
+ struct inftl_unittail h1;
+ size_t retlen;
+ int i;
+ u8 *ANACtable, ANAC;
+
+ pr_debug("INFTL: INFTL_mount(inftl=%p)\n", s);
+
+ /* Search for INFTL MediaHeader and Spare INFTL Media Header */
+ if (find_boot_record(s) < 0) {
+		printk(KERN_WARNING "INFTL: could not find a valid boot record\n");
+ return -ENXIO;
+ }
+
+ /* Init the logical to physical table */
+ for (i = 0; i < s->nb_blocks; i++)
+ s->VUtable[i] = BLOCK_NIL;
+
+ logical_block = block = BLOCK_NIL;
+
+ /* Temporary buffer to store ANAC numbers. */
+ ANACtable = kcalloc(s->nb_blocks, sizeof(u8), GFP_KERNEL);
+ if (!ANACtable)
+ return -ENOMEM;
+
+ /*
+ * First pass is to explore each physical unit, and construct the
+ * virtual chains that exist (newest physical unit goes into VUtable).
+ * Any block that is in any way invalid will be left in the
+ * NOTEXPLORED state. Then at the end we will try to format it and
+ * mark it as free.
+ */
+ pr_debug("INFTL: pass 1, explore each unit\n");
+ for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) {
+ if (s->PUtable[first_block] != BLOCK_NOTEXPLORED)
+ continue;
+
+ do_format_chain = 0;
+ first_logical_block = BLOCK_NIL;
+ last_block = BLOCK_NIL;
+ block = first_block;
+
+ for (chain_length = 0; ; chain_length++) {
+
+ if ((chain_length == 0) &&
+ (s->PUtable[block] != BLOCK_NOTEXPLORED)) {
+ /* Nothing to do here, onto next block */
+ break;
+ }
+
+ if (inftl_read_oob(mtd, block * s->EraseSize + 8,
+ 8, &retlen, (char *)&h0) < 0 ||
+ inftl_read_oob(mtd, block * s->EraseSize +
+ 2 * SECTORSIZE + 8, 8, &retlen,
+ (char *)&h1) < 0) {
+ /* Should never happen? */
+ do_format_chain++;
+ break;
+ }
+
+ logical_block = le16_to_cpu(h0.virtualUnitNo);
+ prev_block = le16_to_cpu(h0.prevUnitNo);
+ erase_mark = le16_to_cpu((h1.EraseMark | h1.EraseMark1));
+ ANACtable[block] = h0.ANAC;
+
+ /* Previous block is relative to start of Partition */
+ if (prev_block < s->nb_blocks)
+ prev_block += s->firstEUN;
+
+ /* Already explored partial chain? */
+ if (s->PUtable[block] != BLOCK_NOTEXPLORED) {
+				/* Is this the chain for our logical unit? */
+ if (logical_block == first_logical_block) {
+ if (last_block != BLOCK_NIL)
+ s->PUtable[last_block] = block;
+ }
+ break;
+ }
+
+ /* Check for invalid block */
+ if (erase_mark != ERASE_MARK) {
+ printk(KERN_WARNING "INFTL: corrupt block %d "
+ "in chain %d, chain length %d, erase "
+ "mark 0x%x?\n", block, first_block,
+ chain_length, erase_mark);
+ /*
+ * Assume end of chain, probably incomplete
+ * fold/erase...
+ */
+ if (chain_length == 0)
+ do_format_chain++;
+ break;
+ }
+
+ /* Check for it being free already then... */
+ if ((logical_block == BLOCK_FREE) ||
+ (logical_block == BLOCK_NIL)) {
+ s->PUtable[block] = BLOCK_FREE;
+ break;
+ }
+
+ /* Sanity checks on block numbers */
+ if ((logical_block >= s->nb_blocks) ||
+ ((prev_block >= s->nb_blocks) &&
+ (prev_block != BLOCK_NIL))) {
+ if (chain_length > 0) {
+ printk(KERN_WARNING "INFTL: corrupt "
+ "block %d in chain %d?\n",
+ block, first_block);
+ do_format_chain++;
+ }
+ break;
+ }
+
+ if (first_logical_block == BLOCK_NIL) {
+ first_logical_block = logical_block;
+ } else {
+ if (first_logical_block != logical_block) {
+ /* Normal for folded chain... */
+ break;
+ }
+ }
+
+ /*
+ * Current block is valid, so if we followed a virtual
+ * chain to get here then we can set the previous
+ * block pointer in our PUtable now. Then move onto
+ * the previous block in the chain.
+ */
+ s->PUtable[block] = BLOCK_NIL;
+ if (last_block != BLOCK_NIL)
+ s->PUtable[last_block] = block;
+ last_block = block;
+ block = prev_block;
+
+ /* Check for end of chain */
+ if (block == BLOCK_NIL)
+ break;
+
+ /* Validate next block before following it... */
+ if (block > s->lastEUN) {
+ printk(KERN_WARNING "INFTL: invalid previous "
+ "block %d in chain %d?\n", block,
+ first_block);
+ do_format_chain++;
+ break;
+ }
+ }
+
+ if (do_format_chain) {
+ format_chain(s, first_block);
+ continue;
+ }
+
+ /*
+ * Looks like a valid chain then. It may not really be the
+ * newest block in the chain, but it is the newest we have
+ * found so far. We might update it in later iterations of
+ * this loop if we find something newer.
+ */
+ s->VUtable[first_logical_block] = first_block;
+ logical_block = BLOCK_NIL;
+ }
+
+ INFTL_dumptables(s);
+
+ /*
+ * Second pass, check for infinite loops in chains. These are
+ * possible because we don't update the previous pointers when
+ * we fold chains. No big deal, just fix them up in PUtable.
+ */
+ pr_debug("INFTL: pass 2, validate virtual chains\n");
+ for (logical_block = 0; logical_block < s->numvunits; logical_block++) {
+ block = s->VUtable[logical_block];
+ last_block = BLOCK_NIL;
+
+ /* Check for free/reserved/nil */
+ if (block >= BLOCK_RESERVED)
+ continue;
+
+ ANAC = ANACtable[block];
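+		/*
+		 * Walking back the chain: each older unit's ANAC should be
+		 * one less than its successor's, so the mismatch test below
+		 * marks the true (oldest) end of the chain.
+		 */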
+ for (i = 0; i < s->numvunits; i++) {
+ if (s->PUtable[block] == BLOCK_NIL)
+ break;
+ if (s->PUtable[block] > s->lastEUN) {
+ printk(KERN_WARNING "INFTL: invalid prev %d, "
+ "in virtual chain %d\n",
+ s->PUtable[block], logical_block);
+ s->PUtable[block] = BLOCK_NIL;
+
+ }
+ if (ANACtable[block] != ANAC) {
+ /*
+ * Chain must point back to itself. This is ok,
+				 * but we will need to adjust the tables with this
+ * newest block and oldest block.
+ */
+ s->VUtable[logical_block] = block;
+ s->PUtable[last_block] = BLOCK_NIL;
+ break;
+ }
+
+ ANAC--;
+ last_block = block;
+ block = s->PUtable[block];
+ }
+
+ if (i >= s->nb_blocks) {
+ /*
+			 * Uh-oh, an infinite chain with valid ANACs!
+ * Format whole chain...
+ */
+ format_chain(s, first_block);
+ }
+ }
+
+ INFTL_dumptables(s);
+ INFTL_dumpVUchains(s);
+
+ /*
+ * Third pass, format unreferenced blocks and init free block count.
+ */
+ s->numfreeEUNs = 0;
+ s->LastFreeEUN = BLOCK_NIL;
+
+ pr_debug("INFTL: pass 3, format unused blocks\n");
+ for (block = s->firstEUN; block <= s->lastEUN; block++) {
+ if (s->PUtable[block] == BLOCK_NOTEXPLORED) {
+ printk("INFTL: unreferenced block %d, formatting it\n",
+ block);
+ if (INFTL_formatblock(s, block) < 0)
+ s->PUtable[block] = BLOCK_RESERVED;
+ else
+ s->PUtable[block] = BLOCK_FREE;
+ }
+ if (s->PUtable[block] == BLOCK_FREE) {
+ s->numfreeEUNs++;
+ if (s->LastFreeEUN == BLOCK_NIL)
+ s->LastFreeEUN = block;
+ }
+ }
+
+ kfree(ANACtable);
+ return 0;
+}
diff --git a/drivers/mtd/lpddr/Kconfig b/drivers/mtd/lpddr/Kconfig
new file mode 100644
index 0000000000..0395aa6b68
--- /dev/null
+++ b/drivers/mtd/lpddr/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "LPDDR & LPDDR2 PCM memory drivers"
+ depends on MTD
+
+config MTD_LPDDR
+ tristate "Support for LPDDR flash chips"
+ select MTD_QINFO_PROBE
+ help
+	  This option enables support for LPDDR (Low Power Double Data Rate)
+	  flash chips, also known as Mobile-DDR: a standard for DDR memories
+	  intended for battery-operated systems.
+
+config MTD_QINFO_PROBE
+ depends on MTD_LPDDR
+ tristate "Detect flash chips by QINFO probe"
+ help
+	  Device information for LPDDR chips is offered through the Overlay
+	  Window QINFO interface, which permits the same software to be used
+	  for entire families of devices. This serves a similar purpose to
+	  CFI on legacy flash products.
+
+config MTD_LPDDR2_NVM
+ # ARM dependency is only for writel_relaxed()
+ depends on MTD && ARM
+ tristate "Support for LPDDR2-NVM flash chips"
+ help
+	  This option enables support for PCM memories with an LPDDR2-NVM
+	  (Low Power Double Data Rate 2) interface.
+
+endmenu
diff --git a/drivers/mtd/lpddr/Makefile b/drivers/mtd/lpddr/Makefile
new file mode 100644
index 0000000000..b217b828f4
--- /dev/null
+++ b/drivers/mtd/lpddr/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# linux/drivers/mtd/lpddr/Makefile
+#
+
+obj-$(CONFIG_MTD_QINFO_PROBE) += qinfo_probe.o
+obj-$(CONFIG_MTD_LPDDR) += lpddr_cmds.o
+obj-$(CONFIG_MTD_LPDDR2_NVM) += lpddr2_nvm.o
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
new file mode 100644
index 0000000000..f4e5174b24
--- /dev/null
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -0,0 +1,498 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * LPDDR2-NVM MTD driver. This module provides read, write, erase, lock/unlock
+ * support for LPDDR2-NVM PCM memories
+ *
+ * Copyright © 2012 Micron Technology, Inc.
+ *
+ * Vincenzo Aliberti <vincenzo.aliberti@gmail.com>
+ * Domenico Manna <domenico.manna@gmail.com>
+ * Many thanks to Andrea Vigilante for initial enabling
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/err.h>
+
+/* Parameters */
+#define ERASE_BLOCKSIZE (0x00020000/2) /* in Word */
+#define WRITE_BUFFSIZE (0x00000400/2) /* in Word */
+#define OW_BASE_ADDRESS 0x00000000 /* OW offset */
+#define BUS_WIDTH 0x00000020 /* x32 devices */
+
+/* PFOW symbols address offset */
+#define PFOW_QUERY_STRING_P (0x0000/2) /* in Word */
+#define PFOW_QUERY_STRING_F (0x0002/2) /* in Word */
+#define PFOW_QUERY_STRING_O (0x0004/2) /* in Word */
+#define PFOW_QUERY_STRING_W (0x0006/2) /* in Word */
+
+/* OW registers address */
+#define CMD_CODE_OFS (0x0080/2) /* in Word */
+#define CMD_DATA_OFS (0x0084/2) /* in Word */
+#define CMD_ADD_L_OFS (0x0088/2) /* in Word */
+#define CMD_ADD_H_OFS (0x008A/2) /* in Word */
+#define MPR_L_OFS (0x0090/2) /* in Word */
+#define MPR_H_OFS (0x0092/2) /* in Word */
+#define CMD_EXEC_OFS (0x00C0/2) /* in Word */
+#define STATUS_REG_OFS (0x00CC/2) /* in Word */
+#define PRG_BUFFER_OFS (0x0010/2) /* in Word */
+
+/* Datamask */
+#define MR_CFGMASK 0x8000
+#define SR_OK_DATAMASK 0x0080
+
+/* LPDDR2-NVM Commands */
+#define LPDDR2_NVM_LOCK 0x0061
+#define LPDDR2_NVM_UNLOCK 0x0062
+#define LPDDR2_NVM_SW_PROGRAM 0x0041
+#define LPDDR2_NVM_SW_OVERWRITE 0x0042
+#define LPDDR2_NVM_BUF_PROGRAM 0x00E9
+#define LPDDR2_NVM_BUF_OVERWRITE 0x00EA
+#define LPDDR2_NVM_ERASE 0x0020
+
+/* LPDDR2-NVM Registers offset */
+#define LPDDR2_MODE_REG_DATA 0x0040
+#define LPDDR2_MODE_REG_CFG 0x0050
+
+/*
+ * Internal Type Definitions
+ * pcm_int_data contains memory controller details:
+ * @ctl_regs : remapped base address of the LPDDR2 mode registers
+ *             (LPDDR2_MODE_REG_DATA and LPDDR2_MODE_REG_CFG)
+ * @bus_width: memory bus-width in bytes (e.g. x16 = 2 bytes, x32 = 4 bytes)
+ */
+struct pcm_int_data {
+ void __iomem *ctl_regs;
+ int bus_width;
+};
+
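+/* Single driver-wide lock serializing all Overlay Window accesses below. */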
+static DEFINE_MUTEX(lpdd2_nvm_mutex);
+
+/*
+ * Build a map_word starting from an u_long
+ */
+static inline map_word build_map_word(u_long myword)
+{
+ map_word val = { {0} };
+ val.x[0] = myword;
+ return val;
+}
+
+/*
+ * Build Mode Register Configuration DataMask based on device bus-width
+ */
+static inline u_int build_mr_cfgmask(u_int bus_width)
+{
+ u_int val = MR_CFGMASK;
+
+ if (bus_width == 0x0004) /* x32 device */
+ val = val << 16;
+
+ return val;
+}
+
+/*
+ * Build Status Register OK DataMask based on device bus-width
+ */
+static inline u_int build_sr_ok_datamask(u_int bus_width)
+{
+ u_int val = SR_OK_DATAMASK;
+
+ if (bus_width == 0x0004) /* x32 device */
+ val = (val << 16)+val;
+
+ return val;
+}
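+
+/*
+ * Worked example (derived from the code above): for an x32 device
+ * (bus_width == 0x0004) built from two stacked x16 halves, the OK mask is
+ * (0x0080 << 16) + 0x0080 == 0x00800080, i.e. the ready bit of both halves
+ * must be set.
+ */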
+
+/*
+ * Evaluates Overlay Window Control Registers address
+ */
+static inline u_long ow_reg_add(struct map_info *map, u_long offset)
+{
+ u_long val = 0;
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+
+ val = map->pfow_base + offset*pcm_data->bus_width;
+
+ return val;
+}
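+
+/*
+ * Worked example (illustrative values): with pfow_base == 0x0 on an x32 bus
+ * (bus_width == 4), CMD_CODE_OFS (0x0080/2 == 0x40 words) resolves to byte
+ * address 0x40 * 4 == 0x100 inside the Overlay Window.
+ */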
+
+/*
+ * Enable lpddr2-nvm Overlay Window
+ * Overlay Window is a memory mapped area containing all LPDDR2-NVM registers
+ * used by device commands as well as user-visible resources like the Device
+ * Status Register, Device ID, etc.
+ */
+static inline void ow_enable(struct map_info *map)
+{
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+
+ writel_relaxed(build_mr_cfgmask(pcm_data->bus_width) | 0x18,
+ pcm_data->ctl_regs + LPDDR2_MODE_REG_CFG);
+ writel_relaxed(0x01, pcm_data->ctl_regs + LPDDR2_MODE_REG_DATA);
+}
+
+/*
+ * Disable lpddr2-nvm Overlay Window
+ * Overlay Window is a memory mapped area containing all LPDDR2-NVM registers
+ * used by device commands as well as user-visible resources like the Device
+ * Status Register, Device ID, etc.
+ */
+static inline void ow_disable(struct map_info *map)
+{
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+
+ writel_relaxed(build_mr_cfgmask(pcm_data->bus_width) | 0x18,
+ pcm_data->ctl_regs + LPDDR2_MODE_REG_CFG);
+ writel_relaxed(0x02, pcm_data->ctl_regs + LPDDR2_MODE_REG_DATA);
+}
+
+/*
+ * Execute lpddr2-nvm operations
+ */
+static int lpddr2_nvm_do_op(struct map_info *map, u_long cmd_code,
+ u_long cmd_data, u_long cmd_add, u_long cmd_mpr, u_char *buf)
+{
+ map_word add_l = { {0} }, add_h = { {0} }, mpr_l = { {0} },
+ mpr_h = { {0} }, data_l = { {0} }, cmd = { {0} },
+ exec_cmd = { {0} }, sr;
+ map_word data_h = { {0} }; /* only for 2x x16 devices stacked */
+ u_long i, status_reg, prg_buff_ofs;
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+ u_int sr_ok_datamask = build_sr_ok_datamask(pcm_data->bus_width);
+
+ /* Builds low and high words for OW Control Registers */
+ add_l.x[0] = cmd_add & 0x0000FFFF;
+ add_h.x[0] = (cmd_add >> 16) & 0x0000FFFF;
+ mpr_l.x[0] = cmd_mpr & 0x0000FFFF;
+ mpr_h.x[0] = (cmd_mpr >> 16) & 0x0000FFFF;
+ cmd.x[0] = cmd_code & 0x0000FFFF;
+ exec_cmd.x[0] = 0x0001;
+ data_l.x[0] = cmd_data & 0x0000FFFF;
+ data_h.x[0] = (cmd_data >> 16) & 0x0000FFFF; /* only for 2x x16 */
+
+ /* Set Overlay Window Control Registers */
+ map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS));
+ map_write(map, data_l, ow_reg_add(map, CMD_DATA_OFS));
+ map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS));
+ map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS));
+ map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS));
+ map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS));
+ if (pcm_data->bus_width == 0x0004) { /* 2x16 devices stacked */
+ map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS) + 2);
+ map_write(map, data_h, ow_reg_add(map, CMD_DATA_OFS) + 2);
+ map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS) + 2);
+ map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS) + 2);
+ map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS) + 2);
+ map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS) + 2);
+ }
+
+ /* Fill Program Buffer */
+ if ((cmd_code == LPDDR2_NVM_BUF_PROGRAM) ||
+ (cmd_code == LPDDR2_NVM_BUF_OVERWRITE)) {
+ prg_buff_ofs = (map_read(map,
+ ow_reg_add(map, PRG_BUFFER_OFS))).x[0];
+ for (i = 0; i < cmd_mpr; i++) {
+ map_write(map, build_map_word(buf[i]), map->pfow_base +
+ prg_buff_ofs + i);
+ }
+ }
+
+ /* Command Execute */
+ map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS));
+ if (pcm_data->bus_width == 0x0004) /* 2x16 devices stacked */
+ map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS) + 2);
+
+ /* Status Register Check */
+ do {
+ sr = map_read(map, ow_reg_add(map, STATUS_REG_OFS));
+ status_reg = sr.x[0];
+ if (pcm_data->bus_width == 0x0004) {/* 2x16 devices stacked */
+ sr = map_read(map, ow_reg_add(map,
+ STATUS_REG_OFS) + 2);
+ status_reg += sr.x[0] << 16;
+ }
+ } while ((status_reg & sr_ok_datamask) != sr_ok_datamask);
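+	/*
+	 * Note (observation on the code above): the poll has no timeout, so
+	 * completion relies on the device eventually setting its ready bits;
+	 * once the loop exits, the mask test below can only succeed.
+	 */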
+
+ return (((status_reg & sr_ok_datamask) == sr_ok_datamask) ? 0 : -EIO);
+}
+
+/*
+ * Execute lpddr2-nvm operations @ block level
+ */
+static int lpddr2_nvm_do_block_op(struct mtd_info *mtd, loff_t start_add,
+ uint64_t len, u_char block_op)
+{
+ struct map_info *map = mtd->priv;
+ u_long add, end_add;
+ int ret = 0;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ ow_enable(map);
+
+ add = start_add;
+ end_add = add + len;
+
+ do {
+ ret = lpddr2_nvm_do_op(map, block_op, 0x00, add, add, NULL);
+ if (ret)
+ goto out;
+ add += mtd->erasesize;
+ } while (add < end_add);
+
+out:
+ ow_disable(map);
+ mutex_unlock(&lpdd2_nvm_mutex);
+ return ret;
+}
+
+/*
+ * verify presence of PFOW string
+ */
+static int lpddr2_nvm_pfow_present(struct map_info *map)
+{
+ map_word pfow_val[4];
+ unsigned int found = 1;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ ow_enable(map);
+
+ /* Load string from array */
+ pfow_val[0] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_P));
+ pfow_val[1] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_F));
+ pfow_val[2] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_O));
+ pfow_val[3] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_W));
+
+ /* Verify the string loaded vs expected */
+ if (!map_word_equal(map, build_map_word('P'), pfow_val[0]))
+ found = 0;
+ if (!map_word_equal(map, build_map_word('F'), pfow_val[1]))
+ found = 0;
+ if (!map_word_equal(map, build_map_word('O'), pfow_val[2]))
+ found = 0;
+ if (!map_word_equal(map, build_map_word('W'), pfow_val[3]))
+ found = 0;
+
+ ow_disable(map);
+
+ mutex_unlock(&lpdd2_nvm_mutex);
+
+ return found;
+}
+
+/*
+ * lpddr2_nvm driver read method
+ */
+static int lpddr2_nvm_read(struct mtd_info *mtd, loff_t start_add,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ *retlen = len;
+
+ map_copy_from(map, buf, start_add, *retlen);
+
+ mutex_unlock(&lpdd2_nvm_mutex);
+ return 0;
+}
+
+/*
+ * lpddr2_nvm driver write method
+ */
+static int lpddr2_nvm_write(struct mtd_info *mtd, loff_t start_add,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct pcm_int_data *pcm_data = map->fldrv_priv;
+ u_long add, current_len, tot_len, target_len, my_data;
+ u_char *write_buf = (u_char *)buf;
+ int ret = 0;
+
+ mutex_lock(&lpdd2_nvm_mutex);
+
+ ow_enable(map);
+
+ /* Set start value for the variables */
+ add = start_add;
+ target_len = len;
+ tot_len = 0;
+
+ while (tot_len < target_len) {
+ if (!(IS_ALIGNED(add, mtd->writesize))) { /* do sw program */
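+			/*
+			 * Assemble one bus word from the source bytes,
+			 * least-significant byte first.
+			 */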
+ my_data = write_buf[tot_len];
+ my_data += (write_buf[tot_len+1]) << 8;
+ if (pcm_data->bus_width == 0x0004) {/* 2x16 devices */
+ my_data += (write_buf[tot_len+2]) << 16;
+ my_data += (write_buf[tot_len+3]) << 24;
+ }
+ ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_SW_OVERWRITE,
+ my_data, add, 0x00, NULL);
+ if (ret)
+ goto out;
+
+ add += pcm_data->bus_width;
+ tot_len += pcm_data->bus_width;
+ } else { /* do buffer program */
+ current_len = min(target_len - tot_len,
+ (u_long) mtd->writesize);
+ ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_BUF_OVERWRITE,
+ 0x00, add, current_len, write_buf + tot_len);
+ if (ret)
+ goto out;
+
+ add += current_len;
+ tot_len += current_len;
+ }
+ }
+
+out:
+ *retlen = tot_len;
+ ow_disable(map);
+ mutex_unlock(&lpdd2_nvm_mutex);
+ return ret;
+}
+
+/*
+ * lpddr2_nvm driver erase method
+ */
+static int lpddr2_nvm_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return lpddr2_nvm_do_block_op(mtd, instr->addr, instr->len,
+ LPDDR2_NVM_ERASE);
+}
+
+/*
+ * lpddr2_nvm driver unlock method
+ */
+static int lpddr2_nvm_unlock(struct mtd_info *mtd, loff_t start_add,
+ uint64_t len)
+{
+ return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_UNLOCK);
+}
+
+/*
+ * lpddr2_nvm driver lock method
+ */
+static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add,
+ uint64_t len)
+{
+ return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK);
+}
+
+static const struct mtd_info lpddr2_nvm_mtd_info = {
+ .type = MTD_RAM,
+ .writesize = 1,
+ .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
+ ._read = lpddr2_nvm_read,
+ ._write = lpddr2_nvm_write,
+ ._erase = lpddr2_nvm_erase,
+ ._unlock = lpddr2_nvm_unlock,
+ ._lock = lpddr2_nvm_lock,
+};
+
+/*
+ * lpddr2_nvm driver probe method
+ */
+static int lpddr2_nvm_probe(struct platform_device *pdev)
+{
+ struct map_info *map;
+ struct mtd_info *mtd;
+ struct resource *add_range;
+ struct pcm_int_data *pcm_data;
+
+ /* Allocate memory control_regs data structures */
+ pcm_data = devm_kzalloc(&pdev->dev, sizeof(*pcm_data), GFP_KERNEL);
+ if (!pcm_data)
+ return -ENOMEM;
+
+ pcm_data->bus_width = BUS_WIDTH;
+
+ /* Allocate memory for map_info & mtd_info data structures */
+ map = devm_kzalloc(&pdev->dev, sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ mtd = devm_kzalloc(&pdev->dev, sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ return -ENOMEM;
+
+ /* lpddr2_nvm address range */
+ add_range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!add_range)
+ return -ENODEV;
+
+ /* Populate map_info data structure */
+ *map = (struct map_info) {
+ .virt = devm_ioremap_resource(&pdev->dev, add_range),
+ .name = pdev->dev.init_name,
+ .phys = add_range->start,
+ .size = resource_size(add_range),
+ .bankwidth = pcm_data->bus_width / 2,
+ .pfow_base = OW_BASE_ADDRESS,
+ .fldrv_priv = pcm_data,
+ };
+
+ if (IS_ERR(map->virt))
+ return PTR_ERR(map->virt);
+
+ simple_map_init(map); /* fill with default methods */
+
+ pcm_data->ctl_regs = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(pcm_data->ctl_regs))
+ return PTR_ERR(pcm_data->ctl_regs);
+
+ /* Populate mtd_info data structure */
+ *mtd = lpddr2_nvm_mtd_info;
+ mtd->dev.parent = &pdev->dev;
+ mtd->name = pdev->dev.init_name;
+ mtd->priv = map;
+ mtd->size = resource_size(add_range);
+ mtd->erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width;
+ mtd->writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width;
+
+ /* Verify the presence of the device looking for PFOW string */
+ if (!lpddr2_nvm_pfow_present(map)) {
+ pr_err("device not recognized\n");
+ return -EINVAL;
+ }
+ /* Parse partitions and register the MTD device */
+ return mtd_device_register(mtd, NULL, 0);
+}
+
+/*
+ * lpddr2_nvm driver remove method
+ */
+static int lpddr2_nvm_remove(struct platform_device *pdev)
+{
+ WARN_ON(mtd_device_unregister(dev_get_drvdata(&pdev->dev)));
+
+ return 0;
+}
+
+/* Initialize platform_driver data structure for lpddr2_nvm */
+static struct platform_driver lpddr2_nvm_drv = {
+ .driver = {
+ .name = "lpddr2_nvm",
+ },
+ .probe = lpddr2_nvm_probe,
+ .remove = lpddr2_nvm_remove,
+};
+
+module_platform_driver(lpddr2_nvm_drv);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vincenzo Aliberti <vincenzo.aliberti@gmail.com>");
+MODULE_DESCRIPTION("MTD driver for LPDDR2-NVM PCM memories");
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
new file mode 100644
index 0000000000..3c3939bc2d
--- /dev/null
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -0,0 +1,760 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * LPDDR flash memory device operations. This module provides read, write,
+ * erase, lock/unlock support for LPDDR flash memories
+ * (C) 2008 Korolev Alexey <akorolev@infradead.org>
+ * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
+ * Many thanks to Roman Borisov for initial enabling
+ *
+ * TODO:
+ * Implement VPP management
+ * Implement XIP support
+ * Implement OTP support
+ */
+#include <linux/mtd/pfow.h>
+#include <linux/mtd/qinfo.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
+ size_t *retlen, u_char *buf);
+static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *retlen, const u_char *buf);
+static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen);
+static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
+static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
+ size_t *retlen, void **mtdbuf, resource_size_t *phys);
+static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
+static int get_chip(struct map_info *map, struct flchip *chip, int mode);
+static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
+static void put_chip(struct map_info *map, struct flchip *chip);
+
+struct mtd_info *lpddr_cmdset(struct map_info *map)
+{
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ struct flchip_shared *shared;
+ struct flchip *chip;
+ struct mtd_info *mtd;
+ int numchips;
+ int i, j;
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd)
+ return NULL;
+ mtd->priv = map;
+ mtd->type = MTD_NORFLASH;
+
+ /* Fill in the default mtd operations */
+ mtd->_read = lpddr_read;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->flags &= ~MTD_BIT_WRITEABLE;
+ mtd->_erase = lpddr_erase;
+ mtd->_write = lpddr_write_buffers;
+ mtd->_writev = lpddr_writev;
+ mtd->_lock = lpddr_lock;
+ mtd->_unlock = lpddr_unlock;
+ if (map_is_linear(map)) {
+ mtd->_point = lpddr_point;
+ mtd->_unpoint = lpddr_unpoint;
+ }
+ mtd->size = 1 << lpddr->qinfo->DevSizeShift;
+ mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
+ mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
+
+ shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared),
+ GFP_KERNEL);
+ if (!shared) {
+ kfree(mtd);
+ return NULL;
+ }
+
+ chip = &lpddr->chips[0];
+ numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
+ for (i = 0; i < numchips; i++) {
+ shared[i].writing = shared[i].erasing = NULL;
+ mutex_init(&shared[i].lock);
+ for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
+ *chip = lpddr->chips[i];
+ chip->start += j << lpddr->chipshift;
+ chip->oldstate = chip->state = FL_READY;
+ chip->priv = &shared[i];
+ /* those should be reset too since
+ they create memory references. */
+ init_waitqueue_head(&chip->wq);
+ mutex_init(&chip->mutex);
+ chip++;
+ }
+ }
+
+ return mtd;
+}
+EXPORT_SYMBOL(lpddr_cmdset);
+
+static void print_drs_error(unsigned int dsr)
+{
+ int prog_status = (dsr & DSR_RPS) >> 8;
+
+ if (!(dsr & DSR_AVAILABLE))
+ pr_notice("DSR.15: (0) Device not Available\n");
+ if ((prog_status & 0x03) == 0x03)
+ pr_notice("DSR.9,8: (11) Attempt to program invalid half with 41h command\n");
+ else if (prog_status & 0x02)
+ pr_notice("DSR.9,8: (10) Object Mode Program attempt in region with Control Mode data\n");
+ else if (prog_status & 0x01)
+ pr_notice("DSR.9,8: (01) Program attempt in region with Object Mode data\n");
+ if (!(dsr & DSR_READY_STATUS))
+ pr_notice("DSR.7: (0) Device is Busy\n");
+ if (dsr & DSR_ESS)
+ pr_notice("DSR.6: (1) Erase Suspended\n");
+ if (dsr & DSR_ERASE_STATUS)
+ pr_notice("DSR.5: (1) Erase/Blank check error\n");
+ if (dsr & DSR_PROGRAM_STATUS)
+ pr_notice("DSR.4: (1) Program Error\n");
+ if (dsr & DSR_VPPS)
+ pr_notice("DSR.3: (1) Vpp low detect, operation aborted\n");
+ if (dsr & DSR_PSS)
+ pr_notice("DSR.2: (1) Program suspended\n");
+ if (dsr & DSR_DPS)
+ pr_notice("DSR.1: (1) Aborted Erase/Program attempt on locked block\n");
+}
+
+static int wait_for_ready(struct map_info *map, struct flchip *chip,
+ unsigned int chip_op_time)
+{
+ unsigned int timeo, reset_timeo, sleep_time;
+ unsigned int dsr;
+ flstate_t chip_state = chip->state;
+ int ret = 0;
+
+ /* set our timeout to 8 times the expected delay */
+ timeo = chip_op_time * 8;
+ if (!timeo)
+ timeo = 500000;
+ reset_timeo = timeo;
+ sleep_time = chip_op_time / 2;
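+	/*
+	 * Worked example (illustrative; timeo counts ~1 us polling steps):
+	 * for a buffer program with ProgBufferTime == 9 the caller passes
+	 * chip_op_time = 1 << 9 = 512 us, giving timeo = 4096 and
+	 * sleep_time = 256.
+	 */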
+
+ for (;;) {
+ dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
+ if (dsr & DSR_READY_STATUS)
+ break;
+ if (!timeo) {
+			printk(KERN_ERR "%s: Flash timeout error state %d\n",
+ map->name, chip_state);
+ ret = -ETIME;
+ break;
+ }
+
+ /* OK Still waiting. Drop the lock, wait a while and retry. */
+ mutex_unlock(&chip->mutex);
+ if (sleep_time >= 1000000/HZ) {
+ /*
+ * Half of the normal delay still remaining
+ * can be performed with a sleeping delay instead
+ * of busy waiting.
+ */
+ msleep(sleep_time/1000);
+ timeo -= sleep_time;
+ sleep_time = 1000000/HZ;
+ } else {
+ udelay(1);
+ cond_resched();
+ timeo--;
+ }
+ mutex_lock(&chip->mutex);
+
+ while (chip->state != chip_state) {
+ /* Someone's suspended the operation: sleep */
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ }
+ if (chip->erase_suspended || chip->write_suspended) {
+			/* Suspend has occurred while we slept: reset the timeout */
+ timeo = reset_timeo;
+ chip->erase_suspended = chip->write_suspended = 0;
+ }
+ }
+ /* check status for errors */
+ if (dsr & DSR_ERR) {
+ /* Clear DSR*/
+ map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
+		printk(KERN_WARNING "%s: Bad status on wait: 0x%x\n",
+ map->name, dsr);
+ print_drs_error(dsr);
+ ret = -EIO;
+ }
+ chip->state = FL_READY;
+ return ret;
+}
+
+static int get_chip(struct map_info *map, struct flchip *chip, int mode)
+{
+ int ret;
+ DECLARE_WAITQUEUE(wait, current);
+
+ retry:
+ if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
+ && chip->state != FL_SYNCING) {
+ /*
+		 * OK. There is a possibility of contention on the write/erase
+ * operations which are global to the real chip and not per
+ * partition. So let's fight it over in the partition which
+ * currently has authority on the operation.
+ *
+ * The rules are as follows:
+ *
+ * - any write operation must own shared->writing.
+ *
+ * - any erase operation must own _both_ shared->writing and
+ * shared->erasing.
+ *
+		 * - contention arbitration is handled in the owner's context.
+ *
+ * The 'shared' struct can be read and/or written only when
+ * its lock is taken.
+ */
+ struct flchip_shared *shared = chip->priv;
+ struct flchip *contender;
+ mutex_lock(&shared->lock);
+ contender = shared->writing;
+ if (contender && contender != chip) {
+ /*
+ * The engine to perform desired operation on this
+ * partition is already in use by someone else.
+ * Let's fight over it in the context of the chip
+ * currently using it. If it is possible to suspend,
+ * that other partition will do just that, otherwise
+ * it'll happily send us to sleep. In any case, when
+ * get_chip returns success we're clear to go ahead.
+ */
+ ret = mutex_trylock(&contender->mutex);
+ mutex_unlock(&shared->lock);
+ if (!ret)
+ goto retry;
+ mutex_unlock(&chip->mutex);
+ ret = chip_ready(map, contender, mode);
+ mutex_lock(&chip->mutex);
+
+ if (ret == -EAGAIN) {
+ mutex_unlock(&contender->mutex);
+ goto retry;
+ }
+ if (ret) {
+ mutex_unlock(&contender->mutex);
+ return ret;
+ }
+ mutex_lock(&shared->lock);
+
+ /* We should not own chip if it is already in FL_SYNCING
+ * state. Put contender and retry. */
+ if (chip->state == FL_SYNCING) {
+ put_chip(map, contender);
+ mutex_unlock(&contender->mutex);
+ goto retry;
+ }
+ mutex_unlock(&contender->mutex);
+ }
+
+ /* Check if we have suspended erase on this chip.
+ Must sleep in such a case. */
+ if (mode == FL_ERASING && shared->erasing
+ && shared->erasing->oldstate == FL_ERASING) {
+ mutex_unlock(&shared->lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ goto retry;
+ }
+
+ /* We now own it */
+ shared->writing = chip;
+ if (mode == FL_ERASING)
+ shared->erasing = chip;
+ mutex_unlock(&shared->lock);
+ }
+
+ ret = chip_ready(map, chip, mode);
+ if (ret == -EAGAIN)
+ goto retry;
+
+ return ret;
+}
+
+static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
+{
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int ret = 0;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /* Prevent setting state FL_SYNCING for chip in suspended state. */
+ if (FL_SYNCING == mode && FL_READY != chip->oldstate)
+ goto sleep;
+
+ switch (chip->state) {
+ case FL_READY:
+ case FL_JEDEC_QUERY:
+ return 0;
+
+ case FL_ERASING:
+ if (!lpddr->qinfo->SuspEraseSupp ||
+ !(mode == FL_READY || mode == FL_POINT))
+ goto sleep;
+
+ map_write(map, CMD(LPDDR_SUSPEND),
+ map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
+ chip->oldstate = FL_ERASING;
+ chip->state = FL_ERASE_SUSPENDING;
+ ret = wait_for_ready(map, chip, 0);
+ if (ret) {
+			/* Oops, something went wrong. */
+			/* Resume and pretend we weren't here. */
+			put_chip(map, chip);
+			printk(KERN_ERR "%s: suspend operation failed. "
+					"State may be wrong\n", map->name);
+ return -EIO;
+ }
+ chip->erase_suspended = 1;
+ chip->state = FL_READY;
+ return 0;
+ /* Erase suspend */
+ case FL_POINT:
+ /* Only if there's no operation suspended... */
+ if (mode == FL_READY && chip->oldstate == FL_READY)
+ return 0;
+ fallthrough;
+ default:
+sleep:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ return -EAGAIN;
+ }
+}
+
+static void put_chip(struct map_info *map, struct flchip *chip)
+{
+ if (chip->priv) {
+ struct flchip_shared *shared = chip->priv;
+ mutex_lock(&shared->lock);
+ if (shared->writing == chip && chip->oldstate == FL_READY) {
+ /* We own the ability to write, but we're done */
+ shared->writing = shared->erasing;
+ if (shared->writing && shared->writing != chip) {
+ /* give back the ownership */
+ struct flchip *loaner = shared->writing;
+ mutex_lock(&loaner->mutex);
+ mutex_unlock(&shared->lock);
+ mutex_unlock(&chip->mutex);
+ put_chip(map, loaner);
+ mutex_lock(&chip->mutex);
+ mutex_unlock(&loaner->mutex);
+ wake_up(&chip->wq);
+ return;
+ }
+ shared->erasing = NULL;
+ shared->writing = NULL;
+ } else if (shared->erasing == chip && shared->writing != chip) {
+ /*
+ * We own the ability to erase without the ability
+ * to write, which means the erase was suspended
+ * and some other partition is currently writing.
+ * Don't let the switch below mess things up since
+ * we don't have ownership to resume anything.
+ */
+ mutex_unlock(&shared->lock);
+ wake_up(&chip->wq);
+ return;
+ }
+ mutex_unlock(&shared->lock);
+ }
+
+ switch (chip->oldstate) {
+ case FL_ERASING:
+ map_write(map, CMD(LPDDR_RESUME),
+ map->pfow_base + PFOW_COMMAND_CODE);
+ map_write(map, CMD(LPDDR_START_EXECUTION),
+ map->pfow_base + PFOW_COMMAND_EXECUTE);
+ chip->oldstate = FL_READY;
+ chip->state = FL_ERASING;
+ break;
+ case FL_READY:
+ break;
+ default:
+ printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
+ map->name, chip->oldstate);
+ }
+ wake_up(&chip->wq);
+}
+
+static int do_write_buffer(struct map_info *map, struct flchip *chip,
+ unsigned long adr, const struct kvec **pvec,
+ unsigned long *pvec_seek, int len)
+{
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ map_word datum;
+ int ret, wbufsize, word_gap;
+ const struct kvec *vec;
+ unsigned long vec_seek;
+ unsigned long prog_buf_ofs;
+
+ wbufsize = 1 << lpddr->qinfo->BufSizeShift;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, FL_WRITING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+ /* Figure out the number of words to write */
+ word_gap = (-adr & (map_bankwidth(map)-1));
+ if (word_gap) {
+ word_gap = map_bankwidth(map) - word_gap;
+ adr -= word_gap;
+ datum = map_word_ff(map);
+ }
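+	/*
+	 * Example (illustrative): with a 4-byte bankwidth and adr % 4 == 2,
+	 * word_gap ends up 2, adr is aligned down by 2, and the first two
+	 * bytes of the word are padded with 0xff from map_word_ff().
+	 */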
+ /* Write data */
+ /* Get the program buffer offset from PFOW register data first*/
+ prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
+ map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
+ vec = *pvec;
+ vec_seek = *pvec_seek;
+ do {
+ int n = map_bankwidth(map) - word_gap;
+
+ if (n > vec->iov_len - vec_seek)
+ n = vec->iov_len - vec_seek;
+ if (n > len)
+ n = len;
+
+ if (!word_gap && (len < map_bankwidth(map)))
+ datum = map_word_ff(map);
+
+ datum = map_word_load_partial(map, datum,
+ vec->iov_base + vec_seek, word_gap, n);
+
+ len -= n;
+ word_gap += n;
+ if (!len || word_gap == map_bankwidth(map)) {
+ map_write(map, datum, prog_buf_ofs);
+ prog_buf_ofs += map_bankwidth(map);
+ word_gap = 0;
+ }
+
+ vec_seek += n;
+ if (vec_seek == vec->iov_len) {
+ vec++;
+ vec_seek = 0;
+ }
+ } while (len);
+ *pvec = vec;
+ *pvec_seek = vec_seek;
+
+ /* GO GO GO */
+ send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
+ chip->state = FL_WRITING;
+ ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
+ if (ret) {
+		printk(KERN_WARNING "%s Buffer program error: %d at %lx\n",
+ map->name, ret, adr);
+ goto out;
+ }
+
+ out: put_chip(map, chip);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
+{
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift;
+ struct flchip *chip = &lpddr->chips[chipnum];
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, FL_ERASING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+ send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
+ chip->state = FL_ERASING;
+ ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
+ if (ret) {
+		printk(KERN_WARNING "%s Erase block error %d at %llx\n",
+ map->name, ret, adr);
+ goto out;
+ }
+ out: put_chip(map, chip);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift;
+ struct flchip *chip = &lpddr->chips[chipnum];
+ int ret = 0;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, FL_READY);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ map_copy_from(map, buf, adr, len);
+ *retlen = len;
+
+ put_chip(map, chip);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
+ size_t *retlen, void **mtdbuf, resource_size_t *phys)
+{
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift;
+ unsigned long ofs, last_end = 0;
+ struct flchip *chip = &lpddr->chips[chipnum];
+ int ret = 0;
+
+ if (!map->virt)
+ return -EINVAL;
+
+ /* ofs: offset within the first chip that the first read should start */
+ ofs = adr - (chipnum << lpddr->chipshift);
+ *mtdbuf = (void *)map->virt + chip->start + ofs;
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= lpddr->numchips)
+ break;
+
+ /* We cannot point across chips that are virtually disjoint */
+ if (!last_end)
+ last_end = chip->start;
+ else if (chip->start != last_end)
+ break;
+
+ if ((len + ofs - 1) >> lpddr->chipshift)
+ thislen = (1<<lpddr->chipshift) - ofs;
+ else
+ thislen = len;
+ /* get the chip */
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, FL_POINT);
+ mutex_unlock(&chip->mutex);
+ if (ret)
+ break;
+
+ chip->state = FL_POINT;
+ chip->ref_point_counter++;
+ *retlen += thislen;
+ len -= thislen;
+
+ ofs = 0;
+ last_end += 1 << lpddr->chipshift;
+ chipnum++;
+ chip = &lpddr->chips[chipnum];
+ }
+ return 0;
+}
+
+static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
+{
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift, err = 0;
+ unsigned long ofs;
+
+ /* ofs: offset within the first chip that the first read should start */
+ ofs = adr - (chipnum << lpddr->chipshift);
+
+ while (len) {
+ unsigned long thislen;
+ struct flchip *chip;
+
+ chip = &lpddr->chips[chipnum];
+ if (chipnum >= lpddr->numchips)
+ break;
+
+ if ((len + ofs - 1) >> lpddr->chipshift)
+ thislen = (1<<lpddr->chipshift) - ofs;
+ else
+ thislen = len;
+
+ mutex_lock(&chip->mutex);
+ if (chip->state == FL_POINT) {
+ chip->ref_point_counter--;
+ if (chip->ref_point_counter == 0)
+ chip->state = FL_READY;
+ } else {
+			printk(KERN_WARNING "%s: Warning: unpoint called on "
+				"non-pointed region\n", map->name);
+ err = -EINVAL;
+ }
+
+ put_chip(map, chip);
+ mutex_unlock(&chip->mutex);
+
+ len -= thislen;
+ ofs = 0;
+ chipnum++;
+ }
+
+ return err;
+}
+
+static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct kvec vec;
+
+ vec.iov_base = (void *) buf;
+ vec.iov_len = len;
+
+ return lpddr_writev(mtd, &vec, 1, to, retlen);
+}
+
+
+static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int ret = 0;
+ int chipnum;
+ unsigned long ofs, vec_seek, i;
+ int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
+ size_t len = 0;
+
+ for (i = 0; i < count; i++)
+ len += vecs[i].iov_len;
+
+ if (!len)
+ return 0;
+
+ chipnum = to >> lpddr->chipshift;
+
+ ofs = to;
+ vec_seek = 0;
+
+ do {
+ /* We must not cross write block boundaries */
+ int size = wbufsize - (ofs & (wbufsize-1));
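+		/*
+		 * Example (illustrative): with wbufsize == 64 and ofs == 0x30
+		 * the first chunk is limited to 16 bytes, ending exactly on
+		 * the program-buffer boundary.
+		 */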
+
+ if (size > len)
+ size = len;
+
+ ret = do_write_buffer(map, &lpddr->chips[chipnum],
+ ofs, &vecs, &vec_seek, size);
+ if (ret)
+ return ret;
+
+ ofs += size;
+ (*retlen) += size;
+ len -= size;
+
+ /* Be nice and reschedule with the chip in a usable
+ * state for other processes */
+ cond_resched();
+
+ } while (len);
+
+ return 0;
+}
+
+static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ unsigned long ofs, len;
+ int ret;
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int size = 1 << lpddr->qinfo->UniformBlockSizeShift;
+
+ ofs = instr->addr;
+ len = instr->len;
+
+ while (len > 0) {
+ ret = do_erase_oneblock(mtd, ofs);
+ if (ret)
+ return ret;
+ ofs += size;
+ len -= size;
+ }
+
+ return 0;
+}
+
+#define DO_XXLOCK_LOCK 1
+#define DO_XXLOCK_UNLOCK 2
+static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
+{
+ int ret = 0;
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift;
+ struct flchip *chip = &lpddr->chips[chipnum];
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ if (thunk == DO_XXLOCK_LOCK) {
+ send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
+ chip->state = FL_LOCKING;
+ } else if (thunk == DO_XXLOCK_UNLOCK) {
+ send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
+ chip->state = FL_UNLOCKING;
+ } else
+ BUG();
+
+ ret = wait_for_ready(map, chip, 1);
+ if (ret) {
+		printk(KERN_ERR "%s: block lock/unlock error status %d\n",
+ map->name, ret);
+ goto out;
+ }
+out: put_chip(map, chip);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
+}
+
+static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
+MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
new file mode 100644
index 0000000000..137ae5f0a1
--- /dev/null
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Probing flash chips with QINFO records.
+ * (C) 2008 Korolev Alexey <akorolev@infradead.org>
+ * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/xip.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/pfow.h>
+#include <linux/mtd/qinfo.h>
+
+static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr);
+struct mtd_info *lpddr_probe(struct map_info *map);
+static struct lpddr_private *lpddr_probe_chip(struct map_info *map);
+static int lpddr_pfow_present(struct map_info *map,
+ struct lpddr_private *lpddr);
+
+static struct qinfo_query_info qinfo_array[] = {
+ /* General device info */
+ {0, 0, "DevSizeShift", "Device size 2^n bytes"},
+ {0, 3, "BufSizeShift", "Program buffer size 2^n bytes"},
+ /* Erase block information */
+ {1, 1, "TotalBlocksNum", "Total number of blocks"},
+ {1, 2, "UniformBlockSizeShift", "Uniform block size 2^n bytes"},
+ /* Partition information */
+ {2, 1, "HWPartsNum", "Number of hardware partitions"},
+ /* Optional features */
+ {5, 1, "SuspEraseSupp", "Suspend erase supported"},
+ /* Operation typical time */
+ {10, 0, "SingleWordProgTime", "Single word program 2^n u-sec"},
+ {10, 1, "ProgBufferTime", "Program buffer write 2^n u-sec"},
+ {10, 2, "BlockEraseTime", "Block erase 2^n m-sec"},
+ {10, 3, "FullChipEraseTime", "Full chip erase 2^n m-sec"},
+};
+
+static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
+{
+ int qinfo_lines = ARRAY_SIZE(qinfo_array);
+ int i;
+ int bankwidth = map_bankwidth(map) * 8;
+ int major, minor;
+
+ for (i = 0; i < qinfo_lines; i++) {
+ if (strcmp(id_str, qinfo_array[i].id_str) == 0) {
+ major = qinfo_array[i].major & ((1 << bankwidth) - 1);
+ minor = qinfo_array[i].minor & ((1 << bankwidth) - 1);
+ return minor | (major << bankwidth);
+ }
+ }
+	printk(KERN_ERR "%s: qinfo id string is wrong!\n", map->name);
+ BUG();
+ return -1;
+}
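+
+/*
+ * Worked example (derived from qinfo_array above): on an x16 map
+ * (bankwidth == 16 bits), "BufSizeShift" (major 0, minor 3) encodes as
+ * 3 | (0 << 16) == 0x3; lpddr_info_query() below splits this back into
+ * the PFOW low/high address words.
+ */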
+
+static uint16_t lpddr_info_query(struct map_info *map, char *id_str)
+{
+ unsigned int dsr, val;
+ int bits_per_chip = map_bankwidth(map) * 8;
+ unsigned long adr = lpddr_get_qinforec_pos(map, id_str);
+ int attempts = 20;
+
+ /* Write a request for the PFOW record */
+ map_write(map, CMD(LPDDR_INFO_QUERY),
+ map->pfow_base + PFOW_COMMAND_CODE);
+ map_write(map, CMD(adr & ((1 << bits_per_chip) - 1)),
+ map->pfow_base + PFOW_COMMAND_ADDRESS_L);
+ map_write(map, CMD(adr >> bits_per_chip),
+ map->pfow_base + PFOW_COMMAND_ADDRESS_H);
+ map_write(map, CMD(LPDDR_START_EXECUTION),
+ map->pfow_base + PFOW_COMMAND_EXECUTE);
+
+ while ((attempts--) > 0) {
+ dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
+ if (dsr & DSR_READY_STATUS)
+ break;
+ udelay(10);
+ }
+
+ val = CMDVAL(map_read(map, map->pfow_base + PFOW_COMMAND_DATA));
+ return val;
+}
+
+static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
+{
+ map_word pfow_val[4];
+
+ /* Check identification string */
+ pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
+ pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
+ pfow_val[2] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_O);
+ pfow_val[3] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_W);
+
+ if (!map_word_equal(map, CMD('P'), pfow_val[0]))
+ goto out;
+
+ if (!map_word_equal(map, CMD('F'), pfow_val[1]))
+ goto out;
+
+ if (!map_word_equal(map, CMD('O'), pfow_val[2]))
+ goto out;
+
+ if (!map_word_equal(map, CMD('W'), pfow_val[3]))
+ goto out;
+
+ return 1; /* "PFOW" is found */
+out:
+	printk(KERN_WARNING "%s: PFOW string at 0x%lx not found\n",
+ map->name, map->pfow_base);
+ return 0;
+}
+
+static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)
+{
+
+ lpddr->qinfo = kzalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
+ if (!lpddr->qinfo)
+ return 0;
+
+ /* Get the ManuID */
+ lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID));
+ /* Get the DeviceID */
+ lpddr->DevId = CMDVAL(map_read(map, map->pfow_base + PFOW_DEVICE_ID));
+ /* read parameters from chip qinfo table */
+ lpddr->qinfo->DevSizeShift = lpddr_info_query(map, "DevSizeShift");
+ lpddr->qinfo->TotalBlocksNum = lpddr_info_query(map, "TotalBlocksNum");
+ lpddr->qinfo->BufSizeShift = lpddr_info_query(map, "BufSizeShift");
+ lpddr->qinfo->HWPartsNum = lpddr_info_query(map, "HWPartsNum");
+ lpddr->qinfo->UniformBlockSizeShift =
+ lpddr_info_query(map, "UniformBlockSizeShift");
+ lpddr->qinfo->SuspEraseSupp = lpddr_info_query(map, "SuspEraseSupp");
+ lpddr->qinfo->SingleWordProgTime =
+ lpddr_info_query(map, "SingleWordProgTime");
+ lpddr->qinfo->ProgBufferTime = lpddr_info_query(map, "ProgBufferTime");
+ lpddr->qinfo->BlockEraseTime = lpddr_info_query(map, "BlockEraseTime");
+ return 1;
+}
+
+static struct lpddr_private *lpddr_probe_chip(struct map_info *map)
+{
+ struct lpddr_private lpddr;
+ struct lpddr_private *retlpddr;
+ int numvirtchips;
+
+ if ((map->pfow_base + 0x1000) >= map->size) {
+		printk(KERN_NOTICE "%s: probe at base (0x%08lx) past the end "
+				"of the map (0x%08lx)\n", map->name,
+ (unsigned long)map->pfow_base, map->size - 1);
+ return NULL;
+ }
+ memset(&lpddr, 0, sizeof(struct lpddr_private));
+ if (!lpddr_pfow_present(map, &lpddr))
+ return NULL;
+
+ if (!lpddr_chip_setup(map, &lpddr))
+ return NULL;
+
+ /* Ok so we found a chip */
+ lpddr.chipshift = lpddr.qinfo->DevSizeShift;
+ lpddr.numchips = 1;
+
+ numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum;
+ retlpddr = kzalloc(struct_size(retlpddr, chips, numvirtchips),
+ GFP_KERNEL);
+ if (!retlpddr)
+ return NULL;
+
+ memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private));
+
+ retlpddr->numchips = numvirtchips;
+ retlpddr->chipshift = retlpddr->qinfo->DevSizeShift -
+ __ffs(retlpddr->qinfo->HWPartsNum);
+
+ return retlpddr;
+}
+
+struct mtd_info *lpddr_probe(struct map_info *map)
+{
+ struct mtd_info *mtd = NULL;
+ struct lpddr_private *lpddr;
+
+	/* First probe the map to see if we can open PFOW here */
+ lpddr = lpddr_probe_chip(map);
+ if (!lpddr)
+ return NULL;
+
+ map->fldrv_priv = lpddr;
+ mtd = lpddr_cmdset(map);
+ if (mtd) {
+ if (mtd->size > map->size) {
+			printk(KERN_WARNING "Reducing visibility of %ldKiB chip "
+ "to %ldKiB\n", (unsigned long)mtd->size >> 10,
+ (unsigned long)map->size >> 10);
+ mtd->size = map->size;
+ }
+ return mtd;
+ }
+
+ kfree(lpddr->qinfo);
+ kfree(lpddr);
+ map->fldrv_priv = NULL;
+ return NULL;
+}
+
+static struct mtd_chip_driver lpddr_chipdrv = {
+ .probe = lpddr_probe,
+ .name = "qinfo_probe",
+ .module = THIS_MODULE
+};
+
+static int __init lpddr_probe_init(void)
+{
+ register_mtd_chip_driver(&lpddr_chipdrv);
+ return 0;
+}
+
+static void __exit lpddr_probe_exit(void)
+{
+ unregister_mtd_chip_driver(&lpddr_chipdrv);
+}
+
+module_init(lpddr_probe_init);
+module_exit(lpddr_probe_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vasiliy Leonenko <vasiliy.leonenko@gmail.com>");
+MODULE_DESCRIPTION("Driver to probe qinfo flash chips");
+
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
new file mode 100644
index 0000000000..e098ae937c
--- /dev/null
+++ b/drivers/mtd/maps/Kconfig
@@ -0,0 +1,388 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Mapping drivers for chip access"
+ depends on MTD!=n
+ depends on HAS_IOMEM
+
+config MTD_COMPLEX_MAPPINGS
+ bool "Support non-linear mappings of flash chips"
+ help
+ This causes the chip drivers to allow for complicated
+ paged mappings of flash chips.
+
+config MTD_PHYSMAP
+ tristate "Flash device in physical memory map"
+ depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_RAM || MTD_LPDDR
+ help
+ This provides a 'mapping' driver which allows the NOR Flash and
+ ROM driver code to communicate with chips which are mapped
+ physically into the CPU's memory. You will need to configure
+ the physical address and size of the flash chips on your
+ particular board as well as the bus width, either statically
+ with config options or at run-time.
+
+ To compile this driver as a module, choose M here: the
+ module will be called physmap.
+
+config MTD_PHYSMAP_COMPAT
+ bool "Physmap compat support"
+ depends on MTD_PHYSMAP
+ default n
+ help
+	  Set up a simple mapping via the Kconfig options. Normally the
+ physmap configuration options are done via your board's
+ resource file.
+
+ If unsure, say N here.
+
+config MTD_PHYSMAP_START
+ hex "Physical start address of flash mapping"
+ depends on MTD_PHYSMAP_COMPAT
+ default "0x8000000"
+ help
+ This is the physical memory location at which the flash chips
+ are mapped on your particular target board. Refer to the
+ memory map which should hopefully be in the documentation for
+ your board.
+
+config MTD_PHYSMAP_LEN
+ hex "Physical length of flash mapping"
+ depends on MTD_PHYSMAP_COMPAT
+ default "0"
+ help
+ This is the total length of the mapping of the flash chips on
+ your particular board. If there is space, or aliases, in the
+ physical memory map between the chips, this could be larger
+ than the total amount of flash present. Refer to the memory
+ map which should hopefully be in the documentation for your
+ board.
+
+config MTD_PHYSMAP_BANKWIDTH
+ int "Bank width in octets"
+ depends on MTD_PHYSMAP_COMPAT
+ default "2"
+ help
+ This is the total width of the data bus of the flash devices
+ in octets. For example, if you have a data bus width of 32
+ bits, you would set the bus width octet value to 4. This is
+ used internally by the CFI drivers.
+
+config MTD_PHYSMAP_OF
+ bool "Memory device in physical memory map based on OF description"
+ depends on OF && MTD_PHYSMAP
+ help
+ This provides a 'mapping' driver which allows the NOR Flash, ROM
+ and RAM driver code to communicate with chips which are mapped
+ physically into the CPU's memory. The mapping description here is
+ taken from OF device tree.
+
+config MTD_PHYSMAP_BT1_ROM
+ bool "Baikal-T1 Boot ROMs OF-based physical memory map handling"
+ depends on MTD_PHYSMAP_OF
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select MTD_COMPLEX_MAPPINGS
+ select MULTIPLEXER
+ select MUX_MMIO
+ help
+	  This provides some extra DT physmap parsing for the Baikal-T1
+	  platforms, detecting the boot ROMs and setting up ROM-specific
+	  accessors.
+
+config MTD_PHYSMAP_VERSATILE
+ bool "ARM Versatile OF-based physical memory map handling"
+ depends on MTD_PHYSMAP_OF
+ depends on MFD_SYSCON
+ default y if (ARCH_INTEGRATOR || ARCH_VERSATILE || ARCH_REALVIEW)
+ help
+ This provides some extra DT physmap parsing for the ARM Versatile
+ platforms, basically to add a VPP (write protection) callback so
+ the flash can be taken out of write protection.
+
+config MTD_PHYSMAP_GEMINI
+ bool "Cortina Gemini OF-based physical memory map handling"
+ depends on MTD_PHYSMAP_OF
+ depends on MFD_SYSCON
+ select MTD_COMPLEX_MAPPINGS
+ default ARCH_GEMINI
+ help
+	  This provides some extra DT physmap parsing for the Gemini
+	  platforms, including detection and setup of parallel mode on the
+	  external interface.
+
+config MTD_PHYSMAP_IXP4XX
+ bool "Intel IXP4xx OF-based physical memory map handling"
+ depends on MTD_PHYSMAP_OF
+ depends on ARM
+ select MTD_COMPLEX_MAPPINGS
+ select MTD_CFI_BE_BYTE_SWAP if CPU_BIG_ENDIAN
+ default ARCH_IXP4XX
+ help
+ This provides some extra DT physmap parsing for the Intel IXP4xx
+ platforms, some elaborate endianness handling in particular.
+
+config MTD_PHYSMAP_GPIO_ADDR
+ bool "GPIO-assisted Flash Chip Support"
+ depends on MTD_PHYSMAP
+ depends on GPIOLIB || COMPILE_TEST
+ depends on MTD_COMPLEX_MAPPINGS
+ help
+ Extend the physmap driver to allow flashes to be partially
+ physically addressed and assisted by GPIOs.
+
+config MTD_SUN_UFLASH
+ tristate "Sun Microsystems userflash support"
+ depends on SPARC && MTD_CFI && PCI
+ help
+ This provides a 'mapping' driver which supports the way in
+ which user-programmable flash chips are connected on various
+ Sun Microsystems boardsets. This driver will require CFI support
+ in the kernel, so if you did not enable CFI previously, do that now.
+
+config MTD_SC520CDP
+ tristate "CFI Flash device mapped on AMD SC520 CDP"
+ depends on (MELAN || COMPILE_TEST) && MTD_CFI
+ help
+ The SC520 CDP board has two banks of CFI-compliant chips and one
+ Dual-in-line JEDEC chip. This 'mapping' driver supports that
+ arrangement, implementing three MTD devices.
+
+config MTD_NETSC520
+ tristate "CFI Flash device mapped on AMD NetSc520"
+ depends on (MELAN || COMPILE_TEST) && MTD_CFI
+ help
+ This enables access routines for the flash chips on the AMD NetSc520
+ demonstration board. If you have one of these boards and would like
+ to use the flash chips on it, say 'Y'.
+
+config MTD_TS5500
+ tristate "JEDEC Flash device mapped on Technologic Systems TS-5500"
+ depends on TS5500 || COMPILE_TEST
+ select MTD_JEDECPROBE
+ select MTD_CFI_AMDSTD
+ help
+ This provides a driver for the on-board flash of the Technologic
+ Systems TS-5500 board. The 2MB flash is split into 3 partitions
+ which are accessed as separate MTD devices.
+
+ mtd0 and mtd2 are the two BIOS drives, which use the resident
+ flash disk (RFD) flash translation layer.
+
+ mtd1 allows you to reprogram your BIOS. BE VERY CAREFUL.
+
+ Note that jumper 3 ("Write Enable Drive A") must be set,
+ otherwise detection won't succeed.
+
+config MTD_SBC_GXX
+ tristate "CFI Flash device mapped on Arcom SBC-GXx boards"
+ depends on X86 && MTD_CFI_INTELEXT && MTD_COMPLEX_MAPPINGS
+ help
+ This provides a driver for the on-board flash of Arcom Control
+ Systems' SBC-GXn family of boards, formerly known as SBC-MediaGX.
+ By default the flash is split into 3 partitions which are accessed
+ as separate MTD devices. This board utilizes Intel StrataFlash.
+ More info at
+ <http://www.arcomcontrols.com/products/icp/pc104/processors/SBC_GX1.htm>.
+
+config MTD_PXA2XX
+ tristate "CFI Flash device mapped on Intel XScale PXA2xx based boards"
+ depends on (PXA25x || PXA27x) && MTD_CFI_INTELEXT
+ help
+ This provides a driver for the NOR flash attached to a PXA2xx chip.
+
+config MTD_SCx200_DOCFLASH
+ tristate "Flash device mapped with DOCCS on NatSemi SCx200"
+ depends on SCx200 && MTD_CFI
+ help
+ Enable support for a flash chip mapped using the DOCCS signal on a
+ National Semiconductor SCx200 processor.
+
+ If you don't know what to do here, say N.
+
+ If compiled as a module, it will be called scx200_docflash.
+
+config MTD_AMD76XROM
+ tristate "BIOS flash chip on AMD76x southbridge"
+ depends on X86 && MTD_JEDECPROBE
+ help
+ Support for treating the BIOS flash chip on AMD76x motherboards
+ as an MTD device - with this you can reprogram your BIOS.
+
+ BE VERY CAREFUL.
+
+config MTD_ICHXROM
+ tristate "BIOS flash chip on Intel Controller Hub 2/3/4/5"
+ depends on X86 && MTD_JEDECPROBE
+ help
+ Support for treating the BIOS flash chip on ICHX motherboards
+ as an MTD device - with this you can reprogram your BIOS.
+
+ BE VERY CAREFUL.
+
+config MTD_ESB2ROM
+ tristate "BIOS flash chip on Intel ESB Controller Hub 2"
+ depends on X86 && MTD_JEDECPROBE && PCI
+ help
+ Support for treating the BIOS flash chip on ESB2 motherboards
+ as an MTD device - with this you can reprogram your BIOS.
+
+ BE VERY CAREFUL.
+
+config MTD_CK804XROM
+ tristate "BIOS flash chip on Nvidia CK804"
+ depends on X86 && MTD_JEDECPROBE && PCI
+ help
+ Support for treating the BIOS flash chip on nvidia motherboards
+ as an MTD device - with this you can reprogram your BIOS.
+
+ BE VERY CAREFUL.
+
+config MTD_SCB2_FLASH
+ tristate "BIOS flash chip on Intel SCB2 boards"
+ depends on X86 && MTD_JEDECPROBE && PCI
+ help
+ Support for treating the BIOS flash chip on Intel SCB2 boards
+ as an MTD device - with this you can reprogram your BIOS.
+
+ BE VERY CAREFUL.
+
+config MTD_TSUNAMI
+ tristate "Flash chips on Tsunami TIG bus"
+ depends on ALPHA_TSUNAMI && MTD_COMPLEX_MAPPINGS
+ help
+ Support for the flash chip on Tsunami TIG bus.
+
+config MTD_NETtel
+ tristate "CFI flash device on SnapGear/SecureEdge"
+ depends on X86 && MTD_JEDECPROBE
+ help
+ Support for flash chips on NETtel/SecureEdge/SnapGear boards.
+
+config MTD_LANTIQ
+ tristate "Lantiq SoC NOR support"
+ depends on LANTIQ
+ help
+ Support for NOR flash attached to the Lantiq SoC's External Bus Unit.
+
+config MTD_L440GX
+ tristate "BIOS flash chip on Intel L440GX boards"
+ depends on X86 && MTD_JEDECPROBE
+ help
+ Support for treating the BIOS flash chip on Intel L440GX motherboards
+ as an MTD device - with this you can reprogram your BIOS.
+
+ BE VERY CAREFUL.
+
+config MTD_CFI_FLAGADM
+ tristate "CFI Flash device mapping on FlagaDM"
+ depends on PPC_8xx && MTD_CFI
+ help
+ Mapping for the Flaga digital module. If you don't have one, ignore
+ this setting.
+
+config MTD_SOLUTIONENGINE
+ tristate "CFI Flash device mapped on Hitachi SolutionEngine"
+ depends on SOLUTION_ENGINE && MTD_CFI && MTD_REDBOOT_PARTS
+ help
+ This enables access to the flash chips on the Hitachi SolutionEngine and
+ similar boards. Say 'Y' if you are building a kernel for such a board.
+
+config MTD_SA1100
+ tristate "CFI Flash device mapped on StrongARM SA11x0"
+ depends on MTD_CFI && ARCH_SA1100
+ help
+ This enables access to the flash chips on most platforms based on
+ the SA1100 and SA1110, including the Assabet and the Compaq iPAQ.
+ If you have such a board, say 'Y'.
+
+config MTD_DC21285
+ tristate "CFI Flash device mapped on DC21285 Footbridge"
+ depends on MTD_CFI && ARCH_FOOTBRIDGE && MTD_COMPLEX_MAPPINGS
+ help
+ This provides a driver for the flash accessed using Intel's
+ 21285 bridge used with Intel's StrongARM processors. More info at
+ <https://www.intel.com/design/bridge/docs/21285_documentation.htm>.
+
+config MTD_IMPA7
+ tristate "JEDEC Flash device mapped on impA7"
+ depends on ARM && MTD_JEDECPROBE
+ help
+ This enables access to the NOR Flash on the impA7 board of
+ implementa GmbH. If you have such a board, say 'Y' here.
+
+# This needs CFI or JEDEC, depending on the cards found.
+config MTD_PCI
+ tristate "PCI MTD driver"
+ depends on PCI && MTD_COMPLEX_MAPPINGS
+ help
+ Mapping for accessing flash devices on add-in cards like the Intel XScale
+ IQ80310 card, and the Intel EBSA285 card in blank ROM programming mode
+ (please see the manual for the link settings).
+
+ If you are not sure, say N.
+
+config MTD_PCMCIA
+ tristate "PCMCIA MTD driver"
+ depends on PCMCIA && MTD_COMPLEX_MAPPINGS
+ help
+ Map driver for accessing PCMCIA linear flash memory cards. These
+ cards are usually around 4-16MiB in size. This does not include
+ Compact Flash cards which are treated as IDE devices.
+
+config MTD_PCMCIA_ANONYMOUS
+ bool "Use PCMCIA MTD drivers for anonymous PCMCIA cards"
+ depends on MTD_PCMCIA
+ help
+ If this option is enabled, PCMCIA cards which do not report
+ anything about themselves are assumed to be MTD cards.
+
+ If unsure, say N.
+
+config MTD_UCLINUX
+ bool "Generic uClinux RAM/ROM filesystem support"
+ depends on (MTD_RAM=y || MTD_ROM=y) && (!MMU || COLDFIRE)
+ help
+ Map driver to support image based filesystems for uClinux.
+
+config MTD_INTEL_VR_NOR
+ tristate "NOR flash on Intel Vermilion Range Expansion Bus CS0"
+ depends on PCI
+ help
+ Map driver for a NOR flash bank located on the Expansion Bus of the
+ Intel Vermilion Range chipset.
+
+config MTD_PLATRAM
+ tristate "Map driver for platform device RAM (mtd-ram)"
+ select MTD_RAM
+ help
+ Map driver for RAM areas described via the platform device
+ system.
+
+ This selection automatically selects the map_ram driver.
+
+config MTD_VMU
+ tristate "Map driver for Dreamcast VMU"
+ depends on MAPLE
+ help
+ This driver enables access to the Dreamcast Visual Memory Unit (VMU).
+
+ Most Dreamcast users will want to say Y here.
+
+ To build this as a module select M here, the module will be called
+ vmu-flash.
+
+config MTD_PISMO
+ tristate "MTD discovery driver for PISMO modules"
+ depends on I2C
+ depends on ARCH_VERSATILE
+ help
+ This driver allows for discovery of PISMO modules - see
+ <http://www.pismoworld.org/>. These are small modules containing
+ up to five memory devices (eg, SRAM, flash, DOC) described by an
+ I2C EEPROM.
+
+ This driver does not create any MTD maps itself; instead it
+ creates MTD physmap and MTD SRAM platform devices. If you
+ enable this option, you should consider enabling MTD_PHYSMAP
+ and/or MTD_PLATRAM according to the devices on your module.
+
+ When built as a module, it will be called pismo.ko
+
+endmenu
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
new file mode 100644
index 0000000000..094cfb2440
--- /dev/null
+++ b/drivers/mtd/maps/Makefile
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# linux/drivers/mtd/maps/Makefile
+#
+
+ifeq ($(CONFIG_MTD_COMPLEX_MAPPINGS),y)
+obj-$(CONFIG_MTD) += map_funcs.o
+endif
+
+# Chip mappings
+obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
+obj-$(CONFIG_MTD_DC21285) += dc21285.o
+obj-$(CONFIG_MTD_L440GX) += l440gx.o
+obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o
+obj-$(CONFIG_MTD_ESB2ROM) += esb2rom.o
+obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o
+obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
+obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
+obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
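+# physmap.o is a composite object: physmap-core.o plus whichever of the
+# optional OF quirk handlers below (Baikal-T1 ROM, Versatile, Gemini,
+# IXP4xx) were selected in Kconfig.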
+physmap-objs-y += physmap-core.o
+physmap-objs-$(CONFIG_MTD_PHYSMAP_BT1_ROM) += physmap-bt1-rom.o
+physmap-objs-$(CONFIG_MTD_PHYSMAP_VERSATILE) += physmap-versatile.o
+physmap-objs-$(CONFIG_MTD_PHYSMAP_GEMINI) += physmap-gemini.o
+physmap-objs-$(CONFIG_MTD_PHYSMAP_IXP4XX) += physmap-ixp4xx.o
+physmap-objs := $(physmap-objs-y)
+obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
+obj-$(CONFIG_MTD_PISMO) += pismo.o
+obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
+obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
+obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o
+obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o
+obj-$(CONFIG_MTD_NETSC520) += netsc520.o
+obj-$(CONFIG_MTD_TS5500) += ts5500_flash.o
+obj-$(CONFIG_MTD_SUN_UFLASH) += sun_uflash.o
+obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
+obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
+obj-$(CONFIG_MTD_PCI) += pci.o
+obj-$(CONFIG_MTD_IMPA7) += impa7.o
+obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
+obj-$(CONFIG_MTD_NETtel) += nettel.o
+obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
+obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
+obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
+obj-$(CONFIG_MTD_VMU) += vmu-flash.o
+obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
new file mode 100644
index 0000000000..281fcbaa74
--- /dev/null
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * amd76xrom.c
+ *
+ * Normal mappings of chips in physical memory
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/list.h>
+
+
+#define xstr(s) str(s)
+#define str(s) #s
+#define MOD_NAME xstr(KBUILD_BASENAME)
+
+#define ADDRESS_NAME_LEN 18
+
+#define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */
+
+struct amd76xrom_window {
+ void __iomem *virt;
+ unsigned long phys;
+ unsigned long size;
+ struct list_head maps;
+ struct resource rsrc;
+ struct pci_dev *pdev;
+};
+
+struct amd76xrom_map_info {
+ struct list_head list;
+ struct map_info map;
+ struct mtd_info *mtd;
+ struct resource rsrc;
+ char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
+};
+
+/* The 2 bits controlling the window size are often set to allow reading
+ * the BIOS, but too small to allow writing, since the lock registers are
+ * 4MiB lower in the address space than the data.
+ *
+ * This is intended to prevent flashing the BIOS, perhaps accidentally.
+ *
+ * This parameter allows the normal driver to override the BIOS settings.
+ *
+ * The bits are 6 and 7. If both bits are set, it is a 5MiB window.
+ * If only bit 7 is set, it is a 4MiB window. Otherwise, a 64KiB window.
+ */
+static uint win_size_bits;
+module_param(win_size_bits, uint, 0);
+MODULE_PARM_DESC(win_size_bits, "ROM window size bits override for 0x43 byte, normally set by BIOS.");
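+/* For instance (assuming the bit layout documented above), loading with
+ * "modprobe amd76xrom win_size_bits=0xc0" ORs bits 6 and 7 into register
+ * 0x43 and forces the full 5MiB window.
+ */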
+
+static struct amd76xrom_window amd76xrom_window = {
+ .maps = LIST_HEAD_INIT(amd76xrom_window.maps),
+};
+
+static void amd76xrom_cleanup(struct amd76xrom_window *window)
+{
+ struct amd76xrom_map_info *map, *scratch;
+ u8 byte;
+
+ if (window->pdev) {
+ /* Disable writes through the rom window */
+ pci_read_config_byte(window->pdev, 0x40, &byte);
+ pci_write_config_byte(window->pdev, 0x40, byte & ~1);
+ pci_dev_put(window->pdev);
+ }
+
+ /* Free all of the mtd devices */
+ list_for_each_entry_safe(map, scratch, &window->maps, list) {
+ if (map->rsrc.parent) {
+ release_resource(&map->rsrc);
+ }
+ mtd_device_unregister(map->mtd);
+ map_destroy(map->mtd);
+ list_del(&map->list);
+ kfree(map);
+ }
+ if (window->rsrc.parent)
+ release_resource(&window->rsrc);
+
+ if (window->virt) {
+ iounmap(window->virt);
+ window->virt = NULL;
+ window->phys = 0;
+ window->size = 0;
+ window->pdev = NULL;
+ }
+}
+
+
+static int amd76xrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+ u8 byte;
+ struct amd76xrom_window *window = &amd76xrom_window;
+ struct amd76xrom_map_info *map = NULL;
+ unsigned long map_top;
+
+ /* Remember the pci dev I find the window in - already have a ref */
+ window->pdev = pdev;
+
+ /* Enable the selected rom window. This is often incorrectly
+ * set up by the BIOS, and the 4MiB offset for the lock registers
+ * requires the full 5MiB of window space.
+ *
+ * This 'write, then read' approach preserves the bits used by
+ * other hardware functions.
+ */
+ pci_read_config_byte(pdev, 0x43, &byte);
+ pci_write_config_byte(pdev, 0x43, byte | win_size_bits);
+
+ /* Assume the rom window is properly set up, and find its size */
+ pci_read_config_byte(pdev, 0x43, &byte);
+ if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6))) {
+ window->phys = 0xffb00000; /* 5MiB */
+ }
+ else if ((byte & (1<<7)) == (1<<7)) {
+ window->phys = 0xffc00000; /* 4MiB */
+ }
+ else {
+ window->phys = 0xffff0000; /* 64KiB */
+ }
+ window->size = 0xffffffffUL - window->phys + 1UL;
+
+ /*
+ * Try to reserve the window mem region. If this fails then
+ * it is likely due to a fragment of the window being
+ * "reserved" by the BIOS. In the case that the
+ * request_mem_region() fails then once the rom size is
+ * discovered we will try to reserve the unreserved fragment.
+ */
+ window->rsrc.name = MOD_NAME;
+ window->rsrc.start = window->phys;
+ window->rsrc.end = window->phys + window->size - 1;
+ window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&iomem_resource, &window->rsrc)) {
+ window->rsrc.parent = NULL;
+ printk(KERN_ERR MOD_NAME
+ " %s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
+ return -EBUSY;
+ }
+
+
+ /* Enable writes through the rom window */
+ pci_read_config_byte(pdev, 0x40, &byte);
+ pci_write_config_byte(pdev, 0x40, byte | 1);
+
+ /* FIXME handle registers 0x80 - 0x8C the bios region locks */
+
+ /* For write accesses caches are useless */
+ window->virt = ioremap(window->phys, window->size);
+ if (!window->virt) {
+ printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
+ window->phys, window->size);
+ goto out;
+ }
+
+ /* Get the first address to look for a rom chip at */
+ map_top = window->phys;
+#if 1
+ /* The probe sequence run over the firmware hub lock
+ * registers sets them to 0x7 (no access).
+ * Probe at most the last 4M of the address space.
+ */
+ if (map_top < 0xffc00000) {
+ map_top = 0xffc00000;
+ }
+#endif
+ /* Loop through and look for rom chips */
+ while((map_top - 1) < 0xffffffffUL) {
+ struct cfi_private *cfi;
+ unsigned long offset;
+ int i;
+
+ if (!map) {
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ goto out;
+ }
+ memset(map, 0, sizeof(*map));
+ INIT_LIST_HEAD(&map->list);
+ map->map.name = map->map_name;
+ map->map.phys = map_top;
+ offset = map_top - window->phys;
+ map->map.virt = (void __iomem *)
+ (((unsigned long)(window->virt)) + offset);
+ map->map.size = 0xffffffffUL - map_top + 1UL;
+ /* Set the name of the map to the address I am trying */
+ sprintf(map->map_name, "%s @%08Lx",
+ MOD_NAME, (unsigned long long)map->map.phys);
+
+ /* There is no generic VPP support */
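+ /* Probe at progressively narrower bankwidths, from 32 bytes down
+ * to 1; the first width at which a probe succeeds is used.
+ */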
+ for(map->map.bankwidth = 32; map->map.bankwidth;
+ map->map.bankwidth >>= 1)
+ {
+ char **probe_type;
+ /* Skip bankwidths that are not supported */
+ if (!map_bankwidth_supported(map->map.bankwidth))
+ continue;
+
+ /* Setup the map methods */
+ simple_map_init(&map->map);
+
+ /* Try all of the probe methods */
+ probe_type = rom_probe_types;
+ for(; *probe_type; probe_type++) {
+ map->mtd = do_map_probe(*probe_type, &map->map);
+ if (map->mtd)
+ goto found;
+ }
+ }
+ map_top += ROM_PROBE_STEP_SIZE;
+ continue;
+ found:
+ /* Trim the size if we are larger than the map */
+ if (map->mtd->size > map->map.size) {
+ printk(KERN_WARNING MOD_NAME
+ " rom(%llu) larger than window(%lu). fixing...\n",
+ (unsigned long long)map->mtd->size, map->map.size);
+ map->mtd->size = map->map.size;
+ }
+ if (window->rsrc.parent) {
+ /*
+ * Registering the MTD device in iomem may not be possible
+ * if there is a BIOS "reserved" and BUSY range. If this
+ * fails then continue anyway.
+ */
+ map->rsrc.name = map->map_name;
+ map->rsrc.start = map->map.phys;
+ map->rsrc.end = map->map.phys + map->mtd->size - 1;
+ map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&window->rsrc, &map->rsrc)) {
+ printk(KERN_ERR MOD_NAME
+ ": cannot reserve MTD resource\n");
+ map->rsrc.parent = NULL;
+ }
+ }
+
+ /* Make the whole region visible in the map */
+ map->map.virt = window->virt;
+ map->map.phys = window->phys;
+ cfi = map->map.fldrv_priv;
+ for(i = 0; i < cfi->numchips; i++) {
+ cfi->chips[i].start += offset;
+ }
+
+ /* Now that the mtd device is complete, claim and export it */
+ map->mtd->owner = THIS_MODULE;
+ if (mtd_device_register(map->mtd, NULL, 0)) {
+ map_destroy(map->mtd);
+ map->mtd = NULL;
+ goto out;
+ }
+
+
+ /* Calculate the new value of map_top */
+ map_top += map->mtd->size;
+
+ /* File away the map structure */
+ list_add(&map->list, &window->maps);
+ map = NULL;
+ }
+
+ out:
+ /* Free any left over map structures */
+ kfree(map);
+ /* See if I have any map structures */
+ if (list_empty(&window->maps)) {
+ amd76xrom_cleanup(window);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+
+static void amd76xrom_remove_one(struct pci_dev *pdev)
+{
+ struct amd76xrom_window *window = &amd76xrom_window;
+
+ amd76xrom_cleanup(window);
+}
+
+static const struct pci_device_id amd76xrom_pci_tbl[] = {
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7440,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_AMD, 0x7468 }, /* amd8111 support */
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, amd76xrom_pci_tbl);
+
+#if 0
+static struct pci_driver amd76xrom_driver = {
+ .name = MOD_NAME,
+ .id_table = amd76xrom_pci_tbl,
+ .probe = amd76xrom_init_one,
+ .remove = amd76xrom_remove_one,
+};
+#endif
+
+static int __init init_amd76xrom(void)
+{
+ struct pci_dev *pdev;
+ const struct pci_device_id *id;
+ pdev = NULL;
+ for(id = amd76xrom_pci_tbl; id->vendor; id++) {
+ pdev = pci_get_device(id->vendor, id->device, NULL);
+ if (pdev) {
+ break;
+ }
+ }
+ if (pdev) {
+ return amd76xrom_init_one(pdev, &amd76xrom_pci_tbl[0]);
+ }
+ return -ENXIO;
+#if 0
+ return pci_register_driver(&amd76xrom_driver);
+#endif
+}
+
+static void __exit cleanup_amd76xrom(void)
+{
+ amd76xrom_remove_one(amd76xrom_window.pdev);
+}
+
+module_init(init_amd76xrom);
+module_exit(cleanup_amd76xrom);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
+MODULE_DESCRIPTION("MTD map driver for BIOS chips on the AMD76X southbridge");
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
new file mode 100644
index 0000000000..70f4886284
--- /dev/null
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright © 2001 Flaga hf. Medical Devices, Kári Davíðsson <kd@flaga.is>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+
+/* We split the flash chip up into four parts.
+ * 1: bootloader first 128k (0x00000000 - 0x0001FFFF) size 0x020000
+ * 2: kernel 640k (0x00020000 - 0x000BFFFF) size 0x0A0000
+ * 3: compressed 1536k root ramdisk (0x000C0000 - 0x0023FFFF) size 0x180000
+ * 4: writeable diskpartition (jffs)(0x00240000 - 0x003FFFFF) size 0x1C0000
+ */
+
+#define FLASH_PHYS_ADDR 0x40000000
+#define FLASH_SIZE 0x400000
+
+#define FLASH_PARTITION0_ADDR 0x00000000
+#define FLASH_PARTITION0_SIZE 0x00020000
+
+#define FLASH_PARTITION1_ADDR 0x00020000
+#define FLASH_PARTITION1_SIZE 0x000A0000
+
+#define FLASH_PARTITION2_ADDR 0x000C0000
+#define FLASH_PARTITION2_SIZE 0x00180000
+
+#define FLASH_PARTITION3_ADDR 0x00240000
+#define FLASH_PARTITION3_SIZE 0x001C0000
+
+
+static struct map_info flagadm_map = {
+ .name = "FlagaDM flash device",
+ .size = FLASH_SIZE,
+ .bankwidth = 2,
+};
+
+static const struct mtd_partition flagadm_parts[] = {
+ {
+ .name = "Bootloader",
+ .offset = FLASH_PARTITION0_ADDR,
+ .size = FLASH_PARTITION0_SIZE
+ },
+ {
+ .name = "Kernel image",
+ .offset = FLASH_PARTITION1_ADDR,
+ .size = FLASH_PARTITION1_SIZE
+ },
+ {
+ .name = "Initial ramdisk image",
+ .offset = FLASH_PARTITION2_ADDR,
+ .size = FLASH_PARTITION2_SIZE
+ },
+ {
+ .name = "Persistent storage",
+ .offset = FLASH_PARTITION3_ADDR,
+ .size = FLASH_PARTITION3_SIZE
+ }
+};
+
+#define PARTITION_COUNT ARRAY_SIZE(flagadm_parts)
+
+static struct mtd_info *mymtd;
+
+static int __init init_flagadm(void)
+{
+ printk(KERN_NOTICE "FlagaDM flash device: %x at %x\n",
+ FLASH_SIZE, FLASH_PHYS_ADDR);
+
+ flagadm_map.phys = FLASH_PHYS_ADDR;
+ flagadm_map.virt = ioremap(FLASH_PHYS_ADDR,
+ FLASH_SIZE);
+
+ if (!flagadm_map.virt) {
+ printk("Failed to ioremap\n");
+ return -EIO;
+ }
+
+ simple_map_init(&flagadm_map);
+
+ mymtd = do_map_probe("cfi_probe", &flagadm_map);
+ if (mymtd) {
+ mymtd->owner = THIS_MODULE;
+ mtd_device_register(mymtd, flagadm_parts, PARTITION_COUNT);
+ printk(KERN_NOTICE "FlagaDM flash device initialized\n");
+ return 0;
+ }
+
+ iounmap((void __iomem *)flagadm_map.virt);
+ return -ENXIO;
+}
+
+static void __exit cleanup_flagadm(void)
+{
+ if (mymtd) {
+ mtd_device_unregister(mymtd);
+ map_destroy(mymtd);
+ }
+ if (flagadm_map.virt) {
+ iounmap((void __iomem *)flagadm_map.virt);
+ flagadm_map.virt = NULL;
+ }
+}
+
+module_init(init_flagadm);
+module_exit(cleanup_flagadm);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>");
+MODULE_DESCRIPTION("MTD map driver for Flaga digital module");
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
new file mode 100644
index 0000000000..c0216bc740
--- /dev/null
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ck804xrom.c
+ *
+ * Normal mappings of chips in physical memory
+ *
+ * Dave Olsen <dolsen@lnxi.com>
+ * Ryan Jackson <rjackson@lnxi.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/list.h>
+
+
+#define MOD_NAME KBUILD_BASENAME
+
+#define ADDRESS_NAME_LEN 18
+
+#define ROM_PROBE_STEP_SIZE (64*1024)
+
+#define DEV_CK804 1
+#define DEV_MCP55 2
+
+struct ck804xrom_window {
+ void __iomem *virt;
+ unsigned long phys;
+ unsigned long size;
+ struct list_head maps;
+ struct resource rsrc;
+ struct pci_dev *pdev;
+};
+
+struct ck804xrom_map_info {
+ struct list_head list;
+ struct map_info map;
+ struct mtd_info *mtd;
+ struct resource rsrc;
+ char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
+};
+
+/*
+ * The following applies to ck804 only:
+ * The 2 bits controlling the window size are often set to allow reading
+ * the BIOS, but too small to allow writing, since the lock registers are
+ * 4MiB lower in the address space than the data.
+ *
+ * This is intended to prevent flashing the bios, perhaps accidentally.
+ *
+ * This parameter allows the normal driver to override the BIOS settings.
+ *
+ * The bits are 6 and 7. If both bits are set, it is a 5MiB window.
+ * If only bit 7 is set, it is a 4MiB window. Otherwise, a
+ * 64KiB window.
+ *
+ * The following applies to mcp55 only:
+ * The 15 bits controlling the window size are distributed as follows:
+ * byte @0x88: bit 0..7
+ * byte @0x8c: bit 8..15
+ * word @0x90: bit 16..30
+ * If all bits are enabled, we have a 16? MiB window
+ * Please set win_size_bits to 0x7fffffff if you actually want to do something
+ */
+static uint win_size_bits = 0;
+module_param(win_size_bits, uint, 0);
+MODULE_PARM_DESC(win_size_bits, "ROM window size bits override, normally set by BIOS.");
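+/* Worked example for the mcp55 layout above: win_size_bits=0x7fffffff ORs
+ * 0xff into the byte at 0x88, 0xff into the byte at 0x8c, and 0x7fff into
+ * the word at 0x90, opening the decode window as wide as it will go.
+ */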
+
+static struct ck804xrom_window ck804xrom_window = {
+ .maps = LIST_HEAD_INIT(ck804xrom_window.maps),
+};
+
+static void ck804xrom_cleanup(struct ck804xrom_window *window)
+{
+ struct ck804xrom_map_info *map, *scratch;
+ u8 byte;
+
+ if (window->pdev) {
+ /* Disable writes through the rom window */
+ pci_read_config_byte(window->pdev, 0x6d, &byte);
+ pci_write_config_byte(window->pdev, 0x6d, byte & ~1);
+ }
+
+ /* Free all of the mtd devices */
+ list_for_each_entry_safe(map, scratch, &window->maps, list) {
+ if (map->rsrc.parent)
+ release_resource(&map->rsrc);
+
+ mtd_device_unregister(map->mtd);
+ map_destroy(map->mtd);
+ list_del(&map->list);
+ kfree(map);
+ }
+ if (window->rsrc.parent)
+ release_resource(&window->rsrc);
+
+ if (window->virt) {
+ iounmap(window->virt);
+ window->virt = NULL;
+ window->phys = 0;
+ window->size = 0;
+ }
+ pci_dev_put(window->pdev);
+}
+
+
+static int __init ck804xrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+ u8 byte;
+ u16 word;
+ struct ck804xrom_window *window = &ck804xrom_window;
+ struct ck804xrom_map_info *map = NULL;
+ unsigned long map_top;
+
+ /* Remember the pci dev I find the window in */
+ window->pdev = pci_dev_get(pdev);
+
+ switch (ent->driver_data) {
+ case DEV_CK804:
+ /* Enable the selected rom window. This is often incorrectly
+ * set up by the BIOS, and the 4MiB offset for the lock registers
+ * requires the full 5MiB of window space.
+ *
+ * This 'write, then read' approach preserves the bits used by
+ * other hardware functions.
+ */
+ pci_read_config_byte(pdev, 0x88, &byte);
+ pci_write_config_byte(pdev, 0x88, byte | win_size_bits);
+
+ /* Assume the rom window is properly set up, and find its size */
+ pci_read_config_byte(pdev, 0x88, &byte);
+
+ if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6)))
+ window->phys = 0xffb00000; /* 5MiB */
+ else if ((byte & (1<<7)) == (1<<7))
+ window->phys = 0xffc00000; /* 4MiB */
+ else
+ window->phys = 0xffff0000; /* 64KiB */
+ break;
+
+ case DEV_MCP55:
+ pci_read_config_byte(pdev, 0x88, &byte);
+ pci_write_config_byte(pdev, 0x88, byte | (win_size_bits & 0xff));
+
+ pci_read_config_byte(pdev, 0x8c, &byte);
+ pci_write_config_byte(pdev, 0x8c, byte | ((win_size_bits & 0xff00) >> 8));
+
+ pci_read_config_word(pdev, 0x90, &word);
+ pci_write_config_word(pdev, 0x90, word | ((win_size_bits & 0x7fff0000) >> 16));
+
+ window->phys = 0xff000000; /* 16MiB, hardcoded for now */
+ break;
+ }
+
+ window->size = 0xffffffffUL - window->phys + 1UL;
+
+ /*
+ * Try to reserve the window mem region. If this fails then
+ * it is likely due to a fragment of the window being
+ * "reserved" by the BIOS. In the case that the
+ * request_mem_region() fails then once the rom size is
+ * discovered we will try to reserve the unreserved fragment.
+ */
+ window->rsrc.name = MOD_NAME;
+ window->rsrc.start = window->phys;
+ window->rsrc.end = window->phys + window->size - 1;
+ window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&iomem_resource, &window->rsrc)) {
+ window->rsrc.parent = NULL;
+ printk(KERN_ERR MOD_NAME
+ " %s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
+ }
+
+
+ /* Enable writes through the rom window */
+ pci_read_config_byte(pdev, 0x6d, &byte);
+ pci_write_config_byte(pdev, 0x6d, byte | 1);
+
+ /* FIXME handle registers 0x80 - 0x8C the bios region locks */
+
+ /* For write accesses caches are useless */
+ window->virt = ioremap(window->phys, window->size);
+ if (!window->virt) {
+ printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
+ window->phys, window->size);
+ goto out;
+ }
+
+ /* Get the first address to look for a rom chip at */
+ map_top = window->phys;
+#if 1
+ /* The probe sequence run over the firmware hub lock
+ * registers sets them to 0x7 (no access).
+ * Probe at most the last 4MiB of the address space.
+ */
+ if (map_top < 0xffc00000)
+ map_top = 0xffc00000;
+#endif
+ /* Loop through and look for rom chips. Since we don't know the
+ * starting address for each chip, probe every ROM_PROBE_STEP_SIZE
+ * bytes from the starting address of the window.
+ */
+ while((map_top - 1) < 0xffffffffUL) {
+ struct cfi_private *cfi;
+ unsigned long offset;
+ int i;
+
+ if (!map) {
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ goto out;
+ }
+ memset(map, 0, sizeof(*map));
+ INIT_LIST_HEAD(&map->list);
+ map->map.name = map->map_name;
+ map->map.phys = map_top;
+ offset = map_top - window->phys;
+ map->map.virt = (void __iomem *)
+ (((unsigned long)(window->virt)) + offset);
+ map->map.size = 0xffffffffUL - map_top + 1UL;
+ /* Set the name of the map to the address I am trying */
+ sprintf(map->map_name, "%s @%08Lx",
+ MOD_NAME, (unsigned long long)map->map.phys);
+
+ /* There is no generic VPP support */
+ for(map->map.bankwidth = 32; map->map.bankwidth;
+ map->map.bankwidth >>= 1)
+ {
+ char **probe_type;
+ /* Skip bankwidths that are not supported */
+ if (!map_bankwidth_supported(map->map.bankwidth))
+ continue;
+
+ /* Setup the map methods */
+ simple_map_init(&map->map);
+
+ /* Try all of the probe methods */
+ probe_type = rom_probe_types;
+ for(; *probe_type; probe_type++) {
+ map->mtd = do_map_probe(*probe_type, &map->map);
+ if (map->mtd)
+ goto found;
+ }
+ }
+ map_top += ROM_PROBE_STEP_SIZE;
+ continue;
+ found:
+ /* Trim the size if we are larger than the map */
+ if (map->mtd->size > map->map.size) {
+ printk(KERN_WARNING MOD_NAME
+ " rom(%llu) larger than window(%lu). fixing...\n",
+ (unsigned long long)map->mtd->size, map->map.size);
+ map->mtd->size = map->map.size;
+ }
+ if (window->rsrc.parent) {
+ /*
+ * Registering the MTD device in iomem may not be possible
+ * if there is a BIOS "reserved" and BUSY range. If this
+ * fails then continue anyway.
+ */
+ map->rsrc.name = map->map_name;
+ map->rsrc.start = map->map.phys;
+ map->rsrc.end = map->map.phys + map->mtd->size - 1;
+ map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&window->rsrc, &map->rsrc)) {
+ printk(KERN_ERR MOD_NAME
+ ": cannot reserve MTD resource\n");
+ map->rsrc.parent = NULL;
+ }
+ }
+
+ /* Make the whole region visible in the map */
+ map->map.virt = window->virt;
+ map->map.phys = window->phys;
+ cfi = map->map.fldrv_priv;
+ for(i = 0; i < cfi->numchips; i++)
+ cfi->chips[i].start += offset;
+
+ /* Now that the mtd device is complete, claim and export it */
+ map->mtd->owner = THIS_MODULE;
+ if (mtd_device_register(map->mtd, NULL, 0)) {
+ map_destroy(map->mtd);
+ map->mtd = NULL;
+ goto out;
+ }
+
+
+ /* Calculate the new value of map_top */
+ map_top += map->mtd->size;
+
+ /* File away the map structure */
+ list_add(&map->list, &window->maps);
+ map = NULL;
+ }
+
+ out:
+ /* Free any left over map structures */
+ kfree(map);
+
+ /* See if I have any map structures */
+ if (list_empty(&window->maps)) {
+ ck804xrom_cleanup(window);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+
+static void ck804xrom_remove_one(struct pci_dev *pdev)
+{
+ struct ck804xrom_window *window = &ck804xrom_window;
+
+ ck804xrom_cleanup(window);
+}
+
+static const struct pci_device_id ck804xrom_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0051), .driver_data = DEV_CK804 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0360), .driver_data = DEV_MCP55 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0361), .driver_data = DEV_MCP55 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0362), .driver_data = DEV_MCP55 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0363), .driver_data = DEV_MCP55 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0364), .driver_data = DEV_MCP55 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0365), .driver_data = DEV_MCP55 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0366), .driver_data = DEV_MCP55 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0367), .driver_data = DEV_MCP55 },
+ { 0, }
+};
+
+#if 0
+MODULE_DEVICE_TABLE(pci, ck804xrom_pci_tbl);
+
+static struct pci_driver ck804xrom_driver = {
+ .name = MOD_NAME,
+ .id_table = ck804xrom_pci_tbl,
+ .probe = ck804xrom_init_one,
+ .remove = ck804xrom_remove_one,
+};
+#endif
+
+static int __init init_ck804xrom(void)
+{
+ struct pci_dev *pdev;
+ const struct pci_device_id *id;
+ int retVal;
+ pdev = NULL;
+
+ for(id = ck804xrom_pci_tbl; id->vendor; id++) {
+ pdev = pci_get_device(id->vendor, id->device, NULL);
+ if (pdev)
+ break;
+ }
+ if (pdev) {
+ retVal = ck804xrom_init_one(pdev, id);
+ pci_dev_put(pdev);
+ return retVal;
+ }
+ return -ENXIO;
+#if 0
+ return pci_register_driver(&ck804xrom_driver);
+#endif
+}
+
+static void __exit cleanup_ck804xrom(void)
+{
+ ck804xrom_remove_one(ck804xrom_window.pdev);
+}
+
+module_init(init_ck804xrom);
+module_exit(cleanup_ck804xrom);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>, Dave Olsen <dolsen@lnxi.com>");
+MODULE_DESCRIPTION("MTD map driver for BIOS chips on the Nvidia ck804 southbridge");
+
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
new file mode 100644
index 0000000000..70a3db3ab8
--- /dev/null
+++ b/drivers/mtd/maps/dc21285.c
@@ -0,0 +1,231 @@
+/*
+ * MTD map driver for flash on the DC21285 (the StrongARM-110 companion chip)
+ *
+ * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
+ *
+ * This code is GPL
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/hardware/dec21285.h>
+#include <asm/mach-types.h>
+
+
+static struct mtd_info *dc21285_mtd;
+
+#ifdef CONFIG_ARCH_NETWINDER
+/*
+ * This is really ugly, but it seems to be the only
+ * reliable way to do it, as the CPLD state machine
+ * is unpredictable. So we have a 25us penalty per
+ * write access.
+ */
+static void nw_en_write(void)
+{
+ unsigned long flags;
+
+ /*
+ * we want to write a bit pattern XXX1 to Xilinx to enable
+ * the write gate, which will be open for about the next 2ms.
+ */
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+ nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+
+ /*
+ * let the ISA bus catch up...
+ */
+ udelay(25);
+}
+#else
+#define nw_en_write() do { } while (0)
+#endif
+
+static map_word dc21285_read8(struct map_info *map, unsigned long ofs)
+{
+ map_word val;
+ val.x[0] = *(uint8_t*)(map->virt + ofs);
+ return val;
+}
+
+static map_word dc21285_read16(struct map_info *map, unsigned long ofs)
+{
+ map_word val;
+ val.x[0] = *(uint16_t*)(map->virt + ofs);
+ return val;
+}
+
+static map_word dc21285_read32(struct map_info *map, unsigned long ofs)
+{
+ map_word val;
+ val.x[0] = *(uint32_t*)(map->virt + ofs);
+ return val;
+}
+
+static void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ memcpy(to, (void*)(map->virt + from), len);
+}
+
+static void dc21285_write8(struct map_info *map, const map_word d, unsigned long adr)
+{
+ if (machine_is_netwinder())
+ nw_en_write();
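+ /* Latch the byte lane in CSR_ROMWRITEREG, then issue the write at
+ * the word-aligned address; the 21285 steers the data to the right
+ * byte. The 32-bit path below needs no such steering.
+ */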
+ *CSR_ROMWRITEREG = adr & 3;
+ adr &= ~3;
+ *(uint8_t*)(map->virt + adr) = d.x[0];
+}
+
+static void dc21285_write16(struct map_info *map, const map_word d, unsigned long adr)
+{
+ if (machine_is_netwinder())
+ nw_en_write();
+ *CSR_ROMWRITEREG = adr & 3;
+ adr &= ~3;
+ *(uint16_t*)(map->virt + adr) = d.x[0];
+}
+
+static void dc21285_write32(struct map_info *map, const map_word d, unsigned long adr)
+{
+ if (machine_is_netwinder())
+ nw_en_write();
+ *(uint32_t*)(map->virt + adr) = d.x[0];
+}
+
+static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ while (len > 0) {
+ map_word d;
+ d.x[0] = *((uint32_t*)from);
+ dc21285_write32(map, d, to);
+ from += 4;
+ to += 4;
+ len -= 4;
+ }
+}
+
+static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ while (len > 0) {
+ map_word d;
+ d.x[0] = *((uint16_t*)from);
+ dc21285_write16(map, d, to);
+ from += 2;
+ to += 2;
+ len -= 2;
+ }
+}
+
+static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ while (len > 0) {
+ map_word d;
+ d.x[0] = *((uint8_t*)from);
+ dc21285_write8(map, d, to);
+ from++;
+ to++;
+ len--;
+ }
+}
+
+static struct map_info dc21285_map = {
+ .name = "DC21285 flash",
+ .phys = NO_XIP,
+ .size = 16*1024*1024,
+ .copy_from = dc21285_copy_from,
+};
+
+/* Partition stuff */
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
+
+static int __init init_dc21285(void)
+{
+ /* Determine bankwidth */
+ switch (*CSR_SA110_CNTL & (3<<14)) {
+ case SA110_CNTL_ROMWIDTH_8:
+ dc21285_map.bankwidth = 1;
+ dc21285_map.read = dc21285_read8;
+ dc21285_map.write = dc21285_write8;
+ dc21285_map.copy_to = dc21285_copy_to_8;
+ break;
+ case SA110_CNTL_ROMWIDTH_16:
+ dc21285_map.bankwidth = 2;
+ dc21285_map.read = dc21285_read16;
+ dc21285_map.write = dc21285_write16;
+ dc21285_map.copy_to = dc21285_copy_to_16;
+ break;
+ case SA110_CNTL_ROMWIDTH_32:
+ dc21285_map.bankwidth = 4;
+ dc21285_map.read = dc21285_read32;
+ dc21285_map.write = dc21285_write32;
+ dc21285_map.copy_to = dc21285_copy_to_32;
+ break;
+ default:
+ printk (KERN_ERR "DC21285 flash: undefined bankwidth\n");
+ return -ENXIO;
+ }
+ printk (KERN_NOTICE "DC21285 flash support (%d-bit bankwidth)\n",
+ dc21285_map.bankwidth*8);
+
+ /* Let's map the flash area */
+ dc21285_map.virt = ioremap(DC21285_FLASH, 16*1024*1024);
+ if (!dc21285_map.virt) {
+ printk("Failed to ioremap\n");
+ return -EIO;
+ }
+
+ if (machine_is_ebsa285()) {
+ dc21285_mtd = do_map_probe("cfi_probe", &dc21285_map);
+ } else {
+ dc21285_mtd = do_map_probe("jedec_probe", &dc21285_map);
+ }
+
+ if (!dc21285_mtd) {
+ iounmap(dc21285_map.virt);
+ return -ENXIO;
+ }
+
+ dc21285_mtd->owner = THIS_MODULE;
+
+ mtd_device_parse_register(dc21285_mtd, probes, NULL, NULL, 0);
+
+ if(machine_is_ebsa285()) {
+ /*
+ * Flash timing is determined with bits 19-16 of the
+ * CSR_SA110_CNTL. The value is the number of wait cycles, or
+ * 0 for 16 cycles (the default). Cycles are 20 ns.
+ * Here we use 7 for 140 ns flash chips.
+ */
+ /* access time */
+ *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x000f0000) | (7 << 16));
+ /* burst time */
+ *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x00f00000) | (7 << 20));
+ /* tristate time */
+ *CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x0f000000) | (7 << 24));
+ }
+
+ return 0;
+}
+
+static void __exit cleanup_dc21285(void)
+{
+ mtd_device_unregister(dc21285_mtd);
+ map_destroy(dc21285_mtd);
+ iounmap(dc21285_map.virt);
+}
+
+module_init(init_dc21285);
+module_exit(cleanup_dc21285);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
+MODULE_DESCRIPTION("MTD map driver for DC21285 boards");
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
new file mode 100644
index 0000000000..15d5b76ff5
--- /dev/null
+++ b/drivers/mtd/maps/esb2rom.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * esb2rom.c
+ *
+ * Normal mappings of flash chips in physical memory
+ * through the Intel ESB2 Southbridge.
+ *
+ * This was derived from ichxrom.c in May 2006 by
+ * Lew Glendenning <lglendenning@lnxi.com>
+ *
+ * Eric Biederman, of course, was a major help in this effort.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/list.h>
+
+#define MOD_NAME KBUILD_BASENAME
+
+#define ADDRESS_NAME_LEN 18
+
+#define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */
+
+#define BIOS_CNTL 0xDC
+#define BIOS_LOCK_ENABLE 0x02
+#define BIOS_WRITE_ENABLE 0x01
+
+/* This became a 16-bit register, and EN2 has disappeared */
+#define FWH_DEC_EN1 0xD8
+#define FWH_F8_EN 0x8000
+#define FWH_F0_EN 0x4000
+#define FWH_E8_EN 0x2000
+#define FWH_E0_EN 0x1000
+#define FWH_D8_EN 0x0800
+#define FWH_D0_EN 0x0400
+#define FWH_C8_EN 0x0200
+#define FWH_C0_EN 0x0100
+#define FWH_LEGACY_F_EN 0x0080
+#define FWH_LEGACY_E_EN 0x0040
+/* reserved 0x0020 and 0x0010 */
+#define FWH_70_EN 0x0008
+#define FWH_60_EN 0x0004
+#define FWH_50_EN 0x0002
+#define FWH_40_EN 0x0001
+
+/* these are 32-bit values */
+#define FWH_SEL1 0xD0
+#define FWH_SEL2 0xD4
+
+#define FWH_8MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
+ FWH_70_EN | FWH_60_EN | FWH_50_EN | FWH_40_EN)
+
+#define FWH_7MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
+ FWH_70_EN | FWH_60_EN | FWH_50_EN)
+
+#define FWH_6MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
+ FWH_70_EN | FWH_60_EN)
+
+#define FWH_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
+ FWH_70_EN)
+
+#define FWH_4MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN)
+
+#define FWH_3_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN | FWH_D0_EN | FWH_C8_EN)
+
+#define FWH_3MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN | FWH_D0_EN)
+
+#define FWH_2_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
+ FWH_D8_EN)
+
+#define FWH_2MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN)
+
+#define FWH_1_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN)
+
+#define FWH_1MiB (FWH_F8_EN | FWH_F0_EN)
+
+#define FWH_0_5MiB (FWH_F8_EN)
+
+
+struct esb2rom_window {
+ void __iomem* virt;
+ unsigned long phys;
+ unsigned long size;
+ struct list_head maps;
+ struct resource rsrc;
+ struct pci_dev *pdev;
+};
+
+struct esb2rom_map_info {
+ struct list_head list;
+ struct map_info map;
+ struct mtd_info *mtd;
+ struct resource rsrc;
+ char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
+};
+
+static struct esb2rom_window esb2rom_window = {
+ .maps = LIST_HEAD_INIT(esb2rom_window.maps),
+};
+
+static void esb2rom_cleanup(struct esb2rom_window *window)
+{
+ struct esb2rom_map_info *map, *scratch;
+ u8 byte;
+
+ /* Disable writes through the rom window */
+ pci_read_config_byte(window->pdev, BIOS_CNTL, &byte);
+ pci_write_config_byte(window->pdev, BIOS_CNTL,
+ byte & ~BIOS_WRITE_ENABLE);
+
+ /* Free all of the mtd devices */
+ list_for_each_entry_safe(map, scratch, &window->maps, list) {
+ if (map->rsrc.parent)
+ release_resource(&map->rsrc);
+ mtd_device_unregister(map->mtd);
+ map_destroy(map->mtd);
+ list_del(&map->list);
+ kfree(map);
+ }
+ if (window->rsrc.parent)
+ release_resource(&window->rsrc);
+ if (window->virt) {
+ iounmap(window->virt);
+ window->virt = NULL;
+ window->phys = 0;
+ window->size = 0;
+ }
+ pci_dev_put(window->pdev);
+}
+
+static int __init esb2rom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+ struct esb2rom_window *window = &esb2rom_window;
+ struct esb2rom_map_info *map = NULL;
+ unsigned long map_top;
+ u8 byte;
+ u16 word;
+
+ /* For now I just handle the esb2 and I assume there
+ * are not a lot of resources up at the top of the address
+ * space. It is possible to handle other devices in the
+ * top 16MiB but it is very painful. Also since
+ * you can only really attach a FWH to an ICHX there are
+ * a number of simplifications you can make.
+ *
+ * Also you can page firmware hubs if an 8MiB window isn't enough
+ * but we don't currently handle that case either.
+ */
+ window->pdev = pci_dev_get(pdev);
+
+ /* RLG: experiment 2. Force the window registers to the widest values */
+
+/*
+ pci_read_config_word(pdev, FWH_DEC_EN1, &word);
+ printk(KERN_DEBUG "Original FWH_DEC_EN1 : %x\n", word);
+ pci_write_config_byte(pdev, FWH_DEC_EN1, 0xff);
+ pci_read_config_byte(pdev, FWH_DEC_EN1, &byte);
+ printk(KERN_DEBUG "New FWH_DEC_EN1 : %x\n", byte);
+
+ pci_read_config_byte(pdev, FWH_DEC_EN2, &byte);
+ printk(KERN_DEBUG "Original FWH_DEC_EN2 : %x\n", byte);
+ pci_write_config_byte(pdev, FWH_DEC_EN2, 0x0f);
+ pci_read_config_byte(pdev, FWH_DEC_EN2, &byte);
+ printk(KERN_DEBUG "New FWH_DEC_EN2 : %x\n", byte);
+*/
+
+ /* Find a region contiguous with the end of the ROM window */
+ window->phys = 0;
+ pci_read_config_word(pdev, FWH_DEC_EN1, &word);
+ printk(KERN_DEBUG "pci_read_config_word : %x\n", word);
+
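+ /* Walk the decode-enable masks from the widest window down and take
+ * the first one whose bits are all set; its base address starts a
+ * region that is decoded contiguously up to 0xffffffff.
+ */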
+ if ((word & FWH_8MiB) == FWH_8MiB)
+ window->phys = 0xff400000;
+ else if ((word & FWH_7MiB) == FWH_7MiB)
+ window->phys = 0xff500000;
+ else if ((word & FWH_6MiB) == FWH_6MiB)
+ window->phys = 0xff600000;
+ else if ((word & FWH_5MiB) == FWH_5MiB)
+ window->phys = 0xff700000;
+ else if ((word & FWH_4MiB) == FWH_4MiB)
+ window->phys = 0xffc00000;
+ else if ((word & FWH_3_5MiB) == FWH_3_5MiB)
+ window->phys = 0xffc80000;
+ else if ((word & FWH_3MiB) == FWH_3MiB)
+ window->phys = 0xffd00000;
+ else if ((word & FWH_2_5MiB) == FWH_2_5MiB)
+ window->phys = 0xffd80000;
+ else if ((word & FWH_2MiB) == FWH_2MiB)
+ window->phys = 0xffe00000;
+ else if ((word & FWH_1_5MiB) == FWH_1_5MiB)
+ window->phys = 0xffe80000;
+ else if ((word & FWH_1MiB) == FWH_1MiB)
+ window->phys = 0xfff00000;
+ else if ((word & FWH_0_5MiB) == FWH_0_5MiB)
+ window->phys = 0xfff80000;
+
+ if (window->phys == 0) {
+ printk(KERN_ERR MOD_NAME ": Rom window is closed\n");
+ goto out;
+ }
+
+ window->phys -= 0x400000UL;
+ window->size = (0xffffffffUL - window->phys) + 1UL;
+
+ /* Enable writes through the rom window */
+ pci_read_config_byte(pdev, BIOS_CNTL, &byte);
+ if (!(byte & BIOS_WRITE_ENABLE) && (byte & (BIOS_LOCK_ENABLE))) {
+ /* The BIOS will generate an error if I enable
+ * this device, so don't even try.
+ */
+ printk(KERN_ERR MOD_NAME ": firmware access control, I can't enable writes\n");
+ goto out;
+ }
+ pci_write_config_byte(pdev, BIOS_CNTL, byte | BIOS_WRITE_ENABLE);
+
+ /*
+ * Try to reserve the window mem region. If this fails then
+ * it is likely due to the window being "reserved" by the BIOS.
+ */
+ window->rsrc.name = MOD_NAME;
+ window->rsrc.start = window->phys;
+ window->rsrc.end = window->phys + window->size - 1;
+ window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&iomem_resource, &window->rsrc)) {
+ window->rsrc.parent = NULL;
+ printk(KERN_DEBUG MOD_NAME ": "
+ "%s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
+ }
+
+ /* Map the firmware hub into my address space. */
+ window->virt = ioremap(window->phys, window->size);
+ if (!window->virt) {
+ printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
+ window->phys, window->size);
+ goto out;
+ }
+
+ /* Get the first address to look for a rom chip at */
+ map_top = window->phys;
+ if ((window->phys & 0x3fffff) != 0) {
+ /* window->phys was moved down 4MiB above; if the decode base is
+ * not 4MiB-aligned, start probing at the original base instead */
+ map_top = window->phys + 0x400000;
+ }
+#if 1
+ /* The probe sequence run over the firmware hub lock
+ * registers sets them to 0x7 (no access).
+ * (Insane hardware design, but most copied Intel's.)
+ * ==> Probe at most the last 4M of the address space.
+ */
+ if (map_top < 0xffc00000)
+ map_top = 0xffc00000;
+#endif
+ /* Loop through and look for rom chips */
+ while ((map_top - 1) < 0xffffffffUL) {
+ struct cfi_private *cfi;
+ unsigned long offset;
+ int i;
+
+ if (!map) {
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ goto out;
+ }
+ memset(map, 0, sizeof(*map));
+ INIT_LIST_HEAD(&map->list);
+ map->map.name = map->map_name;
+ map->map.phys = map_top;
+ offset = map_top - window->phys;
+ map->map.virt = (void __iomem *)
+ (((unsigned long)(window->virt)) + offset);
+ map->map.size = 0xffffffffUL - map_top + 1UL;
+ /* Set the name of the map to the address I am trying */
+ sprintf(map->map_name, "%s @%08Lx",
+ MOD_NAME, (unsigned long long)map->map.phys);
+
+ /* Firmware hubs only use vpp when being programmed
+ * in a factory setting. So in-place programming
+ * needs to use a different method.
+ */
+ for(map->map.bankwidth = 32; map->map.bankwidth;
+ map->map.bankwidth >>= 1) {
+ char **probe_type;
+ /* Skip bankwidths that are not supported */
+ if (!map_bankwidth_supported(map->map.bankwidth))
+ continue;
+
+ /* Setup the map methods */
+ simple_map_init(&map->map);
+
+ /* Try all of the probe methods */
+ probe_type = rom_probe_types;
+ for(; *probe_type; probe_type++) {
+ map->mtd = do_map_probe(*probe_type, &map->map);
+ if (map->mtd)
+ goto found;
+ }
+ }
+ map_top += ROM_PROBE_STEP_SIZE;
+ continue;
+ found:
+ /* Trim the size if we are larger than the map */
+ if (map->mtd->size > map->map.size) {
+ printk(KERN_WARNING MOD_NAME
+ " rom(%llu) larger than window(%lu). fixing...\n",
+ (unsigned long long)map->mtd->size, map->map.size);
+ map->mtd->size = map->map.size;
+ }
+ if (window->rsrc.parent) {
+ /*
+ * Registering the MTD device in iomem may not be possible
+ * if there is a BIOS "reserved" and BUSY range. If this
+ * fails then continue anyway.
+ */
+ map->rsrc.name = map->map_name;
+ map->rsrc.start = map->map.phys;
+ map->rsrc.end = map->map.phys + map->mtd->size - 1;
+ map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&window->rsrc, &map->rsrc)) {
+ printk(KERN_ERR MOD_NAME
+ ": cannot reserve MTD resource\n");
+ map->rsrc.parent = NULL;
+ }
+ }
+
+ /* Make the whole region visible in the map */
+ map->map.virt = window->virt;
+ map->map.phys = window->phys;
+ cfi = map->map.fldrv_priv;
+ for(i = 0; i < cfi->numchips; i++)
+ cfi->chips[i].start += offset;
+
+ /* Now that the mtd device is complete, claim and export it */
+ map->mtd->owner = THIS_MODULE;
+ if (mtd_device_register(map->mtd, NULL, 0)) {
+ map_destroy(map->mtd);
+ map->mtd = NULL;
+ goto out;
+ }
+
+ /* Calculate the new value of map_top */
+ map_top += map->mtd->size;
+
+ /* File away the map structure */
+ list_add(&map->list, &window->maps);
+ map = NULL;
+ }
+
+ out:
+ /* Free any left over map structures */
+ kfree(map);
+
+ /* See if I have any map structures */
+ if (list_empty(&window->maps)) {
+ esb2rom_cleanup(window);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void esb2rom_remove_one(struct pci_dev *pdev)
+{
+ struct esb2rom_window *window = &esb2rom_window;
+ esb2rom_cleanup(window);
+}
+
+static const struct pci_device_id esb2rom_pci_tbl[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, },
+};
+
+#if 0
+MODULE_DEVICE_TABLE(pci, esb2rom_pci_tbl);
+
+static struct pci_driver esb2rom_driver = {
+ .name = MOD_NAME,
+ .id_table = esb2rom_pci_tbl,
+ .probe = esb2rom_init_one,
+ .remove = esb2rom_remove_one,
+};
+#endif
+
+static int __init init_esb2rom(void)
+{
+ struct pci_dev *pdev;
+ const struct pci_device_id *id;
+ int retVal;
+
+ pdev = NULL;
+ for (id = esb2rom_pci_tbl; id->vendor; id++) {
+ printk(KERN_DEBUG "device id = %x\n", id->device);
+ pdev = pci_get_device(id->vendor, id->device, NULL);
+ if (pdev) {
+ printk(KERN_DEBUG "matched device = %x\n", id->device);
+ break;
+ }
+ }
+ if (pdev) {
+ printk(KERN_DEBUG "matched device id %x\n", id->device);
+ retVal = esb2rom_init_one(pdev, &esb2rom_pci_tbl[0]);
+ pci_dev_put(pdev);
+ printk(KERN_DEBUG "retVal = %d\n", retVal);
+ return retVal;
+ }
+ return -ENXIO;
+#if 0
+ return pci_register_driver(&esb2rom_driver);
+#endif
+}
+
+static void __exit cleanup_esb2rom(void)
+{
+ esb2rom_remove_one(esb2rom_window.pdev);
+}
+
+module_init(init_esb2rom);
+module_exit(cleanup_esb2rom);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lew Glendenning <lglendenning@lnxi.com>");
+MODULE_DESCRIPTION("MTD map driver for BIOS chips on the ESB2 southbridge");
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
new file mode 100644
index 0000000000..c8b2793691
--- /dev/null
+++ b/drivers/mtd/maps/ichxrom.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ichxrom.c
+ *
+ * Normal mappings of chips in physical memory
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/list.h>
+
+#define xstr(s) str(s)
+#define str(s) #s
+#define MOD_NAME xstr(KBUILD_BASENAME)
+
+#define ADDRESS_NAME_LEN 18
+
+#define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */
+
+#define BIOS_CNTL 0x4e
+#define FWH_DEC_EN1 0xE3
+#define FWH_DEC_EN2 0xF0
+#define FWH_SEL1 0xE8
+#define FWH_SEL2 0xEE
+
+struct ichxrom_window {
+ void __iomem* virt;
+ unsigned long phys;
+ unsigned long size;
+ struct list_head maps;
+ struct resource rsrc;
+ struct pci_dev *pdev;
+};
+
+struct ichxrom_map_info {
+ struct list_head list;
+ struct map_info map;
+ struct mtd_info *mtd;
+ struct resource rsrc;
+ char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
+};
+
+static struct ichxrom_window ichxrom_window = {
+ .maps = LIST_HEAD_INIT(ichxrom_window.maps),
+};
+
+static void ichxrom_cleanup(struct ichxrom_window *window)
+{
+ struct ichxrom_map_info *map, *scratch;
+ u16 word;
+ int ret;
+
+ /* Disable writes through the rom window */
+ ret = pci_read_config_word(window->pdev, BIOS_CNTL, &word);
+ if (!ret)
+ pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
+ pci_dev_put(window->pdev);
+
+ /* Free all of the mtd devices */
+ list_for_each_entry_safe(map, scratch, &window->maps, list) {
+ if (map->rsrc.parent)
+ release_resource(&map->rsrc);
+ mtd_device_unregister(map->mtd);
+ map_destroy(map->mtd);
+ list_del(&map->list);
+ kfree(map);
+ }
+ if (window->rsrc.parent)
+ release_resource(&window->rsrc);
+ if (window->virt) {
+ iounmap(window->virt);
+ window->virt = NULL;
+ window->phys = 0;
+ window->size = 0;
+ window->pdev = NULL;
+ }
+}
+
+
+static int __init ichxrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+ struct ichxrom_window *window = &ichxrom_window;
+ struct ichxrom_map_info *map = NULL;
+ unsigned long map_top;
+ u8 byte;
+ u16 word;
+
+ /* For now I just handle the ICHX and I assume there
+ * are not a lot of resources up at the top of the address
+ * space. It is possible to handle other devices in the
+ * top 16MB, but it is very painful. Also, since
+ * you can only really attach a FWH to an ICHX, there
+ * are a number of simplifications you can make.
+ *
+ * Also, you can page firmware hubs if an 8MB window isn't enough,
+ * but we don't currently handle that case either.
+ */
+ window->pdev = pdev;
+
+ /* Find a region contiguous to the end of the ROM window */
+ window->phys = 0;
+ pci_read_config_byte(pdev, FWH_DEC_EN1, &byte);
+ if (byte == 0xff) {
+ window->phys = 0xffc00000;
+ pci_read_config_byte(pdev, FWH_DEC_EN2, &byte);
+ if ((byte & 0x0f) == 0x0f) {
+ window->phys = 0xff400000;
+ }
+ else if ((byte & 0x0e) == 0x0e) {
+ window->phys = 0xff500000;
+ }
+ else if ((byte & 0x0c) == 0x0c) {
+ window->phys = 0xff600000;
+ }
+ else if ((byte & 0x08) == 0x08) {
+ window->phys = 0xff700000;
+ }
+ }
+ else if ((byte & 0xfe) == 0xfe) {
+ window->phys = 0xffc80000;
+ }
+ else if ((byte & 0xfc) == 0xfc) {
+ window->phys = 0xffd00000;
+ }
+ else if ((byte & 0xf8) == 0xf8) {
+ window->phys = 0xffd80000;
+ }
+ else if ((byte & 0xf0) == 0xf0) {
+ window->phys = 0xffe00000;
+ }
+ else if ((byte & 0xe0) == 0xe0) {
+ window->phys = 0xffe80000;
+ }
+ else if ((byte & 0xc0) == 0xc0) {
+ window->phys = 0xfff00000;
+ }
+ else if ((byte & 0x80) == 0x80) {
+ window->phys = 0xfff80000;
+ }
+
+ if (window->phys == 0) {
+ printk(KERN_ERR MOD_NAME ": Rom window is closed\n");
+ goto out;
+ }
+ window->phys -= 0x400000UL;
+ window->size = (0xffffffffUL - window->phys) + 1UL;
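+
+ /* Illustration (assumed register values): with FWH_DEC_EN1 == 0xff
+ * and the low nibble of FWH_DEC_EN2 == 0x0f, the decode above picks
+ * phys = 0xff400000; backing off one more 4MiB page then gives a
+ * window covering 0xff000000-0xffffffff, i.e. size = 16MiB.
+ */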
+
+ /* Enable writes through the rom window */
+ pci_read_config_word(pdev, BIOS_CNTL, &word);
+ if (!(word & 1) && (word & (1<<1))) {
+ /* The BIOS will generate an error if I enable
+ * this device, so don't even try.
+ */
+ printk(KERN_ERR MOD_NAME ": firmware access control, I can't enable writes\n");
+ goto out;
+ }
+ pci_write_config_word(pdev, BIOS_CNTL, word | 1);
+
+ /*
+ * Try to reserve the window mem region. If this fails then
+ * it is likely due to the window being "reserved" by the BIOS.
+ */
+ window->rsrc.name = MOD_NAME;
+ window->rsrc.start = window->phys;
+ window->rsrc.end = window->phys + window->size - 1;
+ window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&iomem_resource, &window->rsrc)) {
+ window->rsrc.parent = NULL;
+ printk(KERN_DEBUG MOD_NAME ": "
+ "%s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
+ }
+
+ /* Map the firmware hub into my address space. */
+ window->virt = ioremap(window->phys, window->size);
+ if (!window->virt) {
+ printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
+ window->phys, window->size);
+ goto out;
+ }
+
+ /* Get the first address to look for a rom chip at */
+ map_top = window->phys;
+ if ((window->phys & 0x3fffff) != 0) {
+ map_top = window->phys + 0x400000;
+ }
+#if 1
+ /* The probe sequence run over the firmware hub lock
+ * registers sets them to 0x7 (no access).
+ * Probe at most the last 4M of the address space.
+ */
+ if (map_top < 0xffc00000) {
+ map_top = 0xffc00000;
+ }
+#endif
+ /* Loop through and look for rom chips */
+ while((map_top - 1) < 0xffffffffUL) {
+ struct cfi_private *cfi;
+ unsigned long offset;
+ int i;
+
+ if (!map) {
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ goto out;
+ }
+ memset(map, 0, sizeof(*map));
+ INIT_LIST_HEAD(&map->list);
+ map->map.name = map->map_name;
+ map->map.phys = map_top;
+ offset = map_top - window->phys;
+ map->map.virt = (void __iomem *)
+ (((unsigned long)(window->virt)) + offset);
+ map->map.size = 0xffffffffUL - map_top + 1UL;
+ /* Set the name of the map to the address I am trying */
+ sprintf(map->map_name, "%s @%08Lx",
+ MOD_NAME, (unsigned long long)map->map.phys);
+
+ /* Firmware hubs only use vpp when being programmed
+ * in a factory setting. So in-place programming
+ * needs to use a different method.
+ */
+ for(map->map.bankwidth = 32; map->map.bankwidth;
+ map->map.bankwidth >>= 1)
+ {
+ char **probe_type;
+ /* Skip bankwidths that are not supported */
+ if (!map_bankwidth_supported(map->map.bankwidth))
+ continue;
+
+ /* Setup the map methods */
+ simple_map_init(&map->map);
+
+ /* Try all of the probe methods */
+ probe_type = rom_probe_types;
+ for(; *probe_type; probe_type++) {
+ map->mtd = do_map_probe(*probe_type, &map->map);
+ if (map->mtd)
+ goto found;
+ }
+ }
+ map_top += ROM_PROBE_STEP_SIZE;
+ continue;
+ found:
+ /* Trim the size if we are larger than the map */
+ if (map->mtd->size > map->map.size) {
+ printk(KERN_WARNING MOD_NAME
+ " rom(%llu) larger than window(%lu). fixing...\n",
+ (unsigned long long)map->mtd->size, map->map.size);
+ map->mtd->size = map->map.size;
+ }
+ if (window->rsrc.parent) {
+ /*
+ * Registering the MTD device in iomem may not be possible
+ * if there is a BIOS "reserved" and BUSY range. If this
+ * fails then continue anyway.
+ */
+ map->rsrc.name = map->map_name;
+ map->rsrc.start = map->map.phys;
+ map->rsrc.end = map->map.phys + map->mtd->size - 1;
+ map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&window->rsrc, &map->rsrc)) {
+ printk(KERN_ERR MOD_NAME
+ ": cannot reserve MTD resource\n");
+ map->rsrc.parent = NULL;
+ }
+ }
+
+ /* Make the whole region visible in the map */
+ map->map.virt = window->virt;
+ map->map.phys = window->phys;
+ cfi = map->map.fldrv_priv;
+ for(i = 0; i < cfi->numchips; i++) {
+ cfi->chips[i].start += offset;
+ }
+
+ /* Now that the mtd device is complete, claim and export it */
+ map->mtd->owner = THIS_MODULE;
+ if (mtd_device_register(map->mtd, NULL, 0)) {
+ map_destroy(map->mtd);
+ map->mtd = NULL;
+ goto out;
+ }
+
+
+ /* Calculate the new value of map_top */
+ map_top += map->mtd->size;
+
+ /* File away the map structure */
+ list_add(&map->list, &window->maps);
+ map = NULL;
+ }
+
+ out:
+ /* Free any left over map structures */
+ kfree(map);
+
+ /* See if I have any map structures */
+ if (list_empty(&window->maps)) {
+ ichxrom_cleanup(window);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+
+static void ichxrom_remove_one(struct pci_dev *pdev)
+{
+ struct ichxrom_window *window = &ichxrom_window;
+ ichxrom_cleanup(window);
+}
+
+static const struct pci_device_id ichxrom_pci_tbl[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, },
+};
+
+#if 0
+MODULE_DEVICE_TABLE(pci, ichxrom_pci_tbl);
+
+static struct pci_driver ichxrom_driver = {
+ .name = MOD_NAME,
+ .id_table = ichxrom_pci_tbl,
+ .probe = ichxrom_init_one,
+ .remove = ichxrom_remove_one,
+};
+#endif
+
+static int __init init_ichxrom(void)
+{
+ struct pci_dev *pdev;
+ const struct pci_device_id *id;
+
+ pdev = NULL;
+ for (id = ichxrom_pci_tbl; id->vendor; id++) {
+ pdev = pci_get_device(id->vendor, id->device, NULL);
+ if (pdev) {
+ break;
+ }
+ }
+ if (pdev) {
+ return ichxrom_init_one(pdev, &ichxrom_pci_tbl[0]);
+ }
+ return -ENXIO;
+#if 0
+ return pci_register_driver(&ichxrom_driver);
+#endif
+}
+
+static void __exit cleanup_ichxrom(void)
+{
+ ichxrom_remove_one(ichxrom_window.pdev);
+}
+
+module_init(init_ichxrom);
+module_exit(cleanup_ichxrom);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
+MODULE_DESCRIPTION("MTD map driver for BIOS chips on the ICHX southbridge");
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
new file mode 100644
index 0000000000..b41401852f
--- /dev/null
+++ b/drivers/mtd/maps/impa7.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Handle mapping of the NOR flash on implementa A7 boards
+ *
+ * Copyright 2002 SYSGO Real-Time Solutions GmbH
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#define WINDOW_ADDR0 0x00000000 /* physical properties of flash */
+#define WINDOW_SIZE0 0x00800000
+#define WINDOW_ADDR1 0x10000000 /* physical properties of flash */
+#define WINDOW_SIZE1 0x00800000
+#define NUM_FLASHBANKS 2
+#define BUSWIDTH 4
+
+#define MSG_PREFIX "impA7:" /* prefix for our printk()'s */
+#define MTDID "impa7-%d" /* for mtdparts= partitioning */
+
+static struct mtd_info *impa7_mtd[NUM_FLASHBANKS];
+
+static const char * const rom_probe_types[] = { "jedec_probe", NULL };
+
+static struct map_info impa7_map[NUM_FLASHBANKS] = {
+ {
+ .name = "impA7 NOR Flash Bank #0",
+ .size = WINDOW_SIZE0,
+ .bankwidth = BUSWIDTH,
+ },
+ {
+ .name = "impA7 NOR Flash Bank #1",
+ .size = WINDOW_SIZE1,
+ .bankwidth = BUSWIDTH,
+ },
+};
+
+/*
+ * MTD partitioning stuff
+ */
+static const struct mtd_partition partitions[] =
+{
+ {
+ .name = "FileSystem",
+ .size = 0x800000,
+ .offset = 0x00000000
+ },
+};
+
+static int __init init_impa7(void)
+{
+ const char * const *type;
+ int i;
+ static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = {
+ { WINDOW_ADDR0, WINDOW_SIZE0 },
+ { WINDOW_ADDR1, WINDOW_SIZE1 },
+ };
+ int devicesfound = 0;
+
+ for(i=0; i<NUM_FLASHBANKS; i++)
+ {
+ printk(KERN_NOTICE MSG_PREFIX "probing 0x%08lx at 0x%08lx\n",
+ pt[i].size, pt[i].addr);
+
+ impa7_map[i].phys = pt[i].addr;
+ impa7_map[i].virt = ioremap(pt[i].addr, pt[i].size);
+ if (!impa7_map[i].virt) {
+ printk(MSG_PREFIX "failed to ioremap\n");
+ return -EIO;
+ }
+ simple_map_init(&impa7_map[i]);
+
+ impa7_mtd[i] = NULL;
+ type = rom_probe_types;
+ for(; !impa7_mtd[i] && *type; type++) {
+ impa7_mtd[i] = do_map_probe(*type, &impa7_map[i]);
+ }
+
+ if (impa7_mtd[i]) {
+ impa7_mtd[i]->owner = THIS_MODULE;
+ devicesfound++;
+ mtd_device_register(impa7_mtd[i], partitions,
+ ARRAY_SIZE(partitions));
+ } else {
+ iounmap((void __iomem *)impa7_map[i].virt);
+ }
+ }
+ return devicesfound == 0 ? -ENXIO : 0;
+}
+
+static void __exit cleanup_impa7(void)
+{
+ int i;
+ for (i=0; i<NUM_FLASHBANKS; i++) {
+ if (impa7_mtd[i]) {
+ mtd_device_unregister(impa7_mtd[i]);
+ map_destroy(impa7_mtd[i]);
+ iounmap((void __iomem *)impa7_map[i].virt);
+ impa7_map[i].virt = NULL;
+ }
+ }
+}
+
+module_init(init_impa7);
+module_exit(cleanup_impa7);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pavel Bartusek <pba@sysgo.de>");
+MODULE_DESCRIPTION("MTD map driver for implementa impA7");
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
new file mode 100644
index 0000000000..d67b845b0e
--- /dev/null
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -0,0 +1,265 @@
+/*
+ * drivers/mtd/maps/intel_vr_nor.c
+ *
+ * An MTD map driver for a NOR flash bank on the Expansion Bus of the Intel
+ * Vermilion Range chipset.
+ *
+ * The Vermilion Range Expansion Bus supports four chip selects, each of which
+ * has 64MiB of address space. The 2nd BAR of the Expansion Bus PCI Device
+ * is a 256MiB memory region containing the address spaces for all four of the
+ * chip selects, with start addresses hardcoded on 64MiB boundaries.
+ *
+ * This map driver only supports NOR flash on chip select 0. The buswidth
+ * (either 8 bits or 16 bits) is determined by reading the Expansion Bus Timing
+ * and Control Register for Chip Select 0 (EXP_TIMING_CS0). This driver does
+ * not modify the value in the EXP_TIMING_CS0 register except to enable writing
+ * and disable boot acceleration. The timing parameters in the register are
+ * assumed to have been properly initialized by the BIOS. The reset default
+ * timing parameters are maximally conservative (slow), so access to the flash
+ * will be slower than it should be if the BIOS has not initialized the timing
+ * parameters.
+ *
+ * Author: Andy Lowe <alowe@mvista.com>
+ *
+ * 2006 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/flashchip.h>
+
+#define DRV_NAME "vr_nor"
+
+struct vr_nor_mtd {
+ void __iomem *csr_base;
+ struct map_info map;
+ struct mtd_info *info;
+ struct pci_dev *dev;
+};
+
+/* Expansion Bus Configuration and Status Registers are in BAR 0 */
+#define EXP_CSR_MBAR 0
+/* Expansion Bus Memory Window is BAR 1 */
+#define EXP_WIN_MBAR 1
+/* Maximum address space for Chip Select 0 is 64MiB */
+#define CS0_SIZE 0x04000000
+/* Chip Select 0 is at offset 0 in the Memory Window */
+#define CS0_START 0x0
+/* Chip Select 0 Timing Register is at offset 0 in CSR */
+#define EXP_TIMING_CS0 0x00
+#define TIMING_CS_EN (1 << 31) /* Chip Select Enable */
+#define TIMING_BOOT_ACCEL_DIS (1 << 8) /* Boot Acceleration Disable */
+#define TIMING_WR_EN (1 << 1) /* Write Enable */
+#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
+#define TIMING_MASK 0x3FFF0000
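+
+/*
+ * Illustration (register value assumed): a BIOS-programmed
+ * EXP_TIMING_CS0 of 0x80000003 decodes as TIMING_CS_EN (chip select
+ * enabled), TIMING_WR_EN (writes already allowed) and TIMING_BYTE_EN
+ * (8-bit bus), so vr_nor_init_maps() below would pick map.bankwidth = 1.
+ */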
+
+static void vr_nor_destroy_partitions(struct vr_nor_mtd *p)
+{
+ mtd_device_unregister(p->info);
+}
+
+static int vr_nor_init_partitions(struct vr_nor_mtd *p)
+{
+ /* register the flash bank */
+ /* partition the flash bank */
+ return mtd_device_register(p->info, NULL, 0);
+}
+
+static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
+{
+ map_destroy(p->info);
+}
+
+static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
+{
+ static const char * const probe_types[] =
+ { "cfi_probe", "jedec_probe", NULL };
+ const char * const *type;
+
+ for (type = probe_types; !p->info && *type; type++)
+ p->info = do_map_probe(*type, &p->map);
+ if (!p->info)
+ return -ENODEV;
+
+ p->info->dev.parent = &p->dev->dev;
+
+ return 0;
+}
+
+static void vr_nor_destroy_maps(struct vr_nor_mtd *p)
+{
+ unsigned int exp_timing_cs0;
+
+ /* write-protect the flash bank */
+ exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
+ exp_timing_cs0 &= ~TIMING_WR_EN;
+ writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
+
+ /* unmap the flash window */
+ iounmap(p->map.virt);
+
+ /* unmap the csr window */
+ iounmap(p->csr_base);
+}
+
+/*
+ * Initialize the map_info structure and map the flash.
+ * Returns 0 on success, nonzero otherwise.
+ */
+static int vr_nor_init_maps(struct vr_nor_mtd *p)
+{
+ unsigned long csr_phys, csr_len;
+ unsigned long win_phys, win_len;
+ unsigned int exp_timing_cs0;
+ int err;
+
+ csr_phys = pci_resource_start(p->dev, EXP_CSR_MBAR);
+ csr_len = pci_resource_len(p->dev, EXP_CSR_MBAR);
+ win_phys = pci_resource_start(p->dev, EXP_WIN_MBAR);
+ win_len = pci_resource_len(p->dev, EXP_WIN_MBAR);
+
+ if (!csr_phys || !csr_len || !win_phys || !win_len)
+ return -ENODEV;
+
+ if (win_len < (CS0_START + CS0_SIZE))
+ return -ENXIO;
+
+ p->csr_base = ioremap(csr_phys, csr_len);
+ if (!p->csr_base)
+ return -ENOMEM;
+
+ exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
+ if (!(exp_timing_cs0 & TIMING_CS_EN)) {
+ dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
+ "is disabled.\n");
+ err = -ENODEV;
+ goto release;
+ }
+ if ((exp_timing_cs0 & TIMING_MASK) == TIMING_MASK) {
+ dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
+ "is configured for maximally slow access times.\n");
+ }
+ p->map.name = DRV_NAME;
+ p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
+ p->map.phys = win_phys + CS0_START;
+ p->map.size = CS0_SIZE;
+ p->map.virt = ioremap(p->map.phys, p->map.size);
+ if (!p->map.virt) {
+ err = -ENOMEM;
+ goto release;
+ }
+ simple_map_init(&p->map);
+
+ /* Enable writes to flash bank */
+ exp_timing_cs0 |= TIMING_BOOT_ACCEL_DIS | TIMING_WR_EN;
+ writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
+
+ return 0;
+
+ release:
+ iounmap(p->csr_base);
+ return err;
+}
+
+static const struct pci_device_id vr_nor_pci_ids[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x500D)},
+ {0,}
+};
+
+static void vr_nor_pci_remove(struct pci_dev *dev)
+{
+ struct vr_nor_mtd *p = pci_get_drvdata(dev);
+
+ vr_nor_destroy_partitions(p);
+ vr_nor_destroy_mtd_setup(p);
+ vr_nor_destroy_maps(p);
+ kfree(p);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+}
+
+static int vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct vr_nor_mtd *p = NULL;
+ unsigned int exp_timing_cs0;
+ int err;
+
+ err = pci_enable_device(dev);
+ if (err)
+ goto out;
+
+ err = pci_request_regions(dev, DRV_NAME);
+ if (err)
+ goto disable_dev;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!p)
+ goto release;
+
+ p->dev = dev;
+
+ err = vr_nor_init_maps(p);
+ if (err)
+ goto release;
+
+ err = vr_nor_mtd_setup(p);
+ if (err)
+ goto destroy_maps;
+
+ err = vr_nor_init_partitions(p);
+ if (err)
+ goto destroy_mtd_setup;
+
+ pci_set_drvdata(dev, p);
+
+ return 0;
+
+ destroy_mtd_setup:
+ map_destroy(p->info);
+
+ destroy_maps:
+ /* write-protect the flash bank */
+ exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
+ exp_timing_cs0 &= ~TIMING_WR_EN;
+ writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
+
+ /* unmap the flash window */
+ iounmap(p->map.virt);
+
+ /* unmap the csr window */
+ iounmap(p->csr_base);
+
+ release:
+ kfree(p);
+ pci_release_regions(dev);
+
+ disable_dev:
+ pci_disable_device(dev);
+
+ out:
+ return err;
+}
+
+static struct pci_driver vr_nor_pci_driver = {
+ .name = DRV_NAME,
+ .probe = vr_nor_pci_probe,
+ .remove = vr_nor_pci_remove,
+ .id_table = vr_nor_pci_ids,
+};
+
+module_pci_driver(vr_nor_pci_driver);
+
+MODULE_AUTHOR("Andy Lowe");
+MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, vr_nor_pci_ids);
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
new file mode 100644
index 0000000000..832b880d1a
--- /dev/null
+++ b/drivers/mtd/maps/l440gx.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * BIOS Flash chip on Intel 440GX board.
+ *
+ * Bug: this currently does not work under linuxBIOS.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+
+#define PIIXE_IOBASE_RESOURCE 11
+
+#define WINDOW_ADDR 0xfff00000
+#define WINDOW_SIZE 0x00100000
+#define BUSWIDTH 1
+
+static u32 iobase;
+#define IOBASE iobase
+#define TRIBUF_PORT (IOBASE+0x37)
+#define VPP_PORT (IOBASE+0x28)
+
+static struct mtd_info *mymtd;
+
+
+/* Is this really the vpp port? */
+static DEFINE_SPINLOCK(l440gx_vpp_lock);
+static int l440gx_vpp_refcnt;
+static void l440gx_set_vpp(struct map_info *map, int vpp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&l440gx_vpp_lock, flags);
+ if (vpp) {
+ if (++l440gx_vpp_refcnt == 1) /* first nested 'on' */
+ outl(inl(VPP_PORT) | 1, VPP_PORT);
+ } else {
+ if (--l440gx_vpp_refcnt == 0) /* last nested 'off' */
+ outl(inl(VPP_PORT) & ~1, VPP_PORT);
+ }
+ spin_unlock_irqrestore(&l440gx_vpp_lock, flags);
+}
+
+static struct map_info l440gx_map = {
+ .name = "L440GX BIOS",
+ .size = WINDOW_SIZE,
+ .bankwidth = BUSWIDTH,
+ .phys = WINDOW_ADDR,
+#if 0
+ /* FIXME verify that this is the
+ * appropriate code for vpp enable/disable
+ */
+ .set_vpp = l440gx_set_vpp
+#endif
+};
+
+static int __init init_l440gx(void)
+{
+ struct pci_dev *dev, *pm_dev;
+ struct resource *pm_iobase;
+ __u16 word;
+
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82371AB_0, NULL);
+
+ pm_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
+
+ pci_dev_put(dev);
+
+ if (!dev || !pm_dev) {
+ printk(KERN_NOTICE "L440GX flash mapping: failed to find PIIX4 ISA bridge, cannot continue\n");
+ pci_dev_put(pm_dev);
+ return -ENODEV;
+ }
+
+ l440gx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
+
+ if (!l440gx_map.virt) {
+ printk(KERN_WARNING "Failed to ioremap L440GX flash region\n");
+ pci_dev_put(pm_dev);
+ return -ENOMEM;
+ }
+ simple_map_init(&l440gx_map);
+ pr_debug("window_addr = %p\n", l440gx_map.virt);
+
+ /* Setup the pm iobase resource
+ * This code should move into some kind of generic bridge
+ * driver but for the moment I'm content with getting the
+ * allocation correct.
+ */
+ pm_iobase = &pm_dev->resource[PIIXE_IOBASE_RESOURCE];
+ if (!(pm_iobase->flags & IORESOURCE_IO)) {
+ pm_iobase->name = "pm iobase";
+ pm_iobase->start = 0;
+ pm_iobase->end = 63;
+ pm_iobase->flags = IORESOURCE_IO;
+
+ /* Put the current value in the resource */
+ pci_read_config_dword(pm_dev, 0x40, &iobase);
+ iobase &= ~1;
+ pm_iobase->start += iobase & ~1;
+ pm_iobase->end += iobase & ~1;
+
+ pci_dev_put(pm_dev);
+
+ /* Allocate the resource region */
+ if (pci_assign_resource(pm_dev, PIIXE_IOBASE_RESOURCE) != 0) {
+ pci_dev_put(dev);
+ pci_dev_put(pm_dev);
+ printk(KERN_WARNING "Could not allocate pm iobase resource\n");
+ iounmap(l440gx_map.virt);
+ return -ENXIO;
+ }
+ }
+ /* Set the iobase */
+ iobase = pm_iobase->start;
+ pci_write_config_dword(pm_dev, 0x40, iobase | 1);
+
+
+ /* Set XBCS# */
+ pci_read_config_word(dev, 0x4e, &word);
+ word |= 0x4;
+ pci_write_config_word(dev, 0x4e, word);
+
+ /* Supply write voltage to the chip */
+ l440gx_set_vpp(&l440gx_map, 1);
+
+ /* Enable the gate on the WE line */
+ outb(inb(TRIBUF_PORT) & ~1, TRIBUF_PORT);
+
+ printk(KERN_NOTICE "Enabled WE line to L440GX BIOS flash chip.\n");
+
+ mymtd = do_map_probe("jedec_probe", &l440gx_map);
+ if (!mymtd) {
+ printk(KERN_NOTICE "JEDEC probe on BIOS chip failed. Using ROM\n");
+ mymtd = do_map_probe("map_rom", &l440gx_map);
+ }
+ if (mymtd) {
+ mymtd->owner = THIS_MODULE;
+
+ mtd_device_register(mymtd, NULL, 0);
+ return 0;
+ }
+
+ iounmap(l440gx_map.virt);
+ return -ENXIO;
+}
+
+static void __exit cleanup_l440gx(void)
+{
+ mtd_device_unregister(mymtd);
+ map_destroy(mymtd);
+
+ iounmap(l440gx_map.virt);
+}
+
+module_init(init_l440gx);
+module_exit(cleanup_l440gx);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("MTD map driver for BIOS chips on Intel L440GX motherboards");
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
new file mode 100644
index 0000000000..a1da1c8973
--- /dev/null
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2004 Liu Peng Infineon IFAP DC COM CPE
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/cfi.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/physmap.h>
+#include <linux/of.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * The NOR flash is connected to the same external bus unit (EBU) as PCI.
+ * To make PCI work we need to enable the endianness swapping for the address
+ * written to the EBU. This endianness swapping works for PCI correctly but
+ * fails for attached NOR devices. To work around this we need to use a complex
+ * map. The workaround involves swapping all addresses whilst probing the chip.
+ * Once probing is complete we stop swapping the addresses but swizzle the
+ * unlock addresses to ensure that access to the NOR device works correctly.
+ */
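+
+/*
+ * Illustration (offsets assumed): while map_priv_1 is LTQ_NOR_PROBING,
+ * every 16-bit access below flips address bit 1, so a probe read of
+ * byte offset 0xaaa really hits 0xaa8 on the bus. Once probing is done
+ * the flip is dropped and only the CFI unlock cycles still need
+ * correcting, which ltq_mtd_probe() does by toggling bit 0 of the
+ * unlock addresses (typically 0x555/0x2aa on AMD-style chips), e.g.
+ * 0x555 becomes 0x554 and 0x2aa becomes 0x2ab.
+ */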
+
+enum {
+ LTQ_NOR_PROBING,
+ LTQ_NOR_NORMAL
+};
+
+struct ltq_mtd {
+ struct resource *res;
+ struct mtd_info *mtd;
+ struct map_info *map;
+};
+
+static const char ltq_map_name[] = "ltq_nor";
+
+static map_word
+ltq_read16(struct map_info *map, unsigned long adr)
+{
+ unsigned long flags;
+ map_word temp;
+
+ if (map->map_priv_1 == LTQ_NOR_PROBING)
+ adr ^= 2;
+ spin_lock_irqsave(&ebu_lock, flags);
+ temp.x[0] = *(u16 *)(map->virt + adr);
+ spin_unlock_irqrestore(&ebu_lock, flags);
+ return temp;
+}
+
+static void
+ltq_write16(struct map_info *map, map_word d, unsigned long adr)
+{
+ unsigned long flags;
+
+ if (map->map_priv_1 == LTQ_NOR_PROBING)
+ adr ^= 2;
+ spin_lock_irqsave(&ebu_lock, flags);
+ *(u16 *)(map->virt + adr) = d.x[0];
+ spin_unlock_irqrestore(&ebu_lock, flags);
+}
+
+/*
+ * The following 2 functions copy data between iomem and a cached memory
+ * section. As memcpy() makes use of pre-fetching we cannot use it here.
+ * The normal alternative of using memcpy_{to,from}io also makes use of
+ * memcpy() on MIPS so it is not applicable either. We are therefore stuck
+ * with having to use our own loop.
+ */
+static void
+ltq_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ unsigned char *f = (unsigned char *)map->virt + from;
+ unsigned char *t = (unsigned char *)to;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+ while (len--)
+ *t++ = *f++;
+ spin_unlock_irqrestore(&ebu_lock, flags);
+}
+
+static void
+ltq_copy_to(struct map_info *map, unsigned long to,
+ const void *from, ssize_t len)
+{
+ unsigned char *f = (unsigned char *)from;
+ unsigned char *t = (unsigned char *)map->virt + to;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+ while (len--)
+ *t++ = *f++;
+ spin_unlock_irqrestore(&ebu_lock, flags);
+}
+
+static int
+ltq_mtd_probe(struct platform_device *pdev)
+{
+ struct ltq_mtd *ltq_mtd;
+ struct cfi_private *cfi;
+ int err;
+
+ ltq_mtd = devm_kzalloc(&pdev->dev, sizeof(struct ltq_mtd), GFP_KERNEL);
+ if (!ltq_mtd)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ltq_mtd);
+
+ ltq_mtd->map = devm_kzalloc(&pdev->dev, sizeof(struct map_info),
+ GFP_KERNEL);
+ if (!ltq_mtd->map)
+ return -ENOMEM;
+
+ ltq_mtd->map->virt = devm_platform_get_and_ioremap_resource(pdev, 0, &ltq_mtd->res);
+ if (IS_ERR(ltq_mtd->map->virt))
+ return PTR_ERR(ltq_mtd->map->virt);
+
+ ltq_mtd->map->phys = ltq_mtd->res->start;
+ ltq_mtd->map->size = resource_size(ltq_mtd->res);
+
+ ltq_mtd->map->name = ltq_map_name;
+ ltq_mtd->map->bankwidth = 2;
+ ltq_mtd->map->read = ltq_read16;
+ ltq_mtd->map->write = ltq_write16;
+ ltq_mtd->map->copy_from = ltq_copy_from;
+ ltq_mtd->map->copy_to = ltq_copy_to;
+
+ ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING;
+ ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map);
+ ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL;
+
+ if (!ltq_mtd->mtd) {
+ dev_err(&pdev->dev, "probing failed\n");
+ return -ENXIO;
+ }
+
+ ltq_mtd->mtd->dev.parent = &pdev->dev;
+ mtd_set_of_node(ltq_mtd->mtd, pdev->dev.of_node);
+
+ cfi = ltq_mtd->map->fldrv_priv;
+ cfi->addr_unlock1 ^= 1;
+ cfi->addr_unlock2 ^= 1;
+
+ err = mtd_device_register(ltq_mtd->mtd, NULL, 0);
+ if (err) {
+ dev_err(&pdev->dev, "failed to add partitions\n");
+ goto err_destroy;
+ }
+
+ return 0;
+
+err_destroy:
+ map_destroy(ltq_mtd->mtd);
+ return err;
+}
+
+static int
+ltq_mtd_remove(struct platform_device *pdev)
+{
+ struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
+
+ if (ltq_mtd && ltq_mtd->mtd) {
+ mtd_device_unregister(ltq_mtd->mtd);
+ map_destroy(ltq_mtd->mtd);
+ }
+ return 0;
+}
+
+static const struct of_device_id ltq_mtd_match[] = {
+ { .compatible = "lantiq,nor" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_mtd_match);
+
+static struct platform_driver ltq_mtd_driver = {
+ .probe = ltq_mtd_probe,
+ .remove = ltq_mtd_remove,
+ .driver = {
+ .name = "ltq-nor",
+ .of_match_table = ltq_mtd_match,
+ },
+};
+
+module_platform_driver(ltq_mtd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Crispin <john@phrozen.org>");
+MODULE_DESCRIPTION("Lantiq SoC NOR");
diff --git a/drivers/mtd/maps/map_funcs.c b/drivers/mtd/maps/map_funcs.c
new file mode 100644
index 0000000000..5b684c170d
--- /dev/null
+++ b/drivers/mtd/maps/map_funcs.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Out-of-line map I/O functions for simple maps when CONFIG_COMPLEX_MAPPINGS
+ * is enabled.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/mtd/map.h>
+#include <linux/mtd/xip.h>
+
+static map_word __xipram simple_map_read(struct map_info *map, unsigned long ofs)
+{
+ return inline_map_read(map, ofs);
+}
+
+static void __xipram simple_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
+{
+ inline_map_write(map, datum, ofs);
+}
+
+static void __xipram simple_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ inline_map_copy_from(map, to, from, len);
+}
+
+static void __xipram simple_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ inline_map_copy_to(map, to, from, len);
+}
+
+void simple_map_init(struct map_info *map)
+{
+ BUG_ON(!map_bankwidth_supported(map->bankwidth));
+
+ map->read = simple_map_read;
+ map->write = simple_map_write;
+ map->copy_from = simple_map_copy_from;
+ map->copy_to = simple_map_copy_to;
+}
+
+EXPORT_SYMBOL(simple_map_init);
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
new file mode 100644
index 0000000000..0bb651624f
--- /dev/null
+++ b/drivers/mtd/maps/netsc520.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* netsc520.c -- MTD map driver for AMD NetSc520 Demonstration Board
+ *
+ * Copyright (C) 2001 Mark Langsdorf (mark.langsdorf@amd.com)
+ * based on sc520cdp.c by Sysgo Real-Time Solutions GmbH
+ *
+ * The NetSc520 is a demonstration board for the Elan Sc520 processor available
+ * from AMD. It has a single bank of 16 megs of 32-bit Flash ROM and another
+ * 16 megs of SDRAM.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+
+/*
+** The single, 16 megabyte flash bank is divided into four virtual
+** partitions. The first partition is 768 KiB and is intended to
+** store the kernel image loaded by the bootstrap loader. The second
+** partition is 256 KiB and holds the BIOS image. The third
+** partition is 14.5 MiB and is intended for the flash file system
+** image. The last partition is 512 KiB and contains another copy
+** of the BIOS image and the reset vector.
+**
+** Only the third partition should be mounted. The first partition
+** should not be mounted, but it can be erased and written to using the
+** MTD character routines. The second and fourth partitions should
+** not be touched - it is possible to corrupt the BIOS image by
+** mounting these partitions, and potentially the board will not be
+** recoverable afterwards.
+*/
+
+/* partition_info gives details on the logical partitions that the single
+ * flash device is split into. If the size is zero we use up to the end of
+ * the device. */
+static const struct mtd_partition partition_info[] = {
+ {
+ .name = "NetSc520 boot kernel",
+ .offset = 0,
+ .size = 0xc0000
+ },
+ {
+ .name = "NetSc520 Low BIOS",
+ .offset = 0xc0000,
+ .size = 0x40000
+ },
+ {
+ .name = "NetSc520 file system",
+ .offset = 0x100000,
+ .size = 0xe80000
+ },
+ {
+ .name = "NetSc520 High BIOS",
+ .offset = 0xf80000,
+ .size = 0x80000
+ },
+};
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
+
+#define WINDOW_SIZE 0x00100000
+#define WINDOW_ADDR 0x00200000
+
+static struct map_info netsc520_map = {
+ .name = "netsc520 Flash Bank",
+ .size = WINDOW_SIZE,
+ .bankwidth = 4,
+ .phys = WINDOW_ADDR,
+};
+
+#define NUM_FLASH_BANKS ARRAY_SIZE(netsc520_map)
+
+static struct mtd_info *mymtd;
+
+static int __init init_netsc520(void)
+{
+ printk(KERN_NOTICE "NetSc520 flash device: 0x%Lx at 0x%Lx\n",
+ (unsigned long long)netsc520_map.size,
+ (unsigned long long)netsc520_map.phys);
+ netsc520_map.virt = ioremap(netsc520_map.phys, netsc520_map.size);
+
+ if (!netsc520_map.virt) {
+ printk("Failed to ioremap\n");
+ return -EIO;
+ }
+
+ simple_map_init(&netsc520_map);
+
+ mymtd = do_map_probe("cfi_probe", &netsc520_map);
+ if(!mymtd)
+ mymtd = do_map_probe("map_ram", &netsc520_map);
+ if(!mymtd)
+ mymtd = do_map_probe("map_rom", &netsc520_map);
+
+ if (!mymtd) {
+ iounmap(netsc520_map.virt);
+ return -ENXIO;
+ }
+
+ mymtd->owner = THIS_MODULE;
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
+ return 0;
+}
+
+static void __exit cleanup_netsc520(void)
+{
+ if (mymtd) {
+ mtd_device_unregister(mymtd);
+ map_destroy(mymtd);
+ }
+ if (netsc520_map.virt) {
+ iounmap(netsc520_map.virt);
+ netsc520_map.virt = NULL;
+ }
+}
+
+module_init(init_netsc520);
+module_exit(cleanup_netsc520);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@amd.com>");
+MODULE_DESCRIPTION("MTD map driver for AMD NetSc520 Demonstration Board");
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
new file mode 100644
index 0000000000..7d349874ff
--- /dev/null
+++ b/drivers/mtd/maps/nettel.c
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************/
+
+/*
+ * nettel.c -- mappings for NETtel/SecureEdge/SnapGear (x86) boards.
+ *
+ * (C) Copyright 2000-2001, Greg Ungerer (gerg@snapgear.com)
+ * (C) Copyright 2001-2002, SnapGear (www.snapgear.com)
+ */
+
+/****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/cfi.h>
+#include <linux/reboot.h>
+#include <linux/err.h>
+#include <linux/kdev_t.h>
+#include <linux/root_dev.h>
+#include <asm/io.h>
+
+/****************************************************************************/
+
+#define INTEL_BUSWIDTH 1
+#define AMD_WINDOW_MAXSIZE 0x00200000
+#define AMD_BUSWIDTH 1
+
+/*
+ * PAR masks and shifts, assuming 64K pages.
+ */
+#define SC520_PAR_ADDR_MASK 0x00003fff
+#define SC520_PAR_ADDR_SHIFT 16
+#define SC520_PAR_TO_ADDR(par) \
+ (((par)&SC520_PAR_ADDR_MASK) << SC520_PAR_ADDR_SHIFT)
+
+#define SC520_PAR_SIZE_MASK 0x01ffc000
+#define SC520_PAR_SIZE_SHIFT 2
+#define SC520_PAR_TO_SIZE(par) \
+ ((((par)&SC520_PAR_SIZE_MASK) << SC520_PAR_SIZE_SHIFT) + (64*1024))
+
+#define SC520_PAR(cs, addr, size) \
+ ((cs) | \
+ ((((size)-(64*1024)) >> SC520_PAR_SIZE_SHIFT) & SC520_PAR_SIZE_MASK) | \
+ (((addr) >> SC520_PAR_ADDR_SHIFT) & SC520_PAR_ADDR_MASK))
+
+#define SC520_PAR_BOOTCS 0x8a000000
+#define SC520_PAR_ROMCS1 0xaa000000
+#define SC520_PAR_ROMCS2 0xca000000 /* Cache disabled, 64K page */
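+
+/*
+ * Worked example (values taken from nettel_init() below): the 2MiB
+ * BOOTCS window at 0x20000000 encodes as
+ *
+ *	SC520_PAR(SC520_PAR_BOOTCS, 0x20000000, 0x00200000)
+ *	  = 0x8a000000 | 0x0007c000 | 0x00002000 = 0x8a07e000
+ *
+ * where 0x0007c000 is (size - 64K) >> 2 and 0x00002000 is addr >> 16;
+ * SC520_PAR_TO_ADDR() and SC520_PAR_TO_SIZE() just invert those fields.
+ */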
+
+static void *nettel_mmcrp = NULL;
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+static struct mtd_info *intel_mtd;
+#endif
+static struct mtd_info *amd_mtd;
+
+/****************************************************************************/
+
+/****************************************************************************/
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+static struct map_info nettel_intel_map = {
+ .name = "SnapGear Intel",
+ .size = 0,
+ .bankwidth = INTEL_BUSWIDTH,
+};
+
+static struct mtd_partition nettel_intel_partitions[] = {
+ {
+ .name = "SnapGear kernel",
+ .offset = 0,
+ .size = 0x000e0000
+ },
+ {
+ .name = "SnapGear filesystem",
+ .offset = 0x00100000,
+ },
+ {
+ .name = "SnapGear config",
+ .offset = 0x000e0000,
+ .size = 0x00020000
+ },
+ {
+ .name = "SnapGear Intel",
+ .offset = 0
+ },
+ {
+ .name = "SnapGear BIOS Config",
+ .offset = 0x007e0000,
+ .size = 0x00020000
+ },
+ {
+ .name = "SnapGear BIOS",
+ .offset = 0x007e0000,
+ .size = 0x00020000
+ },
+};
+#endif
+
+static struct map_info nettel_amd_map = {
+ .name = "SnapGear AMD",
+ .size = AMD_WINDOW_MAXSIZE,
+ .bankwidth = AMD_BUSWIDTH,
+};
+
+static const struct mtd_partition nettel_amd_partitions[] = {
+ {
+ .name = "SnapGear BIOS config",
+ .offset = 0x000e0000,
+ .size = 0x00010000
+ },
+ {
+ .name = "SnapGear BIOS",
+ .offset = 0x000f0000,
+ .size = 0x00010000
+ },
+ {
+ .name = "SnapGear AMD",
+ .offset = 0
+ },
+ {
+ .name = "SnapGear high BIOS",
+ .offset = 0x001f0000,
+ .size = 0x00010000
+ }
+};
+
+#define NUM_AMD_PARTITIONS ARRAY_SIZE(nettel_amd_partitions)
+
+/****************************************************************************/
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+
+/*
+ * Set the Intel flash back to read mode since some old boot
+ * loaders don't.
+ */
+static int nettel_reboot_notifier(struct notifier_block *nb, unsigned long val, void *v)
+{
+ struct cfi_private *cfi = nettel_intel_map.fldrv_priv;
+ unsigned long b;
+
+ /* Make sure all FLASH chips are put back into read mode */
+ for (b = 0; (b < nettel_intel_partitions[3].size); b += 0x100000) {
+ cfi_send_gen_cmd(0xff, 0x55, b, &nettel_intel_map, cfi,
+ cfi->device_type, NULL);
+ }
+ return(NOTIFY_OK);
+}
+
+static struct notifier_block nettel_notifier_block = {
+ nettel_reboot_notifier, NULL, 0
+};
+
+#endif
+
+/****************************************************************************/
+
+static int __init nettel_init(void)
+{
+ volatile unsigned long *amdpar;
+ unsigned long amdaddr, maxsize;
+ int num_amd_partitions=0;
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ volatile unsigned long *intel0par, *intel1par;
+ unsigned long orig_bootcspar, orig_romcs1par;
+ unsigned long intel0addr, intel0size;
+ unsigned long intel1addr, intel1size;
+ int intelboot, intel0cs, intel1cs;
+ int num_intel_partitions;
+#endif
+ int rc = 0;
+
+ nettel_mmcrp = (void *) ioremap(0xfffef000, 4096);
+ if (nettel_mmcrp == NULL) {
+ printk("SNAPGEAR: failed to disable MMCR cache??\n");
+ return(-EIO);
+ }
+
+ /* Set CPU clock to be 33.000MHz */
+ *((unsigned char *) (nettel_mmcrp + 0xc64)) = 0x01;
+
+ amdpar = (volatile unsigned long *) (nettel_mmcrp + 0xc4);
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ intelboot = 0;
+ intel0cs = SC520_PAR_ROMCS1;
+ intel0par = (volatile unsigned long *) (nettel_mmcrp + 0xc0);
+ intel1cs = SC520_PAR_ROMCS2;
+ intel1par = (volatile unsigned long *) (nettel_mmcrp + 0xbc);
+
+ /*
+ * Save the CS settings then ensure ROMCS1 and ROMCS2 are off,
+ * otherwise they might clash with where we try to map BOOTCS.
+ */
+ orig_bootcspar = *amdpar;
+ orig_romcs1par = *intel0par;
+ *intel0par = 0;
+ *intel1par = 0;
+#endif
+
+ /*
+ * The first thing to do is determine if we have a separate
+ * boot FLASH device. Typically this is a small (1 to 2MB)
+ * AMD FLASH part. It seems that device size is about the
+ * only way to tell if this is the case...
+ */
+ amdaddr = 0x20000000;
+ maxsize = AMD_WINDOW_MAXSIZE;
+
+ *amdpar = SC520_PAR(SC520_PAR_BOOTCS, amdaddr, maxsize);
+ __asm__ ("wbinvd");
+
+ nettel_amd_map.phys = amdaddr;
+ nettel_amd_map.virt = ioremap(amdaddr, maxsize);
+ if (!nettel_amd_map.virt) {
+ printk("SNAPGEAR: failed to ioremap() BOOTCS\n");
+ iounmap(nettel_mmcrp);
+ return(-EIO);
+ }
+ simple_map_init(&nettel_amd_map);
+
+ if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) {
+ printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n",
+ (int)(amd_mtd->size>>10));
+
+ amd_mtd->owner = THIS_MODULE;
+
+ /* The high BIOS partition is only present for 2MB units */
+ num_amd_partitions = NUM_AMD_PARTITIONS;
+ if (amd_mtd->size < AMD_WINDOW_MAXSIZE)
+ num_amd_partitions--;
+ /* Don't add the partition until after the primary INTEL's */
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ /*
+ * Map the Intel flash into memory after the AMD
+ * It has to start on a multiple of maxsize.
+ */
+ maxsize = SC520_PAR_TO_SIZE(orig_romcs1par);
+ if (maxsize < (32 * 1024 * 1024))
+ maxsize = (32 * 1024 * 1024);
+ intel0addr = amdaddr + maxsize;
+#endif
+ } else {
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ /* INTEL boot FLASH */
+ intelboot++;
+
+ if (!orig_romcs1par) {
+ intel0cs = SC520_PAR_BOOTCS;
+ intel0par = (volatile unsigned long *)
+ (nettel_mmcrp + 0xc4);
+ intel1cs = SC520_PAR_ROMCS1;
+ intel1par = (volatile unsigned long *)
+ (nettel_mmcrp + 0xc0);
+
+ intel0addr = SC520_PAR_TO_ADDR(orig_bootcspar);
+ maxsize = SC520_PAR_TO_SIZE(orig_bootcspar);
+ } else {
+ /* Kernel base is on ROMCS1, not BOOTCS */
+ intel0cs = SC520_PAR_ROMCS1;
+ intel0par = (volatile unsigned long *)
+ (nettel_mmcrp + 0xc0);
+ intel1cs = SC520_PAR_BOOTCS;
+ intel1par = (volatile unsigned long *)
+ (nettel_mmcrp + 0xc4);
+
+ intel0addr = SC520_PAR_TO_ADDR(orig_romcs1par);
+ maxsize = SC520_PAR_TO_SIZE(orig_romcs1par);
+ }
+
+ /* Destroy useless AMD MTD mapping */
+ amd_mtd = NULL;
+ iounmap(nettel_amd_map.virt);
+ nettel_amd_map.virt = NULL;
+#else
+ /* Only AMD flash supported */
+ rc = -ENXIO;
+ goto out_unmap2;
+#endif
+ }
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ /*
+ * We have determined the INTEL FLASH configuration, so lets
+ * go ahead and probe for them now.
+ */
+
+ /* Set PAR to the maximum size */
+ if (maxsize < (32 * 1024 * 1024))
+ maxsize = (32 * 1024 * 1024);
+ *intel0par = SC520_PAR(intel0cs, intel0addr, maxsize);
+
+ /* Turn other PAR off so the first probe doesn't find it */
+ *intel1par = 0;
+
+ /* Probe for the size of the first Intel flash */
+ nettel_intel_map.size = maxsize;
+ nettel_intel_map.phys = intel0addr;
+ nettel_intel_map.virt = ioremap(intel0addr, maxsize);
+ if (!nettel_intel_map.virt) {
+ printk("SNAPGEAR: failed to ioremap() ROMCS1\n");
+ rc = -EIO;
+ goto out_unmap2;
+ }
+ simple_map_init(&nettel_intel_map);
+
+ intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
+ if (!intel_mtd) {
+ rc = -ENXIO;
+ goto out_unmap1;
+ }
+
+ /* Set PAR to the detected size */
+ intel0size = intel_mtd->size;
+ *intel0par = SC520_PAR(intel0cs, intel0addr, intel0size);
+
+ /*
+ * Map second Intel FLASH right after first. Set its size to the
+ * same maxsize used for the first Intel FLASH.
+ */
+ intel1addr = intel0addr + intel0size;
+ *intel1par = SC520_PAR(intel1cs, intel1addr, maxsize);
+ __asm__ ("wbinvd");
+
+ maxsize += intel0size;
+
+ /* Delete the old map and probe again to do both chips */
+ map_destroy(intel_mtd);
+ intel_mtd = NULL;
+ iounmap(nettel_intel_map.virt);
+
+ nettel_intel_map.size = maxsize;
+ nettel_intel_map.virt = ioremap(intel0addr, maxsize);
+ if (!nettel_intel_map.virt) {
+ printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n");
+ rc = -EIO;
+ goto out_unmap2;
+ }
+
+ intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
+ if (! intel_mtd) {
+ rc = -ENXIO;
+ goto out_unmap1;
+ }
+
+ intel1size = intel_mtd->size - intel0size;
+ if (intel1size > 0) {
+ *intel1par = SC520_PAR(intel1cs, intel1addr, intel1size);
+ __asm__ ("wbinvd");
+ } else {
+ *intel1par = 0;
+ }
+
+ printk(KERN_NOTICE "SNAPGEAR: Intel flash device size = %lldKiB\n",
+ (unsigned long long)(intel_mtd->size >> 10));
+
+ intel_mtd->owner = THIS_MODULE;
+
+ num_intel_partitions = ARRAY_SIZE(nettel_intel_partitions);
+
+ if (intelboot) {
+ /*
+ * Adjust offset and size of last boot partition.
+ * Must allow for BIOS region at end of FLASH.
+ */
+ nettel_intel_partitions[1].size = (intel0size + intel1size) -
+ (1024*1024 + intel_mtd->erasesize);
+ nettel_intel_partitions[3].size = intel0size + intel1size;
+ nettel_intel_partitions[4].offset =
+ (intel0size + intel1size) - intel_mtd->erasesize;
+ nettel_intel_partitions[4].size = intel_mtd->erasesize;
+ nettel_intel_partitions[5].offset =
+ nettel_intel_partitions[4].offset;
+ nettel_intel_partitions[5].size =
+ nettel_intel_partitions[4].size;
+ } else {
+ /* No BIOS regions when AMD boot */
+ num_intel_partitions -= 2;
+ }
+ rc = mtd_device_register(intel_mtd, nettel_intel_partitions,
+ num_intel_partitions);
+ if (rc)
+ goto out_map_destroy;
+#endif
+
+ if (amd_mtd) {
+ rc = mtd_device_register(amd_mtd, nettel_amd_partitions,
+ num_amd_partitions);
+ if (rc)
+ goto out_mtd_unreg;
+ }
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ register_reboot_notifier(&nettel_notifier_block);
+#endif
+
+ return rc;
+
+out_mtd_unreg:
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ mtd_device_unregister(intel_mtd);
+out_map_destroy:
+ map_destroy(intel_mtd);
+out_unmap1:
+ iounmap(nettel_intel_map.virt);
+#endif
+
+out_unmap2:
+ iounmap(nettel_mmcrp);
+ iounmap(nettel_amd_map.virt);
+
+ return rc;
+}
+
+/****************************************************************************/
+
+static void __exit nettel_cleanup(void)
+{
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ unregister_reboot_notifier(&nettel_notifier_block);
+#endif
+ if (amd_mtd) {
+ mtd_device_unregister(amd_mtd);
+ map_destroy(amd_mtd);
+ }
+ if (nettel_mmcrp) {
+ iounmap(nettel_mmcrp);
+ nettel_mmcrp = NULL;
+ }
+ if (nettel_amd_map.virt) {
+ iounmap(nettel_amd_map.virt);
+ nettel_amd_map.virt = NULL;
+ }
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ if (intel_mtd) {
+ mtd_device_unregister(intel_mtd);
+ map_destroy(intel_mtd);
+ }
+ if (nettel_intel_map.virt) {
+ iounmap(nettel_intel_map.virt);
+ nettel_intel_map.virt = NULL;
+ }
+#endif
+}
+
+/****************************************************************************/
+
+module_init(nettel_init);
+module_exit(nettel_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
+MODULE_DESCRIPTION("SnapGear/SecureEdge FLASH support");
+
+/****************************************************************************/
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
new file mode 100644
index 0000000000..ca00d211e7
--- /dev/null
+++ b/drivers/mtd/maps/pci.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/mtd/maps/pci.c
+ *
+ * Copyright (C) 2001 Russell King, All rights reserved.
+ *
+ * Generic PCI memory map driver. We support the following boards:
+ * - Intel IQ80310 ATU.
+ * - Intel EBSA285 (blank rom programming mode). Tested working 27/09/2001
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+struct map_pci_info;
+
+struct mtd_pci_info {
+ int (*init)(struct pci_dev *dev, struct map_pci_info *map);
+ void (*exit)(struct pci_dev *dev, struct map_pci_info *map);
+ unsigned long (*translate)(struct map_pci_info *map, unsigned long ofs);
+ const char *map_name;
+};
+
+struct map_pci_info {
+ struct map_info map;
+ void __iomem *base;
+ void (*exit)(struct pci_dev *dev, struct map_pci_info *map);
+ unsigned long (*translate)(struct map_pci_info *map, unsigned long ofs);
+ struct pci_dev *dev;
+};
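+
+/*
+ * Each supported board supplies one mtd_pci_info method table: the
+ * generic probe below calls ->init() to size and map the window, routes
+ * every access through ->translate(), and probes with ->map_name.
+ */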
+
+static map_word mtd_pci_read8(struct map_info *_map, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ map_word val;
+ val.x[0] = readb(map->base + map->translate(map, ofs));
+ return val;
+}
+
+static map_word mtd_pci_read32(struct map_info *_map, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ map_word val;
+ val.x[0] = readl(map->base + map->translate(map, ofs));
+ return val;
+}
+
+static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from, ssize_t len)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ memcpy_fromio(to, map->base + map->translate(map, from), len);
+}
+
+static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ writeb(val.x[0], map->base + map->translate(map, ofs));
+}
+
+static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ writel(val.x[0], map->base + map->translate(map, ofs));
+}
+
+static void mtd_pci_copyto(struct map_info *_map, unsigned long to, const void *from, ssize_t len)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ memcpy_toio(map->base + map->translate(map, to), from, len);
+}
+
+static const struct map_info mtd_pci_map = {
+ .phys = NO_XIP,
+ .copy_from = mtd_pci_copyfrom,
+ .copy_to = mtd_pci_copyto,
+};
+
+/*
+ * Intel IOP80310 Flash driver
+ */
+
+static int
+intel_iq80310_init(struct pci_dev *dev, struct map_pci_info *map)
+{
+ u32 win_base;
+
+ map->map.bankwidth = 1;
+ map->map.read = mtd_pci_read8;
+ map->map.write = mtd_pci_write8;
+
+ map->map.size = 0x00800000;
+ map->base = ioremap(pci_resource_start(dev, 0),
+ pci_resource_len(dev, 0));
+
+ if (!map->base)
+ return -ENOMEM;
+
+ /*
+ * We want to base the memory window at Xscale
+ * bus address 0, not 0x1000.
+ */
+ pci_read_config_dword(dev, 0x44, &win_base);
+ pci_write_config_dword(dev, 0x44, 0);
+
+ map->map.map_priv_2 = win_base;
+
+ return 0;
+}
+
+static void
+intel_iq80310_exit(struct pci_dev *dev, struct map_pci_info *map)
+{
+ if (map->base)
+ iounmap(map->base);
+ pci_write_config_dword(dev, 0x44, map->map.map_priv_2);
+}
+
+static unsigned long
+intel_iq80310_translate(struct map_pci_info *map, unsigned long ofs)
+{
+ unsigned long page_addr = ofs & 0x00400000;
+
+ /*
+ * This munges the flash location so we avoid
+ * the first 80 bytes (they appear to read nonsense).
+ */
+ if (page_addr) {
+ writel(0x00000008, map->base + 0x1558);
+ writel(0x00000000, map->base + 0x1550);
+ } else {
+ writel(0x00000007, map->base + 0x1558);
+ writel(0x00800000, map->base + 0x1550);
+ ofs += 0x00800000;
+ }
+
+ return ofs;
+}
+
+static struct mtd_pci_info intel_iq80310_info = {
+ .init = intel_iq80310_init,
+ .exit = intel_iq80310_exit,
+ .translate = intel_iq80310_translate,
+ .map_name = "cfi_probe",
+};
+
+/*
+ * Intel DC21285 driver
+ */
+
+static int
+intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map)
+{
+ unsigned long base, len;
+
+ base = pci_resource_start(dev, PCI_ROM_RESOURCE);
+ len = pci_resource_len(dev, PCI_ROM_RESOURCE);
+
+ if (!len || !base) {
+ /*
+ * No ROM resource
+ */
+ base = pci_resource_start(dev, 2);
+ len = pci_resource_len(dev, 2);
+
+ /*
+ * We need to re-allocate PCI BAR2 address range to the
+ * PCI ROM BAR, and disable PCI BAR2.
+ */
+ } else {
+ /*
+ * Hmm, if an address was allocated to the ROM resource, but
+ * not enabled, should we be allocating a new resource for it
+ * or simply enabling it?
+ */
+ pci_enable_rom(dev);
+ printk("%s: enabling expansion ROM\n", pci_name(dev));
+ }
+
+ if (!len || !base)
+ return -ENXIO;
+
+ map->map.bankwidth = 4;
+ map->map.read = mtd_pci_read32;
+ map->map.write = mtd_pci_write32;
+ map->map.size = len;
+ map->base = ioremap(base, len);
+
+ if (!map->base)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void
+intel_dc21285_exit(struct pci_dev *dev, struct map_pci_info *map)
+{
+ if (map->base)
+ iounmap(map->base);
+
+ /*
+ * We need to undo the PCI BAR2/PCI ROM BAR address alteration.
+ */
+ pci_disable_rom(dev);
+}
+
+static unsigned long
+intel_dc21285_translate(struct map_pci_info *map, unsigned long ofs)
+{
+ return ofs & 0x00ffffc0 ? ofs : (ofs ^ (1 << 5));
+}
+
+static struct mtd_pci_info intel_dc21285_info = {
+ .init = intel_dc21285_init,
+ .exit = intel_dc21285_exit,
+ .translate = intel_dc21285_translate,
+ .map_name = "jedec_probe",
+};
+
+/*
+ * PCI device ID table
+ */
+
+static const struct pci_device_id mtd_pci_ids[] = {
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x530d,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = PCI_CLASS_MEMORY_OTHER << 8,
+ .class_mask = 0xffff00,
+ .driver_data = (unsigned long)&intel_iq80310_info,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_DEC,
+ .device = PCI_DEVICE_ID_DEC_21285,
+ .subvendor = 0, /* DC21285 defaults to 0 on reset */
+ .subdevice = 0, /* DC21285 defaults to 0 on reset */
+ .driver_data = (unsigned long)&intel_dc21285_info,
+ },
+ { 0, }
+};
+
+/*
+ * Generic code follows.
+ */
+
+static int mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct mtd_pci_info *info = (struct mtd_pci_info *)id->driver_data;
+ struct map_pci_info *map = NULL;
+ struct mtd_info *mtd = NULL;
+ int err;
+
+ err = pci_enable_device(dev);
+ if (err)
+ goto out;
+
+ err = pci_request_regions(dev, "pci mtd");
+ if (err)
+ goto out;
+
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!map)
+ goto release;
+
+ map->map = mtd_pci_map;
+ map->map.name = pci_name(dev);
+ map->dev = dev;
+ map->exit = info->exit;
+ map->translate = info->translate;
+
+ err = info->init(dev, map);
+ if (err)
+ goto release;
+
+ mtd = do_map_probe(info->map_name, &map->map);
+ err = -ENODEV;
+ if (!mtd)
+ goto release;
+
+ mtd->owner = THIS_MODULE;
+ mtd_device_register(mtd, NULL, 0);
+
+ pci_set_drvdata(dev, mtd);
+
+ return 0;
+
+release:
+ if (map) {
+ map->exit(dev, map);
+ kfree(map);
+ }
+
+ pci_release_regions(dev);
+out:
+ return err;
+}
+
+static void mtd_pci_remove(struct pci_dev *dev)
+{
+ struct mtd_info *mtd = pci_get_drvdata(dev);
+ struct map_pci_info *map = mtd->priv;
+
+ mtd_device_unregister(mtd);
+ map_destroy(mtd);
+ map->exit(dev, map);
+ kfree(map);
+
+ pci_release_regions(dev);
+}
+
+static struct pci_driver mtd_pci_driver = {
+ .name = "MTD PCI",
+ .probe = mtd_pci_probe,
+ .remove = mtd_pci_remove,
+ .id_table = mtd_pci_ids,
+};
+
+module_pci_driver(mtd_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
+MODULE_DESCRIPTION("Generic PCI map driver");
+MODULE_DEVICE_TABLE(pci, mtd_pci_ids);
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
new file mode 100644
index 0000000000..2ac79e1ced
--- /dev/null
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -0,0 +1,753 @@
+/*
+ * pcmciamtd.c - MTD driver for PCMCIA flash memory cards
+ *
+ * Author: Simon Evans <spse@secret.org.uk>
+ *
+ * Copyright (C) 2002 Simon Evans
+ *
+ * Licence: GPL
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <asm/io.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+
+#define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg)
+
+#define DRIVER_DESC "PCMCIA Flash memory card driver"
+
+/* Size of the PCMCIA address space: 26 bits = 64 MB */
+#define MAX_PCMCIA_ADDR 0x4000000
+
+struct pcmciamtd_dev {
+ struct pcmcia_device *p_dev;
+ void __iomem *win_base; /* ioremapped address of PCMCIA window */
+ unsigned int win_size; /* size of window */
+ unsigned int offset; /* offset into card the window currently points at */
+ struct map_info pcmcia_map;
+ struct mtd_info *mtd_info;
+ int vpp;
+ char mtd_name[sizeof(struct cistpl_vers_1_t)];
+};
+
+
+/* Module parameters */
+
+/* 2 = do 16-bit transfers, 1 = do 8-bit transfers */
+static int bankwidth = 2;
+
+/* Speed of memory accesses, in ns */
+static int mem_speed;
+
+/* Force the size of an SRAM card */
+static int force_size;
+
+/* Force Vpp */
+static int vpp;
+
+/* Set Vpp */
+static int setvpp;
+
+/* Force card to be treated as FLASH, ROM or RAM */
+static int mem_type;
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+module_param(bankwidth, int, 0);
+MODULE_PARM_DESC(bankwidth, "Set bankwidth (1=8 bit, 2=16 bit, default=2)");
+module_param(mem_speed, int, 0);
+MODULE_PARM_DESC(mem_speed, "Set memory access speed in ns");
+module_param(force_size, int, 0);
+MODULE_PARM_DESC(force_size, "Force size of card in MiB (1-64)");
+module_param(setvpp, int, 0);
+MODULE_PARM_DESC(setvpp, "Set Vpp (0=Never, 1=On writes, 2=Always on, default=0)");
+module_param(vpp, int, 0);
+MODULE_PARM_DESC(vpp, "Vpp value in 1/10ths eg 33=3.3V 120=12V (Dangerous)");
+module_param(mem_type, int, 0);
+MODULE_PARM_DESC(mem_type, "Set Memory type (0=Flash, 1=RAM, 2=ROM, default=0)");
+
+
+/* read/write{8,16} copy_{from,to} routines with window remapping
+ * to access whole card
+ */
+static void __iomem *remap_window(struct map_info *map, unsigned long to)
+{
+ struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+ struct resource *win = (struct resource *) map->map_priv_2;
+ unsigned int offset;
+ int ret;
+
+ if (!pcmcia_dev_present(dev->p_dev)) {
+ pr_debug("device removed\n");
+ return NULL;
+ }
+
+ offset = to & ~(dev->win_size-1);
+ if (offset != dev->offset) {
+ pr_debug("Remapping window from 0x%8.8x to 0x%8.8x\n",
+ dev->offset, offset);
+ ret = pcmcia_map_mem_page(dev->p_dev, win, offset);
+ if (ret != 0)
+ return NULL;
+ dev->offset = offset;
+ }
+ return dev->win_base + (to & (dev->win_size-1));
+}
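+
+/*
+ * Illustration (window size assumed): with a 64KiB window,
+ * remap_window(map, 0x123456) computes offset = 0x123456 & ~0xffff
+ * = 0x120000, re-points the window there if it is not already mapped,
+ * and returns win_base + 0x3456.  All of the *_remap access helpers
+ * below funnel through this, so the whole card is reachable through
+ * one small window.
+ */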
+
+
+static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
+{
+ void __iomem *addr;
+ map_word d = {{0}};
+
+ addr = remap_window(map, ofs);
+ if(!addr)
+ return d;
+
+ d.x[0] = readb(addr);
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n", ofs, addr, d.x[0]);
+ return d;
+}
+
+
+static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
+{
+ void __iomem *addr;
+ map_word d = {{0}};
+
+ addr = remap_window(map, ofs);
+ if(!addr)
+ return d;
+
+ d.x[0] = readw(addr);
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n", ofs, addr, d.x[0]);
+ return d;
+}
+
+
+static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+ unsigned long win_size = dev->win_size;
+
+ pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
+ while(len) {
+ int toread = win_size - (from & (win_size-1));
+ void __iomem *addr;
+
+ if(toread > len)
+ toread = len;
+
+ addr = remap_window(map, from);
+ if(!addr)
+ return;
+
+ pr_debug("memcpy from %p to %p len = %d\n", addr, to, toread);
+ memcpy_fromio(to, addr, toread);
+ len -= toread;
+ to += toread;
+ from += toread;
+ }
+}
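+
+/*
+ * The loop above copies at most up to the next window boundary per
+ * iteration; e.g. with win_size = 0x2000, from = 0x1f00 and len = 0x300,
+ * the first chunk is 0x100 bytes (up to the boundary at 0x2000) and the
+ * remaining 0x200 bytes follow after one remap. (Illustrative values.)
+ */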
+
+
+static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long adr)
+{
+ void __iomem *addr = remap_window(map, adr);
+
+ if(!addr)
+ return;
+
+ pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n", adr, addr, d.x[0]);
+ writeb(d.x[0], addr);
+}
+
+
+static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long adr)
+{
+ void __iomem *addr = remap_window(map, adr);
+ if(!addr)
+ return;
+
+ pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n", adr, addr, d.x[0]);
+ writew(d.x[0], addr);
+}
+
+
+static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+ unsigned long win_size = dev->win_size;
+
+ pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
+ while(len) {
+ int towrite = win_size - (to & (win_size-1));
+ void __iomem *addr;
+
+ if(towrite > len)
+ towrite = len;
+
+ addr = remap_window(map, to);
+ if(!addr)
+ return;
+
+ pr_debug("memcpy from %p to %p len = %d\n", from, addr, towrite);
+ memcpy_toio(addr, from, towrite);
+ len -= towrite;
+ to += towrite;
+ from += towrite;
+ }
+}
+
+
+/* read/write{8,16} copy_{from,to} routines with direct access */
+
+#define DEV_REMOVED(x) (!(pcmcia_dev_present(((struct pcmciamtd_dev *)map->map_priv_1)->p_dev)))
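+/* Note: the argument is unused; the macro expands the caller's 'map' variable. */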
+
+static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
+{
+ void __iomem *win_base = (void __iomem *)map->map_priv_2;
+ map_word d = {{0}};
+
+ if(DEV_REMOVED(map))
+ return d;
+
+ d.x[0] = readb(win_base + ofs);
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n",
+ ofs, win_base + ofs, d.x[0]);
+ return d;
+}
+
+
+static map_word pcmcia_read16(struct map_info *map, unsigned long ofs)
+{
+ void __iomem *win_base = (void __iomem *)map->map_priv_2;
+ map_word d = {{0}};
+
+ if(DEV_REMOVED(map))
+ return d;
+
+ d.x[0] = readw(win_base + ofs);
+ pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n",
+ ofs, win_base + ofs, d.x[0]);
+ return d;
+}
+
+
+static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ void __iomem *win_base = (void __iomem *)map->map_priv_2;
+
+ if(DEV_REMOVED(map))
+ return;
+
+ pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
+ memcpy_fromio(to, win_base + from, len);
+}
+
+
+static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr)
+{
+ void __iomem *win_base = (void __iomem *)map->map_priv_2;
+
+ if(DEV_REMOVED(map))
+ return;
+
+ pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n",
+ adr, win_base + adr, d.x[0]);
+ writeb(d.x[0], win_base + adr);
+}
+
+
+static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr)
+{
+ void __iomem *win_base = (void __iomem *)map->map_priv_2;
+
+ if(DEV_REMOVED(map))
+ return;
+
+ pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n",
+ adr, win_base + adr, d.x[0]);
+ writew(d.x[0], win_base + adr);
+}
+
+
+static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ void __iomem *win_base = (void __iomem *)map->map_priv_2;
+
+ if(DEV_REMOVED(map))
+ return;
+
+ pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
+ memcpy_toio(win_base + to, from, len);
+}
+
+
+static DEFINE_MUTEX(pcmcia_vpp_lock);
+static int pcmcia_vpp_refcnt;
+static void pcmciamtd_set_vpp(struct map_info *map, int on)
+{
+ struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+ struct pcmcia_device *link = dev->p_dev;
+
+ pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp);
+ mutex_lock(&pcmcia_vpp_lock);
+ if (on) {
+ if (++pcmcia_vpp_refcnt == 1) /* first nested 'on' */
+ pcmcia_fixup_vpp(link, dev->vpp);
+ } else {
+ if (--pcmcia_vpp_refcnt == 0) /* last nested 'off' */
+ pcmcia_fixup_vpp(link, 0);
+ }
+ mutex_unlock(&pcmcia_vpp_lock);
+}
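+
+/*
+ * Example: with two stacked users the calls arrive as on/on/off/off; the
+ * refcount drives Vpp high only on the first 'on' and back to 0 only on
+ * the final 'off'.
+ */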
+
+
+static void pcmciamtd_release(struct pcmcia_device *link)
+{
+ struct pcmciamtd_dev *dev = link->priv;
+
+ pr_debug("link = 0x%p\n", link);
+
+ if (link->resource[2]->end) {
+ if(dev->win_base) {
+ iounmap(dev->win_base);
+ dev->win_base = NULL;
+ }
+ }
+ pcmcia_disable_device(link);
+}
+
+
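+/*
+ * The CISTPL_FORMAT and CISTPL_JEDEC walkers below only log what they
+ * find; returning -ENOSPC makes pcmcia_loop_tuple() stop after the first
+ * matching tuple.
+ */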
+static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev,
+ tuple_t *tuple,
+ void *priv_data)
+{
+ cisparse_t parse;
+
+ if (!pcmcia_parse_tuple(tuple, &parse)) {
+ cistpl_format_t *t = &parse.format;
+ (void)t; /* Shut up, gcc */
+ pr_debug("Format type: %u, Error Detection: %u, offset = %u, length =%u\n",
+ t->type, t->edc, t->offset, t->length);
+ }
+ return -ENOSPC;
+}
+
+static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev,
+ tuple_t *tuple,
+ void *priv_data)
+{
+ cisparse_t parse;
+ int i;
+
+ if (!pcmcia_parse_tuple(tuple, &parse)) {
+ cistpl_jedec_t *t = &parse.jedec;
+ for (i = 0; i < t->nid; i++)
+ pr_debug("JEDEC: 0x%02x 0x%02x\n",
+ t->id[i].mfr, t->id[i].info);
+ }
+ return -ENOSPC;
+}
+
+static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev,
+ tuple_t *tuple,
+ void *priv_data)
+{
+ struct pcmciamtd_dev *dev = priv_data;
+ cisparse_t parse;
+ cistpl_device_t *t = &parse.device;
+ int i;
+
+ if (pcmcia_parse_tuple(tuple, &parse))
+ return -EINVAL;
+
+ pr_debug("Common memory:\n");
+ dev->pcmcia_map.size = t->dev[0].size;
+ /* from here on: DEBUG only */
+ for (i = 0; i < t->ndev; i++) {
+ pr_debug("Region %d, type = %u\n", i, t->dev[i].type);
+ pr_debug("Region %d, wp = %u\n", i, t->dev[i].wp);
+ pr_debug("Region %d, speed = %u ns\n", i, t->dev[i].speed);
+ pr_debug("Region %d, size = %u bytes\n", i, t->dev[i].size);
+ }
+ return 0;
+}
+
+static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev,
+ tuple_t *tuple,
+ void *priv_data)
+{
+ struct pcmciamtd_dev *dev = priv_data;
+ cisparse_t parse;
+ cistpl_device_geo_t *t = &parse.device_geo;
+ int i;
+
+ if (pcmcia_parse_tuple(tuple, &parse))
+ return -EINVAL;
+
+ dev->pcmcia_map.bankwidth = t->geo[0].buswidth;
+ /* from here on: DEBUG only */
+ for (i = 0; i < t->ngeo; i++) {
+ pr_debug("region: %d bankwidth = %u\n", i, t->geo[i].buswidth);
+ pr_debug("region: %d erase_block = %u\n", i, t->geo[i].erase_block);
+ pr_debug("region: %d read_block = %u\n", i, t->geo[i].read_block);
+ pr_debug("region: %d write_block = %u\n", i, t->geo[i].write_block);
+ pr_debug("region: %d partition = %u\n", i, t->geo[i].partition);
+ pr_debug("region: %d interleave = %u\n", i, t->geo[i].interleave);
+ }
+ return 0;
+}
+
+
+static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev, int *new_name)
+{
+ int i;
+
+ if (p_dev->prod_id[0]) {
+ dev->mtd_name[0] = '\0';
+ for (i = 0; i < 4; i++) {
+ if (i)
+ strcat(dev->mtd_name, " ");
+ if (p_dev->prod_id[i])
+ strcat(dev->mtd_name, p_dev->prod_id[i]);
+ }
+ pr_debug("Found name: %s\n", dev->mtd_name);
+ }
+
+ pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL);
+ pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL);
+ pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev);
+ pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev);
+
+ if(!dev->pcmcia_map.size)
+ dev->pcmcia_map.size = MAX_PCMCIA_ADDR;
+
+ if(!dev->pcmcia_map.bankwidth)
+ dev->pcmcia_map.bankwidth = 2;
+
+ if(force_size) {
+ dev->pcmcia_map.size = force_size << 20;
+ pr_debug("size forced to %dM\n", force_size);
+ }
+
+ if(bankwidth) {
+ dev->pcmcia_map.bankwidth = bankwidth;
+ pr_debug("bankwidth forced to %d\n", bankwidth);
+ }
+
+ dev->pcmcia_map.name = dev->mtd_name;
+ if(!dev->mtd_name[0]) {
+ strcpy(dev->mtd_name, "PCMCIA Memory card");
+ *new_name = 1;
+ }
+
+ pr_debug("Device: Size: %lu Width:%d Name: %s\n",
+ dev->pcmcia_map.size,
+ dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
+}
+
+
+static int pcmciamtd_config(struct pcmcia_device *link)
+{
+ struct pcmciamtd_dev *dev = link->priv;
+ struct mtd_info *mtd = NULL;
+ int ret;
+ int i, j = 0;
+ static char *probes[] = { "jedec_probe", "cfi_probe" };
+ int new_name = 0;
+
+ pr_debug("link=0x%p\n", link);
+
+ card_settings(dev, link, &new_name);
+
+ dev->pcmcia_map.phys = NO_XIP;
+ dev->pcmcia_map.copy_from = pcmcia_copy_from_remap;
+ dev->pcmcia_map.copy_to = pcmcia_copy_to_remap;
+ if (dev->pcmcia_map.bankwidth == 1) {
+ dev->pcmcia_map.read = pcmcia_read8_remap;
+ dev->pcmcia_map.write = pcmcia_write8_remap;
+ } else {
+ dev->pcmcia_map.read = pcmcia_read16_remap;
+ dev->pcmcia_map.write = pcmcia_write16_remap;
+ }
+ if(setvpp == 1)
+ dev->pcmcia_map.set_vpp = pcmciamtd_set_vpp;
+
+	/* Request a memory window for PCMCIA. Some architectures can map
+	 * windows up to the maximum that PCMCIA can support (64MiB) - this
+	 * is ideal, so we aim for a window the size of the whole card;
+	 * otherwise we retry with progressively smaller windows until one
+	 * succeeds.
+	 */
+
+ link->resource[2]->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE;
+ link->resource[2]->flags |= (dev->pcmcia_map.bankwidth == 1) ?
+ WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16;
+ link->resource[2]->start = 0;
+ link->resource[2]->end = (force_size) ? force_size << 20 :
+ MAX_PCMCIA_ADDR;
+ dev->win_size = 0;
+
+ do {
+ int ret;
+ pr_debug("requesting window with size = %luKiB memspeed = %d\n",
+ (unsigned long) resource_size(link->resource[2]) >> 10,
+ mem_speed);
+ ret = pcmcia_request_window(link, link->resource[2], mem_speed);
+ pr_debug("ret = %d dev->win_size = %d\n", ret, dev->win_size);
+ if(ret) {
+ j++;
+ link->resource[2]->start = 0;
+ link->resource[2]->end = (force_size) ?
+ force_size << 20 : MAX_PCMCIA_ADDR;
+ link->resource[2]->end >>= j;
+ } else {
+ pr_debug("Got window of size %luKiB\n", (unsigned long)
+ resource_size(link->resource[2]) >> 10);
+ dev->win_size = resource_size(link->resource[2]);
+ break;
+ }
+ } while (link->resource[2]->end >= 0x1000);
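+
+	/*
+	 * E.g. on a socket that cannot map 64MiB, the loop above retries
+	 * with 32MiB, 16MiB, ... down to 4KiB before giving up
+	 * (illustrative sizes; the real limit is set by the socket driver).
+	 */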
+
+ pr_debug("dev->win_size = %d\n", dev->win_size);
+
+ if(!dev->win_size) {
+ dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n");
+ pcmciamtd_release(link);
+ return -ENODEV;
+ }
+ pr_debug("Allocated a window of %dKiB\n", dev->win_size >> 10);
+
+	/* Map in the memory window */
+ dev->win_base = ioremap(link->resource[2]->start,
+ resource_size(link->resource[2]));
+ if(!dev->win_base) {
+ dev_err(&dev->p_dev->dev, "ioremap(%pR) failed\n",
+ link->resource[2]);
+ pcmciamtd_release(link);
+ return -ENODEV;
+ }
+ pr_debug("mapped window dev = %p @ %pR, base = %p\n",
+ dev, link->resource[2], dev->win_base);
+
+ dev->offset = 0;
+ dev->pcmcia_map.map_priv_1 = (unsigned long)dev;
+ dev->pcmcia_map.map_priv_2 = (unsigned long)link->resource[2];
+
+ dev->vpp = (vpp) ? vpp : link->socket->socket.Vpp;
+ if(setvpp == 2) {
+ link->vpp = dev->vpp;
+ } else {
+ link->vpp = 0;
+ }
+
+ link->config_index = 0;
+ pr_debug("Setting Configuration\n");
+ ret = pcmcia_enable_device(link);
+ if (ret != 0) {
+ if (dev->win_base) {
+ iounmap(dev->win_base);
+ dev->win_base = NULL;
+ }
+ return -ENODEV;
+ }
+
+ if(mem_type == 1) {
+ mtd = do_map_probe("map_ram", &dev->pcmcia_map);
+ } else if(mem_type == 2) {
+ mtd = do_map_probe("map_rom", &dev->pcmcia_map);
+ } else {
+ for(i = 0; i < ARRAY_SIZE(probes); i++) {
+ pr_debug("Trying %s\n", probes[i]);
+ mtd = do_map_probe(probes[i], &dev->pcmcia_map);
+ if(mtd)
+ break;
+
+ pr_debug("FAILED: %s\n", probes[i]);
+ }
+ }
+
+ if(!mtd) {
+ pr_debug("Can not find an MTD\n");
+ pcmciamtd_release(link);
+ return -ENODEV;
+ }
+
+ dev->mtd_info = mtd;
+ mtd->owner = THIS_MODULE;
+
+ if(new_name) {
+ int size = 0;
+ char unit = ' ';
+ /* Since we are using a default name, make it better by adding
+ * in the size
+ */
+ if(mtd->size < 1048576) { /* <1MiB in size, show size in KiB */
+ size = mtd->size >> 10;
+ unit = 'K';
+ } else {
+ size = mtd->size >> 20;
+ unit = 'M';
+ }
+ snprintf(dev->mtd_name, sizeof(dev->mtd_name), "%d%ciB %s", size, unit, "PCMCIA Memory card");
+ }
+
+	/* If the memory found fits completely into the mapped PCMCIA window,
+	 * use the faster non-remapping read/write functions.
+	 */
+	if(mtd->size <= dev->win_size) {
+		pr_debug("Using non-remapping memory functions\n");
+ dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base;
+ if (dev->pcmcia_map.bankwidth == 1) {
+ dev->pcmcia_map.read = pcmcia_read8;
+ dev->pcmcia_map.write = pcmcia_write8;
+ } else {
+ dev->pcmcia_map.read = pcmcia_read16;
+ dev->pcmcia_map.write = pcmcia_write16;
+ }
+ dev->pcmcia_map.copy_from = pcmcia_copy_from;
+ dev->pcmcia_map.copy_to = pcmcia_copy_to;
+ }
+
+ if (mtd_device_register(mtd, NULL, 0)) {
+ map_destroy(mtd);
+ dev->mtd_info = NULL;
+ dev_err(&dev->p_dev->dev,
+ "Could not register the MTD device\n");
+ pcmciamtd_release(link);
+ return -ENODEV;
+ }
+ dev_info(&dev->p_dev->dev, "mtd%d: %s\n", mtd->index, mtd->name);
+ return 0;
+}
+
+
+static int pcmciamtd_suspend(struct pcmcia_device *dev)
+{
+ pr_debug("EVENT_PM_RESUME\n");
+
+ /* get_lock(link); */
+
+ return 0;
+}
+
+static int pcmciamtd_resume(struct pcmcia_device *dev)
+{
+ pr_debug("EVENT_PM_SUSPEND\n");
+
+ /* free_lock(link); */
+
+ return 0;
+}
+
+
+static void pcmciamtd_detach(struct pcmcia_device *link)
+{
+ struct pcmciamtd_dev *dev = link->priv;
+
+ pr_debug("link=0x%p\n", link);
+
+ if(dev->mtd_info) {
+ mtd_device_unregister(dev->mtd_info);
+ dev_info(&dev->p_dev->dev, "mtd%d: Removing\n",
+ dev->mtd_info->index);
+ map_destroy(dev->mtd_info);
+ }
+
+ pcmciamtd_release(link);
+}
+
+
+static int pcmciamtd_probe(struct pcmcia_device *link)
+{
+ struct pcmciamtd_dev *dev;
+
+ /* Create new memory card device */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+ pr_debug("dev=0x%p\n", dev);
+
+ dev->p_dev = link;
+ link->priv = dev;
+
+ return pcmciamtd_config(link);
+}
+
+static const struct pcmcia_device_id pcmciamtd_ids[] = {
+ PCMCIA_DEVICE_FUNC_ID(1),
+ PCMCIA_DEVICE_PROD_ID123("IO DATA", "PCS-2M", "2MB SRAM", 0x547e66dc, 0x1fed36cd, 0x36eadd21),
+ PCMCIA_DEVICE_PROD_ID12("IBM", "2MB SRAM", 0xb569a6e5, 0x36eadd21),
+ PCMCIA_DEVICE_PROD_ID12("IBM", "4MB FLASH", 0xb569a6e5, 0x8bc54d2a),
+ PCMCIA_DEVICE_PROD_ID12("IBM", "8MB FLASH", 0xb569a6e5, 0x6df1be3e),
+ PCMCIA_DEVICE_PROD_ID12("Intel", "S2E20SW", 0x816cc815, 0xd14c9dcf),
+ PCMCIA_DEVICE_PROD_ID12("Intel", "S2E8 SW", 0x816cc815, 0xa2d7dedb),
+ PCMCIA_DEVICE_PROD_ID12("intel", "SERIES2-02 ", 0x40ade711, 0x145cea5c),
+ PCMCIA_DEVICE_PROD_ID12("intel", "SERIES2-04 ", 0x40ade711, 0x42064dda),
+ PCMCIA_DEVICE_PROD_ID12("intel", "SERIES2-20 ", 0x40ade711, 0x25ee5cb0),
+ PCMCIA_DEVICE_PROD_ID12("intel", "VALUE SERIES 100 ", 0x40ade711, 0xdf8506d8),
+ PCMCIA_DEVICE_PROD_ID12("KINGMAX TECHNOLOGY INC.", "SRAM 256K Bytes", 0x54d0c69c, 0xad12c29c),
+ PCMCIA_DEVICE_PROD_ID12("Maxtor", "MAXFL MobileMax Flash Memory Card", 0xb68968c8, 0x2dfb47b0),
+ PCMCIA_DEVICE_PROD_ID123("M-Systems", "M-SYS Flash Memory Card", "(c) M-Systems", 0x7ed2ad87, 0x675dc3fb, 0x7aef3965),
+ PCMCIA_DEVICE_PROD_ID12("PRETEC", " 2MB SRAM CARD", 0xebf91155, 0x805360ca),
+ PCMCIA_DEVICE_PROD_ID12("PRETEC", " 4MB SRAM CARD", 0xebf91155, 0x20b6bf17),
+ PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB101EN20", 0xf9876baf, 0xad0b207b),
+ PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB513EN20", 0xf9876baf, 0xe8d884ad),
+ PCMCIA_DEVICE_PROD_ID12("SMART Modular Technologies", " 4MB FLASH Card", 0x96fd8277, 0x737a5b05),
+ PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-3000", 0x05ddca47, 0xe7d67bca),
+ PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-4100", 0x05ddca47, 0x7bc32944),
+ /* the following was commented out in pcmcia-cs-3.2.7 */
+ /* PCMCIA_DEVICE_PROD_ID12("RATOC Systems,Inc.", "SmartMedia ADAPTER PC Card", 0xf4a2fefe, 0x5885b2ae), */
+#ifdef CONFIG_MTD_PCMCIA_ANONYMOUS
+ { .match_flags = PCMCIA_DEV_ID_MATCH_ANONYMOUS, },
+#endif
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, pcmciamtd_ids);
+
+static struct pcmcia_driver pcmciamtd_driver = {
+ .name = "pcmciamtd",
+ .probe = pcmciamtd_probe,
+ .remove = pcmciamtd_detach,
+ .owner = THIS_MODULE,
+ .id_table = pcmciamtd_ids,
+ .suspend = pcmciamtd_suspend,
+ .resume = pcmciamtd_resume,
+};
+
+
+static int __init init_pcmciamtd(void)
+{
+ if(bankwidth && bankwidth != 1 && bankwidth != 2) {
+ info("bad bankwidth (%d), using default", bankwidth);
+ bankwidth = 2;
+ }
+ if(force_size && (force_size < 1 || force_size > 64)) {
+ info("bad force_size (%d), using default", force_size);
+ force_size = 0;
+ }
+ if(mem_type && mem_type != 1 && mem_type != 2) {
+ info("bad mem_type (%d), using default", mem_type);
+ mem_type = 0;
+ }
+ return pcmcia_register_driver(&pcmciamtd_driver);
+}
+
+
+static void __exit exit_pcmciamtd(void)
+{
+	pr_debug(DRIVER_DESC " unloading\n");
+ pcmcia_unregister_driver(&pcmciamtd_driver);
+}
+
+module_init(init_pcmciamtd);
+module_exit(exit_pcmciamtd);
diff --git a/drivers/mtd/maps/physmap-bt1-rom.c b/drivers/mtd/maps/physmap-bt1-rom.c
new file mode 100644
index 0000000000..60dccc48f9
--- /dev/null
+++ b/drivers/mtd/maps/physmap-bt1-rom.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 Physically Mapped Internal ROM driver
+ */
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/xip.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "physmap-bt1-rom.h"
+
+/*
+ * Baikal-T1 SoC ROMs are accessible only via dword-aligned instructions.
+ * We have to take this into account when implementing the data read
+ * methods. Note there is no need to bother with endianness, since both
+ * the Baikal-T1 CPU and its MMIO are little-endian.
+ */
+static map_word __xipram bt1_rom_map_read(struct map_info *map,
+ unsigned long ofs)
+{
+ void __iomem *src = map->virt + ofs;
+ unsigned int shift;
+ map_word ret;
+ u32 data;
+
+ /* Read data within offset dword. */
+ shift = (uintptr_t)src & 0x3;
+ data = readl_relaxed(src - shift);
+ if (!shift) {
+ ret.x[0] = data;
+ return ret;
+ }
+ ret.x[0] = data >> (shift * BITS_PER_BYTE);
+
+ /* Read data from the next dword. */
+ shift = 4 - shift;
+ if (ofs + shift >= map->size)
+ return ret;
+
+ data = readl_relaxed(src + shift);
+ ret.x[0] |= data << (shift * BITS_PER_BYTE);
+
+ return ret;
+}
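+
+/*
+ * Worked example (illustrative): for a 4-byte read at ofs = 2, shift = 2,
+ * so bytes 2-3 of the dword at offset 0 form the low half of the result
+ * (data >> 16) and bytes 4-5, the low bytes of the dword at offset 4,
+ * fill the high half (data << 16) - unless that second read would run
+ * past the end of the ROM.
+ */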
+
+static void __xipram bt1_rom_map_copy_from(struct map_info *map,
+ void *to, unsigned long from,
+ ssize_t len)
+{
+ void __iomem *src = map->virt + from;
+ unsigned int shift, chunk;
+ u32 data;
+
+ if (len <= 0 || from >= map->size)
+ return;
+
+ /* Make sure we don't go over the map limit. */
+ len = min_t(ssize_t, map->size - from, len);
+
+	/*
+	 * Since the requested data size can be pretty big, the copy
+	 * procedure should be as efficient as possible. That's why it's
+	 * split into three stages: an unaligned head, an aligned body and
+	 * an unaligned tail.
+	 */
+ shift = (uintptr_t)src & 0x3;
+ if (shift) {
+ chunk = min_t(ssize_t, 4 - shift, len);
+ data = readl_relaxed(src - shift);
+ memcpy(to, (char *)&data + shift, chunk);
+ src += chunk;
+ to += chunk;
+ len -= chunk;
+ }
+
+ while (len >= 4) {
+ data = readl_relaxed(src);
+ memcpy(to, &data, 4);
+ src += 4;
+ to += 4;
+ len -= 4;
+ }
+
+ if (len) {
+ data = readl_relaxed(src);
+ memcpy(to, &data, len);
+ }
+}
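+
+/*
+ * E.g. (illustrative) copying 10 bytes from offset 3: a 1-byte unaligned
+ * head (byte 3), two aligned dwords (bytes 4-11), then a 1-byte tail
+ * (byte 12), each chunk going through a dword-wide readl_relaxed().
+ */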
+
+int of_flash_probe_bt1_rom(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ struct device *dev = &pdev->dev;
+
+	/* It's supposed to be a read-only MTD. */
+ if (!of_device_is_compatible(np, "mtd-rom")) {
+ dev_info(dev, "No mtd-rom compatible string\n");
+ return 0;
+ }
+
+ /* Multiplatform guard. */
+ if (!of_device_is_compatible(np, "baikal,bt1-int-rom"))
+ return 0;
+
+ /* Sanity check the device parameters retrieved from DTB. */
+ if (map->bankwidth != 4)
+ dev_warn(dev, "Bank width is supposed to be 32 bits wide\n");
+
+ map->read = bt1_rom_map_read;
+ map->copy_from = bt1_rom_map_copy_from;
+
+ return 0;
+}
diff --git a/drivers/mtd/maps/physmap-bt1-rom.h b/drivers/mtd/maps/physmap-bt1-rom.h
new file mode 100644
index 0000000000..6782899598
--- /dev/null
+++ b/drivers/mtd/maps/physmap-bt1-rom.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <linux/mtd/map.h>
+#include <linux/of.h>
+
+#ifdef CONFIG_MTD_PHYSMAP_BT1_ROM
+int of_flash_probe_bt1_rom(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map);
+#else
+static inline
+int of_flash_probe_bt1_rom(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
new file mode 100644
index 0000000000..fc87213392
--- /dev/null
+++ b/drivers/mtd/maps/physmap-core.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Normal mappings of chips in physical memory
+ *
+ * Copyright (C) 2003 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * 031022 - [jsun] add run-time configure and partition setup
+ *
+ * Device tree support:
+ * Copyright (C) 2006 MontaVista Software Inc.
+ * Author: Vitaly Wool <vwool@ru.mvista.com>
+ *
+ * Revised to handle newer style flash binding by:
+ * Copyright (C) 2007 David Gibson, IBM Corporation.
+ *
+ * GPIO address extension:
+ * Handle the case where a flash device is mostly addressed using physical
+ * address lines and supplemented by GPIOs. This way you can hook up, say,
+ * an 8MiB flash to a 2MiB memory range and use the GPIOs to select a
+ * particular range.
+ * Copyright © 2000 Nicolas Pitre <nico@cam.org>
+ * Copyright © 2005-2009 Analog Devices Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/concat.h>
+#include <linux/mtd/cfi_endian.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/gpio/consumer.h>
+
+#include "physmap-bt1-rom.h"
+#include "physmap-gemini.h"
+#include "physmap-ixp4xx.h"
+#include "physmap-versatile.h"
+
+struct physmap_flash_info {
+ unsigned int nmaps;
+ struct mtd_info **mtds;
+ struct mtd_info *cmtd;
+ struct map_info *maps;
+ spinlock_t vpp_lock;
+ int vpp_refcnt;
+ const char *probe_type;
+ const char * const *part_types;
+ unsigned int nparts;
+ const struct mtd_partition *parts;
+ struct gpio_descs *gpios;
+ unsigned int gpio_values;
+ unsigned int win_order;
+};
+
+static int physmap_flash_remove(struct platform_device *dev)
+{
+ struct physmap_flash_info *info;
+ struct physmap_flash_data *physmap_data;
+ int i;
+
+ info = platform_get_drvdata(dev);
+
+ if (info->cmtd) {
+ WARN_ON(mtd_device_unregister(info->cmtd));
+
+ if (info->cmtd != info->mtds[0])
+ mtd_concat_destroy(info->cmtd);
+ }
+
+ for (i = 0; i < info->nmaps; i++) {
+ if (info->mtds[i])
+ map_destroy(info->mtds[i]);
+ }
+
+ physmap_data = dev_get_platdata(&dev->dev);
+ if (physmap_data && physmap_data->exit)
+ physmap_data->exit(dev);
+
+ pm_runtime_put(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+ return 0;
+}
+
+static void physmap_set_vpp(struct map_info *map, int state)
+{
+ struct platform_device *pdev;
+ struct physmap_flash_data *physmap_data;
+ struct physmap_flash_info *info;
+ unsigned long flags;
+
+ pdev = (struct platform_device *)map->map_priv_1;
+ physmap_data = dev_get_platdata(&pdev->dev);
+
+ if (!physmap_data->set_vpp)
+ return;
+
+ info = platform_get_drvdata(pdev);
+
+ spin_lock_irqsave(&info->vpp_lock, flags);
+ if (state) {
+ if (++info->vpp_refcnt == 1) /* first nested 'on' */
+ physmap_data->set_vpp(pdev, 1);
+ } else {
+ if (--info->vpp_refcnt == 0) /* last nested 'off' */
+ physmap_data->set_vpp(pdev, 0);
+ }
+ spin_unlock_irqrestore(&info->vpp_lock, flags);
+}
+
+#if IS_ENABLED(CONFIG_MTD_PHYSMAP_GPIO_ADDR)
+static void physmap_set_addr_gpios(struct physmap_flash_info *info,
+ unsigned long ofs)
+{
+ unsigned int i;
+
+ ofs >>= info->win_order;
+ if (info->gpio_values == ofs)
+ return;
+
+ for (i = 0; i < info->gpios->ndescs; i++) {
+ if ((BIT(i) & ofs) == (BIT(i) & info->gpio_values))
+ continue;
+
+ gpiod_set_value(info->gpios->desc[i], !!(BIT(i) & ofs));
+ }
+
+ info->gpio_values = ofs;
+}
+
+#define win_mask(order) (BIT(order) - 1)
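+
+/*
+ * Example (illustrative): an 8MiB chip behind a 2MiB window
+ * (win_order = 21) with two address GPIOs: an access at ofs 0x300000
+ * drives the GPIOs to 0x300000 >> 21 = 1 (binary 01) and then touches
+ * window offset 0x300000 & win_mask(21) = 0x100000.
+ */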
+
+static map_word physmap_addr_gpios_read(struct map_info *map,
+ unsigned long ofs)
+{
+ struct platform_device *pdev;
+ struct physmap_flash_info *info;
+ map_word mw;
+ u16 word;
+
+ pdev = (struct platform_device *)map->map_priv_1;
+ info = platform_get_drvdata(pdev);
+ physmap_set_addr_gpios(info, ofs);
+
+ word = readw(map->virt + (ofs & win_mask(info->win_order)));
+ mw.x[0] = word;
+ return mw;
+}
+
+static void physmap_addr_gpios_copy_from(struct map_info *map, void *buf,
+ unsigned long ofs, ssize_t len)
+{
+ struct platform_device *pdev;
+ struct physmap_flash_info *info;
+
+ pdev = (struct platform_device *)map->map_priv_1;
+ info = platform_get_drvdata(pdev);
+
+ while (len) {
+ unsigned int winofs = ofs & win_mask(info->win_order);
+ unsigned int chunklen = min_t(unsigned int, len,
+ BIT(info->win_order) - winofs);
+
+ physmap_set_addr_gpios(info, ofs);
+ memcpy_fromio(buf, map->virt + winofs, chunklen);
+ len -= chunklen;
+ buf += chunklen;
+ ofs += chunklen;
+ }
+}
+
+static void physmap_addr_gpios_write(struct map_info *map, map_word mw,
+ unsigned long ofs)
+{
+ struct platform_device *pdev;
+ struct physmap_flash_info *info;
+ u16 word;
+
+ pdev = (struct platform_device *)map->map_priv_1;
+ info = platform_get_drvdata(pdev);
+ physmap_set_addr_gpios(info, ofs);
+
+ word = mw.x[0];
+ writew(word, map->virt + (ofs & win_mask(info->win_order)));
+}
+
+static void physmap_addr_gpios_copy_to(struct map_info *map, unsigned long ofs,
+ const void *buf, ssize_t len)
+{
+ struct platform_device *pdev;
+ struct physmap_flash_info *info;
+
+ pdev = (struct platform_device *)map->map_priv_1;
+ info = platform_get_drvdata(pdev);
+
+ while (len) {
+ unsigned int winofs = ofs & win_mask(info->win_order);
+ unsigned int chunklen = min_t(unsigned int, len,
+ BIT(info->win_order) - winofs);
+
+ physmap_set_addr_gpios(info, ofs);
+ memcpy_toio(map->virt + winofs, buf, chunklen);
+ len -= chunklen;
+ buf += chunklen;
+ ofs += chunklen;
+ }
+}
+
+static int physmap_addr_gpios_map_init(struct map_info *map)
+{
+ map->phys = NO_XIP;
+ map->read = physmap_addr_gpios_read;
+ map->copy_from = physmap_addr_gpios_copy_from;
+ map->write = physmap_addr_gpios_write;
+ map->copy_to = physmap_addr_gpios_copy_to;
+
+ return 0;
+}
+#else
+static int physmap_addr_gpios_map_init(struct map_info *map)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_MTD_PHYSMAP_OF)
+static const struct of_device_id of_flash_match[] = {
+ {
+ .compatible = "cfi-flash",
+ .data = "cfi_probe",
+ },
+ {
+ /*
+ * FIXME: JEDEC chips can't be safely and reliably
+ * probed, although the mtd code gets it right in
+ * practice most of the time. We should use the
+ * vendor and device ids specified by the binding to
+ * bypass the heuristic probe code, but the mtd layer
+ * provides, at present, no interface for doing so
+ * :(.
+ */
+ .compatible = "jedec-flash",
+ .data = "jedec_probe",
+ },
+ {
+ .compatible = "mtd-ram",
+ .data = "map_ram",
+ },
+ {
+ .compatible = "mtd-rom",
+ .data = "map_rom",
+ },
+ {
+ .type = "rom",
+ .compatible = "direct-mapped"
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, of_flash_match);
+
+static const char * const of_default_part_probes[] = {
+ "cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL
+};
+
+static const char * const *of_get_part_probes(struct platform_device *dev)
+{
+ struct device_node *dp = dev->dev.of_node;
+ const char **res;
+ int count;
+
+ count = of_property_count_strings(dp, "linux,part-probe");
+ if (count < 0)
+ return of_default_part_probes;
+
+ res = devm_kcalloc(&dev->dev, count + 1, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return NULL;
+
+ count = of_property_read_string_array(dp, "linux,part-probe", res,
+ count);
+ if (count < 0)
+ return NULL;
+
+ return res;
+}
+
+static const char *of_select_probe_type(struct platform_device *dev)
+{
+ struct device_node *dp = dev->dev.of_node;
+ const struct of_device_id *match;
+ const char *probe_type;
+
+ match = of_match_device(of_flash_match, &dev->dev);
+ if (!match)
+ return NULL;
+
+ probe_type = match->data;
+ if (probe_type)
+ return probe_type;
+
+ dev_warn(&dev->dev,
+ "Device tree uses obsolete \"direct-mapped\" flash binding\n");
+
+ of_property_read_string(dp, "probe-type", &probe_type);
+ if (!probe_type)
+ return NULL;
+
+ if (!strcmp(probe_type, "CFI")) {
+ probe_type = "cfi_probe";
+ } else if (!strcmp(probe_type, "JEDEC")) {
+ probe_type = "jedec_probe";
+ } else if (!strcmp(probe_type, "ROM")) {
+ probe_type = "map_rom";
+ } else {
+ dev_warn(&dev->dev,
+ "obsolete_probe: don't know probe type '%s', mapping as rom\n",
+ probe_type);
+ probe_type = "map_rom";
+ }
+
+ return probe_type;
+}
+
+static int physmap_flash_of_init(struct platform_device *dev)
+{
+ struct physmap_flash_info *info = platform_get_drvdata(dev);
+ struct device_node *dp = dev->dev.of_node;
+ const char *mtd_name = NULL;
+ int err, swap = 0;
+ bool map_indirect;
+ unsigned int i;
+ u32 bankwidth;
+
+ if (!dp)
+ return -EINVAL;
+
+ info->probe_type = of_select_probe_type(dev);
+
+ info->part_types = of_get_part_probes(dev);
+ if (!info->part_types)
+ return -ENOMEM;
+
+ of_property_read_string(dp, "linux,mtd-name", &mtd_name);
+
+ map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access");
+
+ err = of_property_read_u32(dp, "bank-width", &bankwidth);
+ if (err) {
+ dev_err(&dev->dev, "Can't get bank width from device tree\n");
+ return err;
+ }
+
+ if (of_property_read_bool(dp, "big-endian"))
+ swap = CFI_BIG_ENDIAN;
+ else if (of_property_read_bool(dp, "little-endian"))
+ swap = CFI_LITTLE_ENDIAN;
+
+ for (i = 0; i < info->nmaps; i++) {
+ info->maps[i].name = mtd_name;
+ info->maps[i].swap = swap;
+ info->maps[i].bankwidth = bankwidth;
+ info->maps[i].device_node = dp;
+
+ err = of_flash_probe_bt1_rom(dev, dp, &info->maps[i]);
+ if (err)
+ return err;
+
+ err = of_flash_probe_gemini(dev, dp, &info->maps[i]);
+ if (err)
+ return err;
+
+ err = of_flash_probe_ixp4xx(dev, dp, &info->maps[i]);
+ if (err)
+ return err;
+
+ err = of_flash_probe_versatile(dev, dp, &info->maps[i]);
+ if (err)
+ return err;
+
+ /*
+ * On some platforms (e.g. MPC5200) a direct 1:1 mapping
+ * may cause problems with JFFS2 usage, as the local bus (LPB)
+ * doesn't support unaligned accesses as implemented in the
+ * JFFS2 code via memcpy(). By setting NO_XIP, the
+ * flash will not be exposed directly to the MTD users
+ * (e.g. JFFS2) any more.
+ */
+ if (map_indirect)
+ info->maps[i].phys = NO_XIP;
+ }
+
+ return 0;
+}
+#else /* IS_ENABLED(CONFIG_MTD_PHYSMAP_OF) */
+#define of_flash_match NULL
+
+static int physmap_flash_of_init(struct platform_device *dev)
+{
+ return -ENOTSUPP;
+}
+#endif /* IS_ENABLED(CONFIG_MTD_PHYSMAP_OF) */
+
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", "qinfo_probe", "map_rom",
+};
+
+static const char * const part_probe_types[] = {
+ "cmdlinepart", "RedBoot", "afs", NULL
+};
+
+static int physmap_flash_pdata_init(struct platform_device *dev)
+{
+ struct physmap_flash_info *info = platform_get_drvdata(dev);
+ struct physmap_flash_data *physmap_data;
+ unsigned int i;
+ int err;
+
+ physmap_data = dev_get_platdata(&dev->dev);
+ if (!physmap_data)
+ return -EINVAL;
+
+ info->probe_type = physmap_data->probe_type;
+ info->part_types = physmap_data->part_probe_types ? : part_probe_types;
+ info->parts = physmap_data->parts;
+ info->nparts = physmap_data->nr_parts;
+
+ if (physmap_data->init) {
+ err = physmap_data->init(dev);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < info->nmaps; i++) {
+ info->maps[i].bankwidth = physmap_data->width;
+ info->maps[i].pfow_base = physmap_data->pfow_base;
+ info->maps[i].set_vpp = physmap_set_vpp;
+ }
+
+ return 0;
+}
+
+static int physmap_flash_probe(struct platform_device *dev)
+{
+ struct physmap_flash_info *info;
+ int err = 0;
+ int i;
+
+ if (!dev->dev.of_node && !dev_get_platdata(&dev->dev))
+ return -EINVAL;
+
+ info = devm_kzalloc(&dev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ while (platform_get_resource(dev, IORESOURCE_MEM, info->nmaps))
+ info->nmaps++;
+
+ if (!info->nmaps)
+ return -ENODEV;
+
+ info->maps = devm_kzalloc(&dev->dev,
+ sizeof(*info->maps) * info->nmaps,
+ GFP_KERNEL);
+ if (!info->maps)
+ return -ENOMEM;
+
+ info->mtds = devm_kzalloc(&dev->dev,
+ sizeof(*info->mtds) * info->nmaps,
+ GFP_KERNEL);
+ if (!info->mtds)
+ return -ENOMEM;
+
+ platform_set_drvdata(dev, info);
+
+ info->gpios = devm_gpiod_get_array_optional(&dev->dev, "addr",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(info->gpios))
+ return PTR_ERR(info->gpios);
+
+ if (info->gpios && info->nmaps > 1) {
+ dev_err(&dev->dev, "addr-gpios only supported for nmaps == 1\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_enable(&dev->dev);
+ pm_runtime_get_sync(&dev->dev);
+
+ if (dev->dev.of_node)
+ err = physmap_flash_of_init(dev);
+ else
+ err = physmap_flash_pdata_init(dev);
+
+ if (err) {
+ pm_runtime_put(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+ return err;
+ }
+
+ for (i = 0; i < info->nmaps; i++) {
+ struct resource *res;
+
+ info->maps[i].virt = devm_platform_get_and_ioremap_resource(dev, i, &res);
+ if (IS_ERR(info->maps[i].virt)) {
+ err = PTR_ERR(info->maps[i].virt);
+ goto err_out;
+ }
+
+ dev_notice(&dev->dev, "physmap platform flash device: %pR\n",
+ res);
+
+ if (!info->maps[i].name)
+ info->maps[i].name = dev_name(&dev->dev);
+
+ if (!info->maps[i].phys)
+ info->maps[i].phys = res->start;
+
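+		/*
+		 * E.g. (illustrative) a 2MiB resource gives win_order = 21;
+		 * with two address GPIOs the map size then becomes
+		 * BIT(21 + 2) = 8MiB.
+		 */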
+ info->win_order = get_bitmask_order(resource_size(res)) - 1;
+ info->maps[i].size = BIT(info->win_order +
+ (info->gpios ?
+ info->gpios->ndescs : 0));
+
+ info->maps[i].map_priv_1 = (unsigned long)dev;
+
+ if (info->gpios) {
+ err = physmap_addr_gpios_map_init(&info->maps[i]);
+ if (err)
+ goto err_out;
+ }
+
+#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
+ /*
+ * Only use the simple_map implementation if map hooks are not
+ * implemented. Since map->read() is mandatory checking for its
+ * presence is enough.
+ */
+ if (!info->maps[i].read)
+ simple_map_init(&info->maps[i]);
+#else
+ simple_map_init(&info->maps[i]);
+#endif
+
+ if (info->probe_type) {
+ info->mtds[i] = do_map_probe(info->probe_type,
+ &info->maps[i]);
+
+ /* Fall back to mapping region as ROM */
+ if (!info->mtds[i] && IS_ENABLED(CONFIG_MTD_ROM) &&
+ strcmp(info->probe_type, "map_rom")) {
+ dev_warn(&dev->dev,
+ "map_probe() failed for type %s\n",
+ info->probe_type);
+
+ info->mtds[i] = do_map_probe("map_rom",
+ &info->maps[i]);
+ }
+ } else {
+ int j;
+
+ for (j = 0; j < ARRAY_SIZE(rom_probe_types); j++) {
+ info->mtds[i] = do_map_probe(rom_probe_types[j],
+ &info->maps[i]);
+ if (info->mtds[i])
+ break;
+ }
+ }
+
+ if (!info->mtds[i]) {
+ dev_err(&dev->dev, "map_probe failed\n");
+ err = -ENXIO;
+ goto err_out;
+ }
+ info->mtds[i]->dev.parent = &dev->dev;
+ }
+
+ if (info->nmaps == 1) {
+ info->cmtd = info->mtds[0];
+ } else {
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ info->cmtd = mtd_concat_create(info->mtds, info->nmaps,
+ dev_name(&dev->dev));
+ if (!info->cmtd)
+ err = -ENXIO;
+ }
+ if (err)
+ goto err_out;
+
+ spin_lock_init(&info->vpp_lock);
+
+ mtd_set_of_node(info->cmtd, dev->dev.of_node);
+ err = mtd_device_parse_register(info->cmtd, info->part_types, NULL,
+ info->parts, info->nparts);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ physmap_flash_remove(dev);
+ return err;
+}
+
+#ifdef CONFIG_PM
+static void physmap_flash_shutdown(struct platform_device *dev)
+{
+ struct physmap_flash_info *info = platform_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < info->nmaps && info->mtds[i]; i++)
+ if (mtd_suspend(info->mtds[i]) == 0)
+ mtd_resume(info->mtds[i]);
+}
+#else
+#define physmap_flash_shutdown NULL
+#endif
+
+static struct platform_driver physmap_flash_driver = {
+ .probe = physmap_flash_probe,
+ .remove = physmap_flash_remove,
+ .shutdown = physmap_flash_shutdown,
+ .driver = {
+ .name = "physmap-flash",
+ .of_match_table = of_flash_match,
+ },
+};
+
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
+static struct physmap_flash_data physmap_flash_data = {
+ .width = CONFIG_MTD_PHYSMAP_BANKWIDTH,
+};
+
+static struct resource physmap_flash_resource = {
+ .start = CONFIG_MTD_PHYSMAP_START,
+ .end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device physmap_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &physmap_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &physmap_flash_resource,
+};
+#endif
+
+static int __init physmap_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&physmap_flash_driver);
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
+ if (err == 0) {
+ err = platform_device_register(&physmap_flash);
+ if (err)
+ platform_driver_unregister(&physmap_flash_driver);
+ }
+#endif
+
+ return err;
+}
+
+static void __exit physmap_exit(void)
+{
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
+ platform_device_unregister(&physmap_flash);
+#endif
+ platform_driver_unregister(&physmap_flash_driver);
+}
+
+module_init(physmap_init);
+module_exit(physmap_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_AUTHOR("Vitaly Wool <vwool@ru.mvista.com>");
+MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
+MODULE_DESCRIPTION("Generic configurable MTD map driver");
+
+/* legacy platform drivers can't hotplug or coldplug */
+#ifndef CONFIG_MTD_PHYSMAP_COMPAT
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:physmap-flash");
+#endif
diff --git a/drivers/mtd/maps/physmap-gemini.c b/drivers/mtd/maps/physmap-gemini.c
new file mode 100644
index 0000000000..9d3b4bf84a
--- /dev/null
+++ b/drivers/mtd/maps/physmap-gemini.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cortina Systems Gemini OF physmap add-on
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This SoC has an elaborate flash control register, so we need to
+ * detect and set it up when booting on this platform.
+ */
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/xip.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include <linux/pinctrl/consumer.h>
+#include "physmap-gemini.h"
+
+/*
+ * The Flash-relevant parts of the global status register
+ * These would also be relevant for a NAND driver.
+ */
+#define GLOBAL_STATUS 0x04
+#define FLASH_TYPE_MASK (0x3 << 24)
+#define FLASH_TYPE_NAND_2K (0x3 << 24)
+#define FLASH_TYPE_NAND_512 (0x2 << 24)
+#define FLASH_TYPE_PARALLEL (0x1 << 24)
+#define FLASH_TYPE_SERIAL (0x0 << 24)
+/* if parallel */
+#define FLASH_WIDTH_16BIT (1 << 23) /* else 8 bit */
+/* if serial */
+#define FLASH_ATMEL (1 << 23) /* else STM */
+
+#define FLASH_SIZE_MASK (0x3 << 21)
+#define NAND_256M (0x3 << 21) /* and more */
+#define NAND_128M (0x2 << 21)
+#define NAND_64M (0x1 << 21)
+#define NAND_32M (0x0 << 21)
+#define ATMEL_16M (0x3 << 21) /* and more */
+#define ATMEL_8M (0x2 << 21)
+#define ATMEL_4M_2M (0x1 << 21)
+#define ATMEL_1M (0x0 << 21) /* and less */
+#define STM_32M (1 << 22) /* and more */
+#define STM_16M (0 << 22) /* and less */
+
+#define FLASH_PARALLEL_HIGH_PIN_CNT (1 << 20) /* else low pin cnt */
+
+struct gemini_flash {
+ struct device *dev;
+ struct pinctrl *p;
+ struct pinctrl_state *enabled_state;
+ struct pinctrl_state *disabled_state;
+};
+
+/* Static local state */
+static struct gemini_flash *gf;
+
+static void gemini_flash_enable_pins(void)
+{
+ int ret;
+
+ if (IS_ERR(gf->enabled_state))
+ return;
+ ret = pinctrl_select_state(gf->p, gf->enabled_state);
+ if (ret)
+ dev_err(gf->dev, "failed to enable pins\n");
+}
+
+static void gemini_flash_disable_pins(void)
+{
+ int ret;
+
+ if (IS_ERR(gf->disabled_state))
+ return;
+ ret = pinctrl_select_state(gf->p, gf->disabled_state);
+ if (ret)
+ dev_err(gf->dev, "failed to disable pins\n");
+}
+
+static map_word __xipram gemini_flash_map_read(struct map_info *map,
+ unsigned long ofs)
+{
+ map_word ret;
+
+ gemini_flash_enable_pins();
+ ret = inline_map_read(map, ofs);
+ gemini_flash_disable_pins();
+
+ return ret;
+}
+
+static void __xipram gemini_flash_map_write(struct map_info *map,
+ const map_word datum,
+ unsigned long ofs)
+{
+ gemini_flash_enable_pins();
+ inline_map_write(map, datum, ofs);
+ gemini_flash_disable_pins();
+}
+
+static void __xipram gemini_flash_map_copy_from(struct map_info *map,
+ void *to, unsigned long from,
+ ssize_t len)
+{
+ gemini_flash_enable_pins();
+ inline_map_copy_from(map, to, from, len);
+ gemini_flash_disable_pins();
+}
+
+static void __xipram gemini_flash_map_copy_to(struct map_info *map,
+ unsigned long to,
+ const void *from, ssize_t len)
+{
+ gemini_flash_enable_pins();
+ inline_map_copy_to(map, to, from, len);
+ gemini_flash_disable_pins();
+}
+
+int of_flash_probe_gemini(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ struct regmap *rmap;
+ struct device *dev = &pdev->dev;
+ u32 val;
+ int ret;
+
+ /* Multiplatform guard */
+ if (!of_device_is_compatible(np, "cortina,gemini-flash"))
+ return 0;
+
+ gf = devm_kzalloc(dev, sizeof(*gf), GFP_KERNEL);
+ if (!gf)
+ return -ENOMEM;
+ gf->dev = dev;
+
+ rmap = syscon_regmap_lookup_by_phandle(np, "syscon");
+ if (IS_ERR(rmap)) {
+ dev_err(dev, "no syscon\n");
+ return PTR_ERR(rmap);
+ }
+
+ ret = regmap_read(rmap, GLOBAL_STATUS, &val);
+ if (ret) {
+ dev_err(dev, "failed to read global status register\n");
+ return -ENODEV;
+ }
+ dev_dbg(dev, "global status reg: %08x\n", val);
+
+ /*
+ * It would be contradictory if a physmap flash was NOT parallel.
+ */
+ if ((val & FLASH_TYPE_MASK) != FLASH_TYPE_PARALLEL) {
+ dev_err(dev, "flash is not parallel\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Complain if DT data and hardware definition is different.
+ */
+ if (val & FLASH_WIDTH_16BIT) {
+ if (map->bankwidth != 2)
+ dev_warn(dev, "flash hardware say flash is 16 bit wide but DT says it is %d bits wide\n",
+ map->bankwidth * 8);
+ } else {
+ if (map->bankwidth != 1)
+ dev_warn(dev, "flash hardware say flash is 8 bit wide but DT says it is %d bits wide\n",
+ map->bankwidth * 8);
+ }
+
+ gf->p = devm_pinctrl_get(dev);
+ if (IS_ERR(gf->p)) {
+ dev_err(dev, "no pinctrl handle\n");
+ ret = PTR_ERR(gf->p);
+ return ret;
+ }
+
+ gf->enabled_state = pinctrl_lookup_state(gf->p, "enabled");
+ if (IS_ERR(gf->enabled_state))
+ dev_err(dev, "no enabled pin control state\n");
+
+ gf->disabled_state = pinctrl_lookup_state(gf->p, "disabled");
+ if (IS_ERR(gf->enabled_state)) {
+ dev_err(dev, "no disabled pin control state\n");
+ } else {
+ ret = pinctrl_select_state(gf->p, gf->disabled_state);
+ if (ret)
+ dev_err(gf->dev, "failed to disable pins\n");
+ }
+
+ map->read = gemini_flash_map_read;
+ map->write = gemini_flash_map_write;
+ map->copy_from = gemini_flash_map_copy_from;
+ map->copy_to = gemini_flash_map_copy_to;
+
+ dev_info(dev, "initialized Gemini-specific physmap control\n");
+
+ return 0;
+}
diff --git a/drivers/mtd/maps/physmap-gemini.h b/drivers/mtd/maps/physmap-gemini.h
new file mode 100644
index 0000000000..72bd04ce3f
--- /dev/null
+++ b/drivers/mtd/maps/physmap-gemini.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/of.h>
+#include <linux/mtd/map.h>
+
+#ifdef CONFIG_MTD_PHYSMAP_GEMINI
+int of_flash_probe_gemini(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map);
+#else
+static inline
+int of_flash_probe_gemini(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/mtd/maps/physmap-ixp4xx.c b/drivers/mtd/maps/physmap-ixp4xx.c
new file mode 100644
index 0000000000..c561468f95
--- /dev/null
+++ b/drivers/mtd/maps/physmap-ixp4xx.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel IXP4xx OF physmap add-on
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the ixp4xx.c map driver, originally written by:
+ * Intel Corporation
+ * Deepak Saxena <dsaxena@mvista.com>
+ * Copyright (C) 2002 Intel Corporation
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ */
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/xip.h>
+#include "physmap-ixp4xx.h"
+
+/*
+ * Read/write a 16 bit word from flash address 'addr'.
+ *
+ * When the CPU is in little-endian mode it swizzles the address lines
+ * ('address coherency') so we need to undo the swizzling to ensure commands
+ * and the like end up on the correct flash address.
+ *
+ * To further complicate matters, due to the way the expansion bus controller
+ * handles 32 bit reads, the byte stream ABCD is stored on the flash as:
+ * D15 D0
+ * +---+---+
+ * | A | B | 0
+ * +---+---+
+ * | C | D | 2
+ * +---+---+
+ * This means that on LE systems each 16 bit word must be swapped. Note that
+ * this requires CONFIG_MTD_CFI_BE_BYTE_SWAP to be enabled to 'unswap' the CFI
+ * data and other flash commands which are always in D7-D0.
+ */
+#ifndef CONFIG_CPU_BIG_ENDIAN
+
+static inline u16 flash_read16(void __iomem *addr)
+{
+ return be16_to_cpu(__raw_readw((void __iomem *)((unsigned long)addr ^ 0x2)));
+}
+
+static inline void flash_write16(u16 d, void __iomem *addr)
+{
+ __raw_writew(cpu_to_be16(d), (void __iomem *)((unsigned long)addr ^ 0x2));
+}
+
+#define BYTE0(h) ((h) & 0xFF)
+#define BYTE1(h) (((h) >> 8) & 0xFF)
+
+#else
+
+static inline u16 flash_read16(const void __iomem *addr)
+{
+ return __raw_readw(addr);
+}
+
+static inline void flash_write16(u16 d, void __iomem *addr)
+{
+ __raw_writew(d, addr);
+}
+
+#define BYTE0(h) (((h) >> 8) & 0xFF)
+#define BYTE1(h) ((h) & 0xFF)
+#endif
+
+static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs)
+{
+ map_word val;
+
+ val.x[0] = flash_read16(map->virt + ofs);
+ return val;
+}
+
+/*
+ * The IXP4xx expansion bus only allows 16-bit wide accesses
+ * when attached to a 16-bit wide device (such as the 28F128J3A),
+ * so we can't just memcpy_fromio().
+ */
+static void ixp4xx_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ u8 *dest = (u8 *) to;
+ void __iomem *src = map->virt + from;
+
+ if (len <= 0)
+ return;
+
+ if (from & 1) {
+ *dest++ = BYTE1(flash_read16(src-1));
+ src++;
+ --len;
+ }
+
+ while (len >= 2) {
+ u16 data = flash_read16(src);
+ *dest++ = BYTE0(data);
+ *dest++ = BYTE1(data);
+ src += 2;
+ len -= 2;
+ }
+
+ if (len > 0)
+ *dest++ = BYTE0(flash_read16(src));
+}
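+
+/*
+ * E.g. (illustrative) an unaligned ixp4xx_copy_from() of 4 bytes starting
+ * at ofs 1 performs three 16-bit reads: the trailing byte of the word at
+ * offset 0, the whole word at offset 2, and the leading byte of the word
+ * at offset 4; no 8- or 32-bit bus cycles are ever issued.
+ */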
+
+static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
+{
+ flash_write16(d.x[0], map->virt + adr);
+}
+
+int of_flash_probe_ixp4xx(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ struct device *dev = &pdev->dev;
+
+ /* Multiplatform guard */
+ if (!of_device_is_compatible(np, "intel,ixp4xx-flash"))
+ return 0;
+
+ map->read = ixp4xx_read16;
+ map->write = ixp4xx_write16;
+ map->copy_from = ixp4xx_copy_from;
+ map->copy_to = NULL;
+
+ dev_info(dev, "initialized Intel IXP4xx-specific physmap control\n");
+
+ return 0;
+}
diff --git a/drivers/mtd/maps/physmap-ixp4xx.h b/drivers/mtd/maps/physmap-ixp4xx.h
new file mode 100644
index 0000000000..46824c57e5
--- /dev/null
+++ b/drivers/mtd/maps/physmap-ixp4xx.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/map.h>
+
+#ifdef CONFIG_MTD_PHYSMAP_IXP4XX
+int of_flash_probe_ixp4xx(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map);
+#else
+static inline
+int of_flash_probe_ixp4xx(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/mtd/maps/physmap-versatile.c b/drivers/mtd/maps/physmap-versatile.c
new file mode 100644
index 0000000000..2e779111bf
--- /dev/null
+++ b/drivers/mtd/maps/physmap-versatile.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Versatile OF physmap driver add-on
+ *
+ * Copyright (c) 2016, Linaro Limited
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/mtd/map.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include "physmap-versatile.h"
+
+static struct regmap *syscon_regmap;
+
+enum versatile_flashprot {
+ INTEGRATOR_AP_FLASHPROT,
+ INTEGRATOR_CP_FLASHPROT,
+ VERSATILE_FLASHPROT,
+ REALVIEW_FLASHPROT,
+};
+
+static const struct of_device_id syscon_match[] = {
+ {
+ .compatible = "arm,integrator-ap-syscon",
+ .data = (void *)INTEGRATOR_AP_FLASHPROT,
+ },
+ {
+ .compatible = "arm,integrator-cp-syscon",
+ .data = (void *)INTEGRATOR_CP_FLASHPROT,
+ },
+ {
+ .compatible = "arm,core-module-versatile",
+ .data = (void *)VERSATILE_FLASHPROT,
+ },
+ {
+ .compatible = "arm,realview-eb-syscon",
+ .data = (void *)REALVIEW_FLASHPROT,
+ },
+ {
+ .compatible = "arm,realview-pb1176-syscon",
+ .data = (void *)REALVIEW_FLASHPROT,
+ },
+ {
+ .compatible = "arm,realview-pb11mp-syscon",
+ .data = (void *)REALVIEW_FLASHPROT,
+ },
+ {
+ .compatible = "arm,realview-pba8-syscon",
+ .data = (void *)REALVIEW_FLASHPROT,
+ },
+ {
+ .compatible = "arm,realview-pbx-syscon",
+ .data = (void *)REALVIEW_FLASHPROT,
+ },
+ {},
+};
+
+/*
+ * Flash protection handling for the Integrator/AP
+ */
+#define INTEGRATOR_SC_CTRLS_OFFSET 0x08
+#define INTEGRATOR_SC_CTRLC_OFFSET 0x0C
+#define INTEGRATOR_SC_CTRL_FLVPPEN BIT(1)
+#define INTEGRATOR_SC_CTRL_FLWP BIT(2)
+
+#define INTEGRATOR_EBI_CSR1_OFFSET 0x04
+/* The manual says bit 2, the code says bit 3, trust the code */
+#define INTEGRATOR_EBI_WRITE_ENABLE BIT(3)
+#define INTEGRATOR_EBI_LOCK_OFFSET 0x20
+#define INTEGRATOR_EBI_LOCK_VAL 0xA05F
+
+static const struct of_device_id ebi_match[] = {
+ { .compatible = "arm,external-bus-interface"},
+ { },
+};
+
+static int ap_flash_init(struct platform_device *pdev)
+{
+ struct device_node *ebi;
+ void __iomem *ebi_base;
+ u32 val;
+ int ret;
+
+ /* Look up the EBI */
+ ebi = of_find_matching_node(NULL, ebi_match);
+ if (!ebi) {
+ return -ENODEV;
+ }
+ ebi_base = of_iomap(ebi, 0);
+ of_node_put(ebi);
+ if (!ebi_base)
+ return -ENODEV;
+
+ /* Clear VPP and write protection bits */
+ ret = regmap_write(syscon_regmap,
+ INTEGRATOR_SC_CTRLC_OFFSET,
+ INTEGRATOR_SC_CTRL_FLVPPEN | INTEGRATOR_SC_CTRL_FLWP);
+ if (ret)
+ dev_err(&pdev->dev, "error clearing Integrator VPP/WP\n");
+
+ /* Unlock the EBI */
+ writel(INTEGRATOR_EBI_LOCK_VAL, ebi_base + INTEGRATOR_EBI_LOCK_OFFSET);
+
+ /* Enable write cycles on the EBI, CSR1 (flash) */
+ val = readl(ebi_base + INTEGRATOR_EBI_CSR1_OFFSET);
+ val |= INTEGRATOR_EBI_WRITE_ENABLE;
+ writel(val, ebi_base + INTEGRATOR_EBI_CSR1_OFFSET);
+
+ /* Lock the EBI again */
+ writel(0, ebi_base + INTEGRATOR_EBI_LOCK_OFFSET);
+ iounmap(ebi_base);
+
+ return 0;
+}
+
+static void ap_flash_set_vpp(struct map_info *map, int on)
+{
+ int ret;
+
+ if (on) {
+ ret = regmap_write(syscon_regmap,
+ INTEGRATOR_SC_CTRLS_OFFSET,
+ INTEGRATOR_SC_CTRL_FLVPPEN | INTEGRATOR_SC_CTRL_FLWP);
+ if (ret)
+ pr_err("error enabling AP VPP\n");
+ } else {
+ ret = regmap_write(syscon_regmap,
+ INTEGRATOR_SC_CTRLC_OFFSET,
+ INTEGRATOR_SC_CTRL_FLVPPEN | INTEGRATOR_SC_CTRL_FLWP);
+ if (ret)
+ pr_err("error disabling AP VPP\n");
+ }
+}
+
+/*
+ * Flash protection handling for the Integrator/CP
+ */
+
+#define INTCP_FLASHPROG_OFFSET 0x04
+#define CINTEGRATOR_FLVPPEN BIT(0)
+#define CINTEGRATOR_FLWREN BIT(1)
+#define CINTEGRATOR_FLMASK (BIT(0)|BIT(1))
+
+static void cp_flash_set_vpp(struct map_info *map, int on)
+{
+ int ret;
+
+ if (on) {
+ ret = regmap_update_bits(syscon_regmap,
+ INTCP_FLASHPROG_OFFSET,
+ CINTEGRATOR_FLMASK,
+ CINTEGRATOR_FLVPPEN | CINTEGRATOR_FLWREN);
+ if (ret)
+ pr_err("error setting CP VPP\n");
+ } else {
+ ret = regmap_update_bits(syscon_regmap,
+ INTCP_FLASHPROG_OFFSET,
+ CINTEGRATOR_FLMASK,
+ 0);
+ if (ret)
+ pr_err("error setting CP VPP\n");
+ }
+}
+
+/*
+ * Flash protection handling for the Versatiles and RealViews
+ */
+
+#define VERSATILE_SYS_FLASH_OFFSET 0x4C
+
+static void versatile_flash_set_vpp(struct map_info *map, int on)
+{
+ int ret;
+
+ ret = regmap_update_bits(syscon_regmap, VERSATILE_SYS_FLASH_OFFSET,
+ 0x01, !!on);
+ if (ret)
+ pr_err("error setting Versatile VPP\n");
+}
+
+int of_flash_probe_versatile(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ struct device_node *sysnp;
+ const struct of_device_id *devid;
+ struct regmap *rmap;
+ static enum versatile_flashprot versatile_flashprot;
+ int ret;
+
+ /* Not all flash chips use this protection line */
+ if (!of_device_is_compatible(np, "arm,versatile-flash"))
+ return 0;
+
+ /* For first chip probed, look up the syscon regmap */
+ if (!syscon_regmap) {
+ sysnp = of_find_matching_node_and_match(NULL,
+ syscon_match,
+ &devid);
+ if (!sysnp)
+ return -ENODEV;
+
+ versatile_flashprot = (uintptr_t)devid->data;
+ rmap = syscon_node_to_regmap(sysnp);
+ of_node_put(sysnp);
+ if (IS_ERR(rmap))
+ return PTR_ERR(rmap);
+
+ syscon_regmap = rmap;
+ }
+
+ switch (versatile_flashprot) {
+ case INTEGRATOR_AP_FLASHPROT:
+ ret = ap_flash_init(pdev);
+ if (ret)
+ return ret;
+ map->set_vpp = ap_flash_set_vpp;
+ dev_info(&pdev->dev, "Integrator/AP flash protection\n");
+ break;
+ case INTEGRATOR_CP_FLASHPROT:
+ map->set_vpp = cp_flash_set_vpp;
+ dev_info(&pdev->dev, "Integrator/CP flash protection\n");
+ break;
+ case VERSATILE_FLASHPROT:
+ case REALVIEW_FLASHPROT:
+ map->set_vpp = versatile_flash_set_vpp;
+ dev_info(&pdev->dev, "versatile/realview flash protection\n");
+ break;
+ default:
+ dev_info(&pdev->dev, "device marked as Versatile flash "
+ "but no system controller was found\n");
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/mtd/maps/physmap-versatile.h b/drivers/mtd/maps/physmap-versatile.h
new file mode 100644
index 0000000000..9cf39d031f
--- /dev/null
+++ b/drivers/mtd/maps/physmap-versatile.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/of.h>
+#include <linux/mtd/map.h>
+
+#ifdef CONFIG_MTD_PHYSMAP_VERSATILE
+int of_flash_probe_versatile(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map);
+#else
+static inline
+int of_flash_probe_versatile(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
new file mode 100644
index 0000000000..ecf68922da
--- /dev/null
+++ b/drivers/mtd/maps/pismo.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PISMO memory driver - http://www.pismoworld.org/
+ *
+ * For ARM Realview and Versatile platforms
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/plat-ram.h>
+#include <linux/mtd/pismo.h>
+
+#define PISMO_NUM_CS 5
+
+struct pismo_cs_block {
+ u8 type;
+ u8 width;
+ __le16 access;
+ __le32 size;
+ u32 reserved[2];
+ char device[32];
+} __packed;
+
+struct pismo_eeprom {
+ struct pismo_cs_block cs[PISMO_NUM_CS];
+ char board[15];
+ u8 sum;
+} __packed;
+
+struct pismo_mem {
+ phys_addr_t base;
+ u32 size;
+ u16 access;
+ u8 width;
+ u8 type;
+};
+
+struct pismo_data {
+ struct i2c_client *client;
+ void (*vpp)(void *, int);
+ void *vpp_data;
+ struct platform_device *dev[PISMO_NUM_CS];
+};
+
+static void pismo_set_vpp(struct platform_device *pdev, int on)
+{
+ struct i2c_client *client = to_i2c_client(pdev->dev.parent);
+ struct pismo_data *pismo = i2c_get_clientdata(client);
+
+ pismo->vpp(pismo->vpp_data, on);
+}
+
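+/*
+ * The EEPROM encodes the bus width as a power of two in the low nibble:
+ * 0 -> 1 byte, 1 -> 2 bytes, 2 -> 4 bytes; anything wider is rejected.
+ */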
+static unsigned int pismo_width_to_bytes(unsigned int width)
+{
+ width &= 15;
+ if (width > 2)
+ return 0;
+ return 1 << width;
+}
+
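+/*
+ * A single combined transfer: write the one-byte EEPROM offset, then a
+ * repeated-start read of 'size' bytes, so the address pointer cannot be
+ * disturbed by another bus master in between.
+ */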
+static int pismo_eeprom_read(struct i2c_client *client, void *buf, u8 addr,
+ size_t size)
+{
+ int ret;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .len = sizeof(addr),
+ .buf = &addr,
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = size,
+ .buf = buf,
+ },
+ };
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+
+ return ret == ARRAY_SIZE(msg) ? size : -EIO;
+}
+
+static int pismo_add_device(struct pismo_data *pismo, int i,
+ struct pismo_mem *region, const char *name,
+ void *pdata, size_t psize)
+{
+ struct platform_device *dev;
+ struct resource res = { };
+ phys_addr_t base = region->base;
+ int ret;
+
+ if (base == ~0)
+ return -ENXIO;
+
+ res.start = base;
+ res.end = base + region->size - 1;
+ res.flags = IORESOURCE_MEM;
+
+ dev = platform_device_alloc(name, i);
+ if (!dev)
+ return -ENOMEM;
+ dev->dev.parent = &pismo->client->dev;
+
+ do {
+ ret = platform_device_add_resources(dev, &res, 1);
+ if (ret)
+ break;
+
+ ret = platform_device_add_data(dev, pdata, psize);
+ if (ret)
+ break;
+
+ ret = platform_device_add(dev);
+ if (ret)
+ break;
+
+ pismo->dev[i] = dev;
+ return 0;
+ } while (0);
+
+ platform_device_put(dev);
+ return ret;
+}
+
+static int pismo_add_nor(struct pismo_data *pismo, int i,
+ struct pismo_mem *region)
+{
+ struct physmap_flash_data data = {
+ .width = region->width,
+ };
+
+ if (pismo->vpp)
+ data.set_vpp = pismo_set_vpp;
+
+ return pismo_add_device(pismo, i, region, "physmap-flash",
+ &data, sizeof(data));
+}
+
+static int pismo_add_sram(struct pismo_data *pismo, int i,
+ struct pismo_mem *region)
+{
+ struct platdata_mtd_ram data = {
+ .bankwidth = region->width,
+ };
+
+ return pismo_add_device(pismo, i, region, "mtd-ram",
+ &data, sizeof(data));
+}
+
+static void pismo_add_one(struct pismo_data *pismo, int i,
+ const struct pismo_cs_block *cs, phys_addr_t base)
+{
+ struct device *dev = &pismo->client->dev;
+ struct pismo_mem region;
+
+ region.base = base;
+ region.type = cs->type;
+ region.width = pismo_width_to_bytes(cs->width);
+ region.access = le16_to_cpu(cs->access);
+ region.size = le32_to_cpu(cs->size);
+
+ if (region.width == 0) {
+ dev_err(dev, "cs%u: bad width: %02x, ignoring\n", i, cs->width);
+ return;
+ }
+
+ /*
+ * FIXME: may need to program the platform's memory controller here,
+ * but at the moment we assume that it has already been correctly
+ * set up. The memory controller could also tell us the base address.
+ */
+
+ dev_info(dev, "cs%u: %.32s: type %02x access %u00ps size %uK\n",
+ i, cs->device, region.type, region.access, region.size / 1024);
+
+ switch (region.type) {
+ case 0:
+ break;
+ case 1:
+ /* static DOC */
+ break;
+ case 2:
+ /* static NOR */
+ pismo_add_nor(pismo, i, &region);
+ break;
+ case 3:
+ /* static RAM */
+ pismo_add_sram(pismo, i, &region);
+ break;
+ }
+}
+
+static void pismo_remove(struct i2c_client *client)
+{
+ struct pismo_data *pismo = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pismo->dev); i++)
+ platform_device_unregister(pismo->dev[i]);
+
+ kfree(pismo);
+}
+
+static int pismo_probe(struct i2c_client *client)
+{
+ struct pismo_pdata *pdata = client->dev.platform_data;
+ struct pismo_eeprom eeprom;
+ struct pismo_data *pismo;
+ int ret, i;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "functionality mismatch\n");
+ return -EIO;
+ }
+
+ pismo = kzalloc(sizeof(*pismo), GFP_KERNEL);
+ if (!pismo)
+ return -ENOMEM;
+
+ pismo->client = client;
+ if (pdata) {
+ pismo->vpp = pdata->set_vpp;
+ pismo->vpp_data = pdata->vpp_data;
+ }
+ i2c_set_clientdata(client, pismo);
+
+ ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom));
+ if (ret < 0) {
+ dev_err(&client->dev, "error reading EEPROM: %d\n", ret);
+ goto exit_free;
+ }
+
+ dev_info(&client->dev, "%.15s board found\n", eeprom.board);
+
+ for (i = 0; i < ARRAY_SIZE(eeprom.cs); i++)
+ if (eeprom.cs[i].type != 0xff)
+ pismo_add_one(pismo, i, &eeprom.cs[i],
+ pdata->cs_addrs[i]);
+
+ return 0;
+
+ exit_free:
+ kfree(pismo);
+ return ret;
+}
+
+static const struct i2c_device_id pismo_id[] = {
+ { "pismo" },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, pismo_id);
+
+static struct i2c_driver pismo_driver = {
+ .driver = {
+ .name = "pismo",
+ },
+ .probe = pismo_probe,
+ .remove = pismo_remove,
+ .id_table = pismo_id,
+};
+
+static int __init pismo_init(void)
+{
+ BUILD_BUG_ON(sizeof(struct pismo_cs_block) != 48);
+ BUILD_BUG_ON(sizeof(struct pismo_eeprom) != 256);
+
+ return i2c_add_driver(&pismo_driver);
+}
+module_init(pismo_init);
+
+static void __exit pismo_exit(void)
+{
+ i2c_del_driver(&pismo_driver);
+}
+module_exit(pismo_exit);
+
+MODULE_AUTHOR("Russell King <linux@arm.linux.org.uk>");
+MODULE_DESCRIPTION("PISMO memory driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
new file mode 100644
index 0000000000..4c921dce73
--- /dev/null
+++ b/drivers/mtd/maps/plat-ram.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* drivers/mtd/maps/plat-ram.c
+ *
+ * (c) 2004-2005 Simtec Electronics
+ * http://www.simtec.co.uk/products/SWLINUX/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Generic platform device based RAM map
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/plat-ram.h>
+
+#include <asm/io.h>
+
+/* private structure for each mtd platform ram device created */
+
+struct platram_info {
+ struct device *dev;
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct platdata_mtd_ram *pdata;
+};
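+
+/*
+ * A minimal board-side sketch (hypothetical names and values) of how a
+ * matching device could be declared; only fields this driver reads are
+ * shown:
+ *
+ *	static struct platdata_mtd_ram board_ram_pdata = {
+ *		.mapname	= "board-sram",
+ *		.bankwidth	= 2,
+ *	};
+ *
+ *	static struct platform_device board_ram_dev = {
+ *		.name	= "mtd-ram",
+ *		.id	= 0,
+ *		.dev	= { .platform_data = &board_ram_pdata, },
+ *	};
+ *
+ * plus one IORESOURCE_MEM resource describing the RAM window.
+ */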
+
+/* to_platram_info()
+ *
+ * device private data to struct platram_info conversion
+*/
+
+static inline struct platram_info *to_platram_info(struct platform_device *dev)
+{
+ return platform_get_drvdata(dev);
+}
+
+/* platram_setrw
+ *
+ * call the platform device's set rw/ro control
+ *
+ * to = 0 => read-only
+ * = 1 => read-write
+*/
+
+static inline void platram_setrw(struct platram_info *info, int to)
+{
+ if (info->pdata == NULL)
+ return;
+
+ if (info->pdata->set_rw != NULL)
+ (info->pdata->set_rw)(info->dev, to);
+}
+
+/* platram_remove
+ *
+ * called to remove the device from the driver's control
+*/
+
+static int platram_remove(struct platform_device *pdev)
+{
+ struct platram_info *info = to_platram_info(pdev);
+
+ dev_dbg(&pdev->dev, "removing device\n");
+
+ if (info == NULL)
+ return 0;
+
+ if (info->mtd) {
+ mtd_device_unregister(info->mtd);
+ map_destroy(info->mtd);
+ }
+
+ /* ensure ram is left read-only */
+
+ platram_setrw(info, PLATRAM_RO);
+
+ kfree(info);
+
+ return 0;
+}
+
+/* platram_probe
+ *
+ * called from the device driver core when a device matching our
+ * driver is found.
+*/
+
+static int platram_probe(struct platform_device *pdev)
+{
+ struct platdata_mtd_ram *pdata;
+ struct platram_info *info;
+ struct resource *res;
+ int err = 0;
+
+ dev_dbg(&pdev->dev, "probe entered\n");
+
+ if (dev_get_platdata(&pdev->dev) == NULL) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ err = -ENOENT;
+ goto exit_error;
+ }
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ info->dev = &pdev->dev;
+ info->pdata = pdata;
+
+ /* get the resource for the memory mapping */
+ info->map.virt = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(info->map.virt)) {
+ err = PTR_ERR(info->map.virt);
+ goto exit_free;
+ }
+
+ dev_dbg(&pdev->dev, "got platform resource %p (0x%llx)\n", res,
+ (unsigned long long)res->start);
+
+ /* setup map parameters */
+
+ info->map.phys = res->start;
+ info->map.size = resource_size(res);
+ info->map.name = pdata->mapname != NULL ?
+ (char *)pdata->mapname : (char *)pdev->name;
+ info->map.bankwidth = pdata->bankwidth;
+
+ dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size);
+
+ simple_map_init(&info->map);
+
+ dev_dbg(&pdev->dev, "initialised map, probing for mtd\n");
+
+ /* probe for the right mtd map driver
+ * supplied by the platform_data struct */
+
+ if (pdata->map_probes) {
+ const char * const *map_probes = pdata->map_probes;
+
+ for ( ; !info->mtd && *map_probes; map_probes++)
+ info->mtd = do_map_probe(*map_probes, &info->map);
+ } else {
+ /* fall back to map_ram */
+ info->mtd = do_map_probe("map_ram", &info->map);
+ }
+
+ if (info->mtd == NULL) {
+ dev_err(&pdev->dev, "failed to probe for map_ram\n");
+ err = -ENOMEM;
+ goto exit_free;
+ }
+
+ info->mtd->dev.parent = &pdev->dev;
+
+ platram_setrw(info, PLATRAM_RW);
+
+ /* check to see if there are any available partitions, or whether
+ * to add this device whole */
+
+ err = mtd_device_parse_register(info->mtd, pdata->probes, NULL,
+ pdata->partitions,
+ pdata->nr_partitions);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register mtd device\n");
+ goto exit_free;
+ }
+
+ dev_info(&pdev->dev, "registered mtd device\n");
+
+ if (pdata->nr_partitions) {
+ /* add the whole device. */
+ err = mtd_device_register(info->mtd, NULL, 0);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to register the entire device\n");
+ goto exit_free;
+ }
+ }
+
+ return 0;
+
+ exit_free:
+ platram_remove(pdev);
+ exit_error:
+ return err;
+}
+
+/* device driver info */
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:mtd-ram");
+
+static struct platform_driver platram_driver = {
+ .probe = platram_probe,
+ .remove = platram_remove,
+ .driver = {
+ .name = "mtd-ram",
+ },
+};
+
+module_platform_driver(platram_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("MTD platform RAM map driver");
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
new file mode 100644
index 0000000000..62a5bf41a6
--- /dev/null
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Map driver for Intel XScale PXA2xx platforms.
+ *
+ * Author: Nicolas Pitre
+ * Copyright: (C) 2001 MontaVista Software Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/mach/flash.h>
+
+#define CACHELINESIZE 32
+
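+/*
+ * The window is also mapped with caching enabled for faster reads, so
+ * after a program or erase the stale D-cache lines covering the range
+ * must be invalidated by hand. The coprocessor write
+ * "mcr p15, 0, Rd, c7, c6, 1" invalidates one data cache line on
+ * XScale, so the loop below walks the range one 32-byte line at a
+ * time after rounding the start down to a line boundary.
+ */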
+static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
+ ssize_t len)
+{
+ unsigned long start = (unsigned long)map->cached + from;
+ unsigned long end = start + len;
+
+ start &= ~(CACHELINESIZE - 1);
+ while (start < end) {
+ /* invalidate D cache line */
+ asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start));
+ start += CACHELINESIZE;
+ }
+}
+
+struct pxa2xx_flash_info {
+ struct mtd_info *mtd;
+ struct map_info map;
+};
+
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
+
+static int pxa2xx_flash_probe(struct platform_device *pdev)
+{
+ struct flash_platform_data *flash = dev_get_platdata(&pdev->dev);
+ struct pxa2xx_flash_info *info;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ info = kzalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->map.name = flash->name;
+ info->map.bankwidth = flash->width;
+ info->map.phys = res->start;
+ info->map.size = resource_size(res);
+
+ info->map.virt = ioremap(info->map.phys, info->map.size);
+ if (!info->map.virt) {
+ printk(KERN_WARNING "Failed to ioremap %s\n",
+ info->map.name);
+ kfree(info);
+ return -ENOMEM;
+ }
+ info->map.cached = ioremap_cache(info->map.phys, info->map.size);
+ if (!info->map.cached)
+ printk(KERN_WARNING "Failed to ioremap cached %s\n",
+ info->map.name);
+ info->map.inval_cache = pxa2xx_map_inval_cache;
+ simple_map_init(&info->map);
+
+ printk(KERN_NOTICE
+ "Probing %s at physical address 0x%08lx"
+ " (%d-bit bankwidth)\n",
+ info->map.name, (unsigned long)info->map.phys,
+ info->map.bankwidth * 8);
+
+ info->mtd = do_map_probe(flash->map_name, &info->map);
+
+ if (!info->mtd) {
+ iounmap((void *)info->map.virt);
+ if (info->map.cached)
+ iounmap(info->map.cached);
+ kfree(info);
+ return -EIO;
+ }
+ info->mtd->dev.parent = &pdev->dev;
+
+ mtd_device_parse_register(info->mtd, probes, NULL, flash->parts,
+ flash->nr_parts);
+
+ platform_set_drvdata(pdev, info);
+ return 0;
+}
+
+static int pxa2xx_flash_remove(struct platform_device *dev)
+{
+ struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
+
+ mtd_device_unregister(info->mtd);
+
+ map_destroy(info->mtd);
+ iounmap(info->map.virt);
+ if (info->map.cached)
+ iounmap(info->map.cached);
+ kfree(info);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static void pxa2xx_flash_shutdown(struct platform_device *dev)
+{
+ struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
+
+ if (info && mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
+}
+#else
+#define pxa2xx_flash_shutdown NULL
+#endif
+
+static struct platform_driver pxa2xx_flash_driver = {
+ .driver = {
+ .name = "pxa2xx-flash",
+ },
+ .probe = pxa2xx_flash_probe,
+ .remove = pxa2xx_flash_remove,
+ .shutdown = pxa2xx_flash_shutdown,
+};
+
+module_platform_driver(pxa2xx_flash_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
+MODULE_DESCRIPTION("MTD map driver for Intel XScale PXA2xx");
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
new file mode 100644
index 0000000000..d3d4e987c1
--- /dev/null
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Flash memory access on SA11x0 based devices
+ *
+ * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
+
+#include <mach/hardware.h>
+#include <linux/sizes.h>
+#include <asm/mach/flash.h>
+
+struct sa_subdev_info {
+ char name[16];
+ struct map_info map;
+ struct mtd_info *mtd;
+ struct flash_platform_data *plat;
+};
+
+struct sa_info {
+ struct mtd_info *mtd;
+ int num_subdev;
+ struct sa_subdev_info subdev[];
+};
+
+static DEFINE_SPINLOCK(sa1100_vpp_lock);
+static int sa1100_vpp_refcnt;
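+
+/*
+ * All flash banks share a single physical VPP line, so enable requests
+ * are reference counted: the platform's set_vpp() hook is called only
+ * for the first nested "on" and the last nested "off".
+ */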
+static void sa1100_set_vpp(struct map_info *map, int on)
+{
+ struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sa1100_vpp_lock, flags);
+ if (on) {
+ if (++sa1100_vpp_refcnt == 1) /* first nested 'on' */
+ subdev->plat->set_vpp(1);
+ } else {
+ if (--sa1100_vpp_refcnt == 0) /* last nested 'off' */
+ subdev->plat->set_vpp(0);
+ }
+ spin_unlock_irqrestore(&sa1100_vpp_lock, flags);
+}
+
+static void sa1100_destroy_subdev(struct sa_subdev_info *subdev)
+{
+ if (subdev->mtd)
+ map_destroy(subdev->mtd);
+ if (subdev->map.virt)
+ iounmap(subdev->map.virt);
+ release_mem_region(subdev->map.phys, subdev->map.size);
+}
+
+static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *res)
+{
+ unsigned long phys;
+ unsigned int size;
+ int ret;
+
+ phys = res->start;
+ size = res->end - phys + 1;
+
+ /*
+ * Retrieve the bankwidth from the MSC registers.
+ * We currently only implement CS0 and CS1 here.
+ */
+ switch (phys) {
+ default:
+ printk(KERN_WARNING "SA1100 flash: unknown base address "
+ "0x%08lx, assuming CS0\n", phys);
+ fallthrough;
+ case SA1100_CS0_PHYS:
+ subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
+ break;
+
+ case SA1100_CS1_PHYS:
+ subdev->map.bankwidth = ((MSC0 >> 16) & MSC_RBW) ? 2 : 4;
+ break;
+ }
+
+ if (!request_mem_region(phys, size, subdev->name)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (subdev->plat->set_vpp)
+ subdev->map.set_vpp = sa1100_set_vpp;
+
+ subdev->map.phys = phys;
+ subdev->map.size = size;
+ subdev->map.virt = ioremap(phys, size);
+ if (!subdev->map.virt) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ simple_map_init(&subdev->map);
+
+ /*
+ * Now let's probe for the actual flash. Do it here since
+ * specific machine settings might have been set above.
+ */
+ subdev->mtd = do_map_probe(subdev->plat->map_name, &subdev->map);
+ if (subdev->mtd == NULL) {
+ ret = -ENXIO;
+ goto err;
+ }
+
+ printk(KERN_INFO "SA1100 flash: CFI device at 0x%08lx, %uMiB, %d-bit\n",
+ phys, (unsigned)(subdev->mtd->size >> 20),
+ subdev->map.bankwidth * 8);
+
+ return 0;
+
+ err:
+ sa1100_destroy_subdev(subdev);
+ out:
+ return ret;
+}
+
+static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *plat)
+{
+ int i;
+
+ if (info->mtd) {
+ mtd_device_unregister(info->mtd);
+ if (info->mtd != info->subdev[0].mtd)
+ mtd_concat_destroy(info->mtd);
+ }
+
+ for (i = info->num_subdev - 1; i >= 0; i--)
+ sa1100_destroy_subdev(&info->subdev[i]);
+ kfree(info);
+
+ if (plat->exit)
+ plat->exit();
+}
+
+static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
+ struct flash_platform_data *plat)
+{
+ struct sa_info *info;
+ int nr, size, i, ret = 0;
+
+ /*
+ * Count number of devices.
+ */
+ for (nr = 0; ; nr++)
+ if (!platform_get_resource(pdev, IORESOURCE_MEM, nr))
+ break;
+
+ if (nr == 0) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ size = sizeof(struct sa_info) + sizeof(struct sa_subdev_info) * nr;
+
+ /*
+ * Allocate the map_info structs in one go.
+ */
+ info = kzalloc(size, GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (plat->init) {
+ ret = plat->init();
+ if (ret)
+ goto err;
+ }
+
+ /*
+ * Claim and then map the memory regions.
+ */
+ for (i = 0; i < nr; i++) {
+ struct sa_subdev_info *subdev = &info->subdev[i];
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
+ break;
+
+ subdev->map.name = subdev->name;
+ sprintf(subdev->name, "%s-%d", plat->name, i);
+ subdev->plat = plat;
+
+ ret = sa1100_probe_subdev(subdev, res);
+ if (ret)
+ break;
+ }
+
+ info->num_subdev = i;
+
+ /*
+ * ENXIO is special. It means we didn't find a chip when we probed.
+ */
+ if (ret != 0 && !(ret == -ENXIO && info->num_subdev > 0))
+ goto err;
+
+ /*
+ * If we found one device, don't bother with concat support. If
+ * we found multiple devices, use concat if we have it available,
+ * otherwise fail. Either way, it'll be called "sa1100".
+ */
+ if (info->num_subdev == 1) {
+ strcpy(info->subdev[0].name, plat->name);
+ info->mtd = info->subdev[0].mtd;
+ ret = 0;
+ } else if (info->num_subdev > 1) {
+ struct mtd_info **cdev;
+
+ cdev = kmalloc_array(nr, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ for (i = 0; i < info->num_subdev; i++)
+ cdev[i] = info->subdev[i].mtd;
+
+ info->mtd = mtd_concat_create(cdev, info->num_subdev,
+ plat->name);
+ kfree(cdev);
+ if (info->mtd == NULL) {
+ ret = -ENXIO;
+ goto err;
+ }
+ }
+ info->mtd->dev.parent = &pdev->dev;
+
+ if (ret == 0)
+ return info;
+
+ err:
+ sa1100_destroy(info, plat);
+ out:
+ return ERR_PTR(ret);
+}
+
+static const char * const part_probes[] = { "cmdlinepart", "RedBoot", NULL };
+
+static int sa1100_mtd_probe(struct platform_device *pdev)
+{
+ struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
+ struct sa_info *info;
+ int err;
+
+ if (!plat)
+ return -ENODEV;
+
+ info = sa1100_setup_mtd(pdev, plat);
+ if (IS_ERR(info)) {
+ err = PTR_ERR(info);
+ goto out;
+ }
+
+ /*
+ * Partition selection stuff.
+ */
+ mtd_device_parse_register(info->mtd, part_probes, NULL, plat->parts,
+ plat->nr_parts);
+
+ platform_set_drvdata(pdev, info);
+ err = 0;
+
+ out:
+ return err;
+}
+
+static int sa1100_mtd_remove(struct platform_device *pdev)
+{
+ struct sa_info *info = platform_get_drvdata(pdev);
+ struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
+
+ sa1100_destroy(info, plat);
+
+ return 0;
+}
+
+static struct platform_driver sa1100_mtd_driver = {
+ .probe = sa1100_mtd_probe,
+ .remove = sa1100_mtd_remove,
+ .driver = {
+ .name = "sa1100-mtd",
+ },
+};
+
+module_platform_driver(sa1100_mtd_driver);
+
+MODULE_AUTHOR("Nicolas Pitre");
+MODULE_DESCRIPTION("SA1100 CFI map driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sa1100-mtd");
diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c
new file mode 100644
index 0000000000..9d48a261c2
--- /dev/null
+++ b/drivers/mtd/maps/sbc_gxx.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* sbc_gxx.c -- MTD map driver for Arcom Control Systems SBC-MediaGX,
+ SBC-GXm and SBC-GX1 series boards.
+
+ Copyright (C) 2001 Arcom Control System Ltd
+
+
+The SBC-MediaGX / SBC-GXx has up to 16 MiB of
+Intel StrataFlash (28F320/28F640) in x8 mode.
+
+This driver uses the CFI probe and Intel Extended Command Set drivers.
+
+The flash is accessed as follows:
+
+ 16 KiB memory window at 0xdc000-0xdffff
+
+ Two IO address locations for paging
+
+ 0x258
+ bit 0-7: address bit 14-21
+ 0x259
+ bit 0-1: address bit 22-23
+ bit 7: 0 - reset/powered down
+ 1 - device enabled
+
+The single flash device is divided into three partitions which appear as
+separate MTD devices.
+
+25/04/2001 AJL (Arcom) Modified signon strings and partition sizes
+ (to support bzImages up to 638KiB-ish)
+*/
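+
+/* A worked example of the paging scheme above (illustrative offset, not
+   from the board manual): flash offset 0x123456 selects page
+   0x123456 >> 14 = 0x48, which is written to the paging ports together
+   with the enable bit, while the low 14 bits (0x123456 & 0x3fff = 0x3456)
+   index into the 16 KiB window at 0xdc000. */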
+
+// Includes
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <asm/io.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+// Defines
+
+// - Hardware specific
+
+#define WINDOW_START 0xdc000
+
+/* Number of bits in offset. */
+#define WINDOW_SHIFT 14
+#define WINDOW_LENGTH (1 << WINDOW_SHIFT)
+
+/* The bits for the offset into the window. */
+#define WINDOW_MASK (WINDOW_LENGTH-1)
+#define PAGE_IO 0x258
+#define PAGE_IO_SIZE 2
+
+/* bit 7 of 0x259 must be 1 to enable device. */
+#define DEVICE_ENABLE 0x8000
+
+// - Flash / Partition sizing
+
+#define MAX_SIZE_KiB 16384
+#define BOOT_PARTITION_SIZE_KiB 768
+#define DATA_PARTITION_SIZE_KiB 1280
+#define APP_PARTITION_SIZE_KiB 6144
+
+// Globals
+
+static volatile int page_in_window = -1; // Current page in window.
+static void __iomem *iomapadr;
+static DEFINE_SPINLOCK(sbc_gxx_spin);
+
+/* partition_info gives details on the logical partitions that the single
+ * flash device is split into. If the size is zero we use up to the end of
+ * the device. */
+static const struct mtd_partition partition_info[] = {
+ { .name = "SBC-GXx flash boot partition",
+ .offset = 0,
+ .size = BOOT_PARTITION_SIZE_KiB*1024 },
+ { .name = "SBC-GXx flash data partition",
+ .offset = BOOT_PARTITION_SIZE_KiB*1024,
+ .size = (DATA_PARTITION_SIZE_KiB)*1024 },
+ { .name = "SBC-GXx flash application partition",
+ .offset = (BOOT_PARTITION_SIZE_KiB+DATA_PARTITION_SIZE_KiB)*1024 }
+};
+
+#define NUM_PARTITIONS 3
+
+static inline void sbc_gxx_page(struct map_info *map, unsigned long ofs)
+{
+ unsigned long page = ofs >> WINDOW_SHIFT;
+
+ if( page!=page_in_window ) {
+ outw( page | DEVICE_ENABLE, PAGE_IO );
+ page_in_window = page;
+ }
+}
+
+
+static map_word sbc_gxx_read8(struct map_info *map, unsigned long ofs)
+{
+ map_word ret;
+ spin_lock(&sbc_gxx_spin);
+ sbc_gxx_page(map, ofs);
+ ret.x[0] = readb(iomapadr + (ofs & WINDOW_MASK));
+ spin_unlock(&sbc_gxx_spin);
+ return ret;
+}
+
+static void sbc_gxx_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ while(len) {
+ unsigned long thislen = len;
+ if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
+ thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
+
+ spin_lock(&sbc_gxx_spin);
+ sbc_gxx_page(map, from);
+ memcpy_fromio(to, iomapadr + (from & WINDOW_MASK), thislen);
+ spin_unlock(&sbc_gxx_spin);
+ to += thislen;
+ from += thislen;
+ len -= thislen;
+ }
+}
+
+static void sbc_gxx_write8(struct map_info *map, map_word d, unsigned long adr)
+{
+ spin_lock(&sbc_gxx_spin);
+ sbc_gxx_page(map, adr);
+ writeb(d.x[0], iomapadr + (adr & WINDOW_MASK));
+ spin_unlock(&sbc_gxx_spin);
+}
+
+static void sbc_gxx_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ while(len) {
+ unsigned long thislen = len;
+ if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
+ thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
+
+ spin_lock(&sbc_gxx_spin);
+ sbc_gxx_page(map, to);
+ memcpy_toio(iomapadr + (to & WINDOW_MASK), from, thislen);
+ spin_unlock(&sbc_gxx_spin);
+ to += thislen;
+ from += thislen;
+ len -= thislen;
+ }
+}
+
+static struct map_info sbc_gxx_map = {
+ .name = "SBC-GXx flash",
+ .phys = NO_XIP,
+ .size = MAX_SIZE_KiB*1024, /* this must be set to the maximum possible
+ amount of flash so the cfi probe routines
+ find all the chips */
+ .bankwidth = 1,
+ .read = sbc_gxx_read8,
+ .copy_from = sbc_gxx_copy_from,
+ .write = sbc_gxx_write8,
+ .copy_to = sbc_gxx_copy_to
+};
+
+/* MTD device for all of the flash. */
+static struct mtd_info *all_mtd;
+
+static void cleanup_sbc_gxx(void)
+{
+ if( all_mtd ) {
+ mtd_device_unregister(all_mtd);
+ map_destroy( all_mtd );
+ }
+
+ iounmap(iomapadr);
+ release_region(PAGE_IO,PAGE_IO_SIZE);
+}
+
+static int __init init_sbc_gxx(void)
+{
+ iomapadr = ioremap(WINDOW_START, WINDOW_LENGTH);
+ if (!iomapadr) {
+ printk( KERN_ERR"%s: failed to ioremap memory region\n",
+ sbc_gxx_map.name );
+ return -EIO;
+ }
+
+ if (!request_region( PAGE_IO, PAGE_IO_SIZE, "SBC-GXx flash")) {
+ printk( KERN_ERR"%s: IO ports 0x%x-0x%x in use\n",
+ sbc_gxx_map.name,
+ PAGE_IO, PAGE_IO+PAGE_IO_SIZE-1 );
+ iounmap(iomapadr);
+ return -EAGAIN;
+ }
+
+
+ printk( KERN_INFO"%s: IO:0x%x-0x%x MEM:0x%x-0x%x\n",
+ sbc_gxx_map.name,
+ PAGE_IO, PAGE_IO+PAGE_IO_SIZE-1,
+ WINDOW_START, WINDOW_START+WINDOW_LENGTH-1 );
+
+ /* Probe for chip. */
+ all_mtd = do_map_probe( "cfi_probe", &sbc_gxx_map );
+ if( !all_mtd ) {
+ cleanup_sbc_gxx();
+ return -ENXIO;
+ }
+
+ all_mtd->owner = THIS_MODULE;
+
+ /* Create MTD devices for each partition. */
+ mtd_device_register(all_mtd, partition_info, NUM_PARTITIONS);
+
+ return 0;
+}
+
+module_init(init_sbc_gxx);
+module_exit(cleanup_sbc_gxx);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arcom Control Systems Ltd.");
+MODULE_DESCRIPTION("MTD map driver for SBC-GXm and SBC-GX1 series boards");
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
new file mode 100644
index 0000000000..8ef7aec634
--- /dev/null
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* sc520cdp.c -- MTD map driver for AMD SC520 Customer Development Platform
+ *
+ * Copyright (C) 2001 Sysgo Real-Time Solutions GmbH
+ *
+ * The SC520CDP is an evaluation board for the Elan SC520 processor available
+ * from AMD. It has two banks of 32-bit Flash ROM, each 8 Megabytes in size,
+ * and up to 512 KiB of 8-bit DIL Flash ROM.
+ * For details see https://www.amd.com/products/epd/desiging/evalboards/18.elansc520/520_cdp_brief/index.html
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/concat.h>
+
+/*
+** The Embedded Systems BIOS decodes the first FLASH starting at
+** 0x8400000. This is a *terrible* place for it because accessing
+** the flash at this location causes the A22 address line to be high
+** (that's what the binary representation of 0x8400000 implies). But this
+** is the highest order address line on the raw flash devices themselves!!
+** This causes the top HALF of the flash to be accessed first. Beyond
+** the physical limits of the flash, the flash chip aliases over (to
+** 0x8800000), which causes the bottom half to be accessed. This splits the
+** flash into two and inverts it! If you then try to access this from another
+** program that does NOT do this insanity, then you *will* access the
+** first half of the flash, but not find what you expect there. That
+** stuff is in the *second* half! Similarly, the address used by the
+** BIOS for the second FLASH bank is also quite a bad choice.
+** If REPROGRAM_PAR is defined below (the default), then this driver will
+** choose more useful addresses for the FLASH banks by reprogramming the
+** responsible PARxx registers in the SC520's MMCR region. This will
+** cause the settings to be incompatible with the BIOS's settings, which
+** shouldn't be a problem since you are running Linux, (i.e. the BIOS is
+** not much use anyway). However, if you need to be compatible with
+** the BIOS for some reason, just undefine REPROGRAM_PAR.
+*/
+#define REPROGRAM_PAR
+
+
+
+#ifdef REPROGRAM_PAR
+
+/* These are the addresses we want.. */
+#define WINDOW_ADDR_0 0x08800000
+#define WINDOW_ADDR_1 0x09000000
+#define WINDOW_ADDR_2 0x09800000
+
+/* .. and these are the addresses the BIOS gives us */
+#define WINDOW_ADDR_0_BIOS 0x08400000
+#define WINDOW_ADDR_1_BIOS 0x08c00000
+#define WINDOW_ADDR_2_BIOS 0x09400000
+
+#else
+
+#define WINDOW_ADDR_0 0x08400000
+#define WINDOW_ADDR_1 0x08C00000
+#define WINDOW_ADDR_2 0x09400000
+
+#endif
+
+#define WINDOW_SIZE_0 0x00800000
+#define WINDOW_SIZE_1 0x00800000
+#define WINDOW_SIZE_2 0x00080000
+
+
+static struct map_info sc520cdp_map[] = {
+ {
+ .name = "SC520CDP Flash Bank #0",
+ .size = WINDOW_SIZE_0,
+ .bankwidth = 4,
+ .phys = WINDOW_ADDR_0
+ },
+ {
+ .name = "SC520CDP Flash Bank #1",
+ .size = WINDOW_SIZE_1,
+ .bankwidth = 4,
+ .phys = WINDOW_ADDR_1
+ },
+ {
+ .name = "SC520CDP DIL Flash",
+ .size = WINDOW_SIZE_2,
+ .bankwidth = 1,
+ .phys = WINDOW_ADDR_2
+ },
+};
+
+#define NUM_FLASH_BANKS ARRAY_SIZE(sc520cdp_map)
+
+static struct mtd_info *mymtd[NUM_FLASH_BANKS];
+static struct mtd_info *merged_mtd;
+
+#ifdef REPROGRAM_PAR
+
+/*
+** The SC520 MMCR (memory mapped control register) region resides
+** at 0xFFFEF000. The 16 Programmable Address Region (PAR) registers
+** are at offset 0x88 in the MMCR:
+*/
+#define SC520_MMCR_BASE 0xFFFEF000
+#define SC520_MMCR_EXTENT 0x1000
+#define SC520_PAR(x) ((0x88/sizeof(unsigned long)) + (x))
+#define NUM_SC520_PAR 16 /* total number of PAR registers */
+
+/*
+** The highest three bits in a PAR register determine what target
+** device is controlled by this PAR. Here, only ROMCS? and BOOTCS
+** devices are of interest.
+*/
+#define SC520_PAR_BOOTCS (0x4<<29)
+#define SC520_PAR_ROMCS0 (0x5<<29)
+#define SC520_PAR_ROMCS1 (0x6<<29)
+#define SC520_PAR_TRGDEV (0x7<<29)
+
+/*
+** Bits 28 thru 26 determine some attributes for the
+** region controlled by the PAR. (We only use non-cacheable)
+*/
+#define SC520_PAR_WRPROT (1<<26) /* write protected */
+#define SC520_PAR_NOCACHE (1<<27) /* non-cacheable */
+#define SC520_PAR_NOEXEC (1<<28) /* code execution denied */
+
+
+/*
+** Bit 25 determines the granularity: 4K or 64K
+*/
+#define SC520_PAR_PG_SIZ4 (0<<25)
+#define SC520_PAR_PG_SIZ64 (1<<25)
+
+/*
+** Build a value to be written into a PAR register.
+** We only need ROM entries, 64K page size:
+*/
+#define SC520_PAR_ENTRY(trgdev, address, size) \
+ ((trgdev) | SC520_PAR_NOCACHE | SC520_PAR_PG_SIZ64 | \
+ (address) >> 16 | (((size) >> 16) - 1) << 14)
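+
+/*
+** A worked expansion (derived from the macro above, not from AMD docs):
+** SC520_PAR_ENTRY(SC520_PAR_ROMCS0, 0x08800000, 0x00800000) sets the
+** target field 0x5 in bits 31-29, the NOCACHE and 64K page-size bits,
+** a start-address field of 0x08800000 >> 16 = 0x0880 in the low bits,
+** and a size field of (0x00800000 >> 16) - 1 = 0x7f (128 pages of
+** 64 KiB, stored as count minus one) shifted left by 14.
+*/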
+
+struct sc520_par_table
+{
+ unsigned long trgdev;
+ unsigned long new_par;
+ unsigned long default_address;
+};
+
+static const struct sc520_par_table par_table[NUM_FLASH_BANKS] =
+{
+ { /* Flash Bank #0: selected by ROMCS0 */
+ SC520_PAR_ROMCS0,
+ SC520_PAR_ENTRY(SC520_PAR_ROMCS0, WINDOW_ADDR_0, WINDOW_SIZE_0),
+ WINDOW_ADDR_0_BIOS
+ },
+ { /* Flash Bank #1: selected by ROMCS1 */
+ SC520_PAR_ROMCS1,
+ SC520_PAR_ENTRY(SC520_PAR_ROMCS1, WINDOW_ADDR_1, WINDOW_SIZE_1),
+ WINDOW_ADDR_1_BIOS
+ },
+ { /* DIL (BIOS) Flash: selected by BOOTCS */
+ SC520_PAR_BOOTCS,
+ SC520_PAR_ENTRY(SC520_PAR_BOOTCS, WINDOW_ADDR_2, WINDOW_SIZE_2),
+ WINDOW_ADDR_2_BIOS
+ }
+};
+
+
+static void sc520cdp_setup_par(void)
+{
+ unsigned long __iomem *mmcr;
+ unsigned long mmcr_val;
+ int i, j;
+
+ /* map in SC520's MMCR area */
+ mmcr = ioremap(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
+ if(!mmcr) { /* ioremap failed: skip the PAR reprogramming */
+ /* force physical address fields to BIOS defaults: */
+ for(i = 0; i < NUM_FLASH_BANKS; i++)
+ sc520cdp_map[i].phys = par_table[i].default_address;
+ return;
+ }
+
+ /*
+ ** Find the PARxx registers that are responsible for activating
+ ** ROMCS0, ROMCS1 and BOOTCS. Reprogram each of these with a
+ ** new value from the table.
+ */
+ for(i = 0; i < NUM_FLASH_BANKS; i++) { /* for each par_table entry */
+ for(j = 0; j < NUM_SC520_PAR; j++) { /* for each PAR register */
+ mmcr_val = readl(&mmcr[SC520_PAR(j)]);
+ /* if target device field matches, reprogram the PAR */
+ if((mmcr_val & SC520_PAR_TRGDEV) == par_table[i].trgdev)
+ {
+ writel(par_table[i].new_par, &mmcr[SC520_PAR(j)]);
+ break;
+ }
+ }
+ if(j == NUM_SC520_PAR)
+ { /* no matching PAR found: try default BIOS address */
+ printk(KERN_NOTICE "Could not find PAR responsible for %s\n",
+ sc520cdp_map[i].name);
+ printk(KERN_NOTICE "Trying default address 0x%lx\n",
+ par_table[i].default_address);
+ sc520cdp_map[i].phys = par_table[i].default_address;
+ }
+ }
+ iounmap(mmcr);
+}
+#endif
+
+
+static int __init init_sc520cdp(void)
+{
+ int i, j, devices_found = 0;
+
+#ifdef REPROGRAM_PAR
+ /* reprogram PAR registers so flash appears at the desired addresses */
+ sc520cdp_setup_par();
+#endif
+
+ for (i = 0; i < NUM_FLASH_BANKS; i++) {
+ printk(KERN_NOTICE "SC520 CDP flash device: 0x%Lx at 0x%Lx\n",
+ (unsigned long long)sc520cdp_map[i].size,
+ (unsigned long long)sc520cdp_map[i].phys);
+
+ sc520cdp_map[i].virt = ioremap(sc520cdp_map[i].phys, sc520cdp_map[i].size);
+
+ if (!sc520cdp_map[i].virt) {
+ printk("Failed to ioremap\n");
+ for (j = 0; j < i; j++) {
+ if (mymtd[j]) {
+ map_destroy(mymtd[j]);
+ iounmap(sc520cdp_map[j].virt);
+ }
+ }
+ return -EIO;
+ }
+
+ simple_map_init(&sc520cdp_map[i]);
+
+ mymtd[i] = do_map_probe("cfi_probe", &sc520cdp_map[i]);
+ if(!mymtd[i])
+ mymtd[i] = do_map_probe("jedec_probe", &sc520cdp_map[i]);
+ if(!mymtd[i])
+ mymtd[i] = do_map_probe("map_rom", &sc520cdp_map[i]);
+
+ if (mymtd[i]) {
+ mymtd[i]->owner = THIS_MODULE;
+ ++devices_found;
+ }
+ else {
+ iounmap(sc520cdp_map[i].virt);
+ }
+ }
+ if(devices_found >= 2) {
+ /* Combine the two flash banks into a single MTD device & register it: */
+ merged_mtd = mtd_concat_create(mymtd, 2, "SC520CDP Flash Banks #0 and #1");
+ if(merged_mtd)
+ mtd_device_register(merged_mtd, NULL, 0);
+ }
+ if(devices_found == 3) /* register the third (DIL-Flash) device */
+ mtd_device_register(mymtd[2], NULL, 0);
+ return(devices_found ? 0 : -ENXIO);
+}
+
+static void __exit cleanup_sc520cdp(void)
+{
+ int i;
+
+ if (merged_mtd) {
+ mtd_device_unregister(merged_mtd);
+ mtd_concat_destroy(merged_mtd);
+ }
+ if (mymtd[2])
+ mtd_device_unregister(mymtd[2]);
+
+ for (i = 0; i < NUM_FLASH_BANKS; i++) {
+ if (mymtd[i])
+ map_destroy(mymtd[i]);
+ if (sc520cdp_map[i].virt) {
+ iounmap(sc520cdp_map[i].virt);
+ sc520cdp_map[i].virt = NULL;
+ }
+ }
+}
+
+module_init(init_sc520cdp);
+module_exit(cleanup_sc520cdp);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH");
+MODULE_DESCRIPTION("MTD map driver for AMD SC520 Customer Development Platform");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
new file mode 100644
index 0000000000..57303f904b
--- /dev/null
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MTD map driver for BIOS Flash on Intel SCB2 boards
+ * Copyright (C) 2002 Sun Microsystems, Inc.
+ * Tim Hockin <thockin@sun.com>
+ *
+ * A few notes on this MTD map:
+ *
+ * This was developed with a small number of SCB2 boards to test on.
+ * Hopefully, Intel has not introduced too many unaccounted variables in the
+ * making of this board.
+ *
+ * The BIOS marks its own memory region as 'reserved' in the e820 map. We
+ * try to request it here, but if it fails, we carry on anyway.
+ *
+ * This is how the chip is attached, so said the schematic:
+ * * a 4 MiB (32 Mib) 16 bit chip
+ * * a 1 MiB memory region
+ * * A20 and A21 pulled up
+ * * D8-D15 ignored
+ * What this means is that, while we are addressing bytes linearly, we are
+ * really addressing words, and discarding the other byte. This means that
+ * the chip MUST BE at least 2 MiB. This also means that every block is
+ * actually half as big as the chip reports. It also means that accesses of
+ * logical address 0 hit higher-address sections of the chip, not physical 0.
+ * One can only hope that these 4MiB x16 chips were a lot cheaper than 1MiB x8
+ * chips.
+ *
+ * This driver assumes the chip is not write-protected by an external signal.
+ * As of this writing, that is true, but may change, just to spite me.
+ *
+ * The actual BIOS layout has been mostly reverse engineered. Intel BIOS
+ * updates for this board include 10 related (*.bio - *.bi9) binary files and
+ * another separate (*.bbo) binary file. The 10 files are 64k of data + a
+ * small header. If the headers are stripped off, the 10 64k files can be
+ * concatenated into a 640k image. This is your BIOS image, proper. The
+ * separate .bbo file also has a small header. It is the 'Boot Block'
+ * recovery BIOS. Once the header is stripped, no further prep is needed.
+ * As best I can tell, the BIOS is arranged as such:
+ * offset 0x00000 to 0x4ffff (320k): unknown - SCSI BIOS, etc?
+ * offset 0x50000 to 0xeffff (640k): BIOS proper
+ * offset 0xf0000 to 0xfffff (64k): Boot Block region
+ *
+ * Intel's BIOS update program flashes the BIOS and Boot Block in separate
+ * steps. Probably a wise thing to do.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#define MODNAME "scb2_flash"
+#define SCB2_ADDR 0xfff00000
+#define SCB2_WINDOW 0x00100000
+
+
+static void __iomem *scb2_ioaddr;
+static struct mtd_info *scb2_mtd;
+static struct map_info scb2_map = {
+ .name = "SCB2 BIOS Flash",
+ .size = 0,
+ .bankwidth = 1,
+};
+static int region_fail;
+
+static int scb2_fixup_mtd(struct mtd_info *mtd)
+{
+ int i;
+ int done = 0;
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /* barf if this doesn't look right */
+ if (cfi->cfiq->InterfaceDesc != CFI_INTERFACE_X16_ASYNC) {
+ printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n",
+ cfi->cfiq->InterfaceDesc);
+ return -1;
+ }
+
+ /* I wasn't here. I didn't see. dwmw2. */
+
+ /* the chip is sometimes bigger than the map - what a waste */
+ mtd->size = map->size;
+
+ /*
+ * We only REALLY get half the chip, due to the way it is
+ * wired up - D8-D15 are tossed away. We read linear bytes,
+ * but in reality we are getting 1/2 of each 16-bit read,
+ * which LOOKS linear to us. Because CFI code accounts for
+ * things like lock/unlock/erase by eraseregions, we need to
+ * fudge them to reflect this. Erases go like this:
+ * * send an erase to an address
+ * * the chip samples the address and erases the block
+ * * add the block erasesize to the address and repeat
+ * -- the problem is that addresses are 16-bit addressable
+ * -- we end up erasing every-other block
+ */
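+ /*
+ * Concretely (illustrative numbers, not from the schematic): a chip
+ * reporting 64 KiB erase blocks only carries 32 KiB of usable data
+ * per block through this byte-wide wiring, so halve the erasesize
+ * and every erase region to keep the block arithmetic consistent.
+ */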
+ mtd->erasesize /= 2;
+ for (i = 0; i < mtd->numeraseregions; i++) {
+ struct mtd_erase_region_info *region = &mtd->eraseregions[i];
+ region->erasesize /= 2;
+ }
+
+ /*
+ * If the chip is bigger than the map, it is wired with the high
+ * address lines pulled up. This makes us access the top portion of
+ * the chip, so all our erase-region info is wrong. Start cutting from
+ * the bottom.
+ */
+ for (i = 0; !done && i < mtd->numeraseregions; i++) {
+ struct mtd_erase_region_info *region = &mtd->eraseregions[i];
+
+ if (region->numblocks * region->erasesize > mtd->size) {
+ region->numblocks = ((unsigned long)mtd->size /
+ region->erasesize);
+ done = 1;
+ } else {
+ region->numblocks = 0;
+ }
+ region->offset = 0;
+ }
+
+ return 0;
+}
+
+/* CSB5's 'Function Control Register' has bits for decoding @ >= 0xffc00000 */
+#define CSB5_FCR 0x41
+#define CSB5_FCR_DECODE_ALL 0x0e
+static int scb2_flash_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ u8 reg;
+
+ /* enable decoding of the flash region in the south bridge */
+ pci_read_config_byte(dev, CSB5_FCR, &reg);
+ pci_write_config_byte(dev, CSB5_FCR, reg | CSB5_FCR_DECODE_ALL);
+
+ if (!request_mem_region(SCB2_ADDR, SCB2_WINDOW, scb2_map.name)) {
+ /*
+ * The BIOS seems to mark the flash region as 'reserved'
+ * in the e820 map. Warn and go about our business.
+ */
+ printk(KERN_WARNING MODNAME
+ ": warning - can't reserve rom window, continuing\n");
+ region_fail = 1;
+ }
+
+ /* remap the IO window (w/o caching) */
+ scb2_ioaddr = ioremap(SCB2_ADDR, SCB2_WINDOW);
+ if (!scb2_ioaddr) {
+ printk(KERN_ERR MODNAME ": Failed to ioremap window!\n");
+ if (!region_fail)
+ release_mem_region(SCB2_ADDR, SCB2_WINDOW);
+ return -ENOMEM;
+ }
+
+ scb2_map.phys = SCB2_ADDR;
+ scb2_map.virt = scb2_ioaddr;
+ scb2_map.size = SCB2_WINDOW;
+
+ simple_map_init(&scb2_map);
+
+ /* try to find a chip */
+ scb2_mtd = do_map_probe("cfi_probe", &scb2_map);
+
+ if (!scb2_mtd) {
+ printk(KERN_ERR MODNAME ": flash probe failed!\n");
+ iounmap(scb2_ioaddr);
+ if (!region_fail)
+ release_mem_region(SCB2_ADDR, SCB2_WINDOW);
+ return -ENODEV;
+ }
+
+ scb2_mtd->owner = THIS_MODULE;
+ if (scb2_fixup_mtd(scb2_mtd) < 0) {
+ mtd_device_unregister(scb2_mtd);
+ map_destroy(scb2_mtd);
+ iounmap(scb2_ioaddr);
+ if (!region_fail)
+ release_mem_region(SCB2_ADDR, SCB2_WINDOW);
+ return -ENODEV;
+ }
+
+ printk(KERN_NOTICE MODNAME ": chip size 0x%llx at offset 0x%llx\n",
+ (unsigned long long)scb2_mtd->size,
+ (unsigned long long)(SCB2_WINDOW - scb2_mtd->size));
+
+ mtd_device_register(scb2_mtd, NULL, 0);
+
+ return 0;
+}
+
+static void scb2_flash_remove(struct pci_dev *dev)
+{
+ if (!scb2_mtd)
+ return;
+
+ /* disable flash writes */
+ mtd_lock(scb2_mtd, 0, scb2_mtd->size);
+
+ mtd_device_unregister(scb2_mtd);
+ map_destroy(scb2_mtd);
+
+ iounmap(scb2_ioaddr);
+ scb2_ioaddr = NULL;
+
+ if (!region_fail)
+ release_mem_region(SCB2_ADDR, SCB2_WINDOW);
+}
+
+static struct pci_device_id scb2_flash_pci_ids[] = {
+ {
+ .vendor = PCI_VENDOR_ID_SERVERWORKS,
+ .device = PCI_DEVICE_ID_SERVERWORKS_CSB5,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID
+ },
+ { 0, }
+};
+
+static struct pci_driver scb2_flash_driver = {
+ .name = "Intel SCB2 BIOS Flash",
+ .id_table = scb2_flash_pci_ids,
+ .probe = scb2_flash_probe,
+ .remove = scb2_flash_remove,
+};
+
+module_pci_driver(scb2_flash_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tim Hockin <thockin@sun.com>");
+MODULE_DESCRIPTION("MTD map driver for Intel SCB2 BIOS Flash");
+MODULE_DEVICE_TABLE(pci, scb2_flash_pci_ids);
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
new file mode 100644
index 0000000000..8462662b71
--- /dev/null
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* linux/drivers/mtd/maps/scx200_docflash.c
+
+ Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
+
+ National Semiconductor SCx200 flash mapped with DOCCS
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/pci.h>
+#include <linux/scx200.h>
+
+#define NAME "scx200_docflash"
+
+MODULE_AUTHOR("Christer Weinigel <wingel@hack.org>");
+MODULE_DESCRIPTION("NatSemi SCx200 DOCCS Flash Driver");
+MODULE_LICENSE("GPL");
+
+static int probe = 0; /* Don't autoprobe */
+static unsigned size = 0x1000000; /* 16 MiB the whole ISA address space */
+static unsigned width = 8; /* Default to 8 bits wide */
+static char *flashtype = "cfi_probe";
+
+module_param(probe, int, 0);
+MODULE_PARM_DESC(probe, "Probe for a BIOS mapping");
+module_param(size, int, 0);
+MODULE_PARM_DESC(size, "Size of the flash mapping");
+module_param(width, int, 0);
+MODULE_PARM_DESC(width, "Data width of the flash mapping (8/16)");
+module_param(flashtype, charp, 0);
+MODULE_PARM_DESC(flashtype, "Type of MTD probe to do");
+
+static struct resource docmem = {
+ .flags = IORESOURCE_MEM,
+ .name = "NatSemi SCx200 DOCCS Flash",
+};
+
+static struct mtd_info *mymtd;
+
+static struct mtd_partition partition_info[] = {
+ {
+ .name = "DOCCS Boot kernel",
+ .offset = 0,
+ .size = 0xc0000
+ },
+ {
+ .name = "DOCCS Low BIOS",
+ .offset = 0xc0000,
+ .size = 0x40000
+ },
+ {
+ .name = "DOCCS File system",
+ .offset = 0x100000,
+ .size = ~0 /* calculate from flash size */
+ },
+ {
+ .name = "DOCCS High BIOS",
+ .offset = ~0, /* calculate from flash size */
+ .size = 0x80000
+ },
+};
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
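+
+/* The ~0 placeholders are resolved at init time: with a 16 MiB chip (an
+   illustrative size), the High BIOS partition starts at
+   0x1000000 - 0x80000 = 0xf80000 and the File system partition then spans
+   0xf80000 - 0x100000 = 0xe80000 bytes; see the fixup at the end of
+   init_scx200_docflash(). */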
+
+static struct map_info scx200_docflash_map = {
+ .name = "NatSemi SCx200 DOCCS Flash",
+};
+
+static int __init init_scx200_docflash(void)
+{
+ unsigned u;
+ unsigned base;
+ unsigned ctrl;
+ unsigned pmr;
+ struct pci_dev *bridge;
+
+ printk(KERN_DEBUG NAME ": NatSemi SCx200 DOCCS Flash Driver\n");
+
+ if ((bridge = pci_get_device(PCI_VENDOR_ID_NS,
+ PCI_DEVICE_ID_NS_SCx200_BRIDGE,
+ NULL)) == NULL)
+ return -ENODEV;
+
+ /* check that we have found the configuration block */
+ if (!scx200_cb_present()) {
+ pci_dev_put(bridge);
+ return -ENODEV;
+ }
+
+ if (probe) {
+ /* Try to use the present flash mapping if any */
+ pci_read_config_dword(bridge, SCx200_DOCCS_BASE, &base);
+ pci_read_config_dword(bridge, SCx200_DOCCS_CTRL, &ctrl);
+ pci_dev_put(bridge);
+
+ pmr = inl(scx200_cb_base + SCx200_PMR);
+
+ if (base == 0
+ || (ctrl & 0x07000000) != 0x07000000
+ || (ctrl & 0x0007ffff) == 0)
+ return -ENODEV;
+
+ size = ((ctrl&0x1fff)<<13) + (1<<13);
+
+ for (u = size; u > 1; u >>= 1)
+ ;
+ if (u != 1)
+ return -ENODEV;
+
+ if (pmr & (1<<6))
+ width = 16;
+ else
+ width = 8;
+
+ docmem.start = base;
+ docmem.end = base + size;
+
+ if (request_resource(&iomem_resource, &docmem)) {
+ printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n");
+ return -ENOMEM;
+ }
+ } else {
+ pci_dev_put(bridge);
+ for (u = size; u > 1; u >>= 1)
+ ;
+ if (u != 1) {
+ printk(KERN_ERR NAME ": invalid size for flash mapping\n");
+ return -EINVAL;
+ }
+
+ if (width != 8 && width != 16) {
+ printk(KERN_ERR NAME ": invalid bus width for flash mapping\n");
+ return -EINVAL;
+ }
+
+ if (allocate_resource(&iomem_resource, &docmem,
+ size,
+ 0xc0000000, 0xffffffff,
+ size, NULL, NULL)) {
+ printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n");
+ return -ENOMEM;
+ }
+
+ ctrl = 0x07000000 | ((size-1) >> 13);
+
+ printk(KERN_INFO "DOCCS BASE=0x%08lx, CTRL=0x%08lx\n", (long)docmem.start, (long)ctrl);
+
+ pci_write_config_dword(bridge, SCx200_DOCCS_BASE, docmem.start);
+ pci_write_config_dword(bridge, SCx200_DOCCS_CTRL, ctrl);
+ pmr = inl(scx200_cb_base + SCx200_PMR);
+
+ if (width == 8) {
+ pmr &= ~(1<<6);
+ } else {
+ pmr |= (1<<6);
+ }
+ outl(pmr, scx200_cb_base + SCx200_PMR);
+ }
+
+ printk(KERN_INFO NAME ": DOCCS mapped at %pR, width %d\n",
+ &docmem, width);
+
+ scx200_docflash_map.size = size;
+ if (width == 8)
+ scx200_docflash_map.bankwidth = 1;
+ else
+ scx200_docflash_map.bankwidth = 2;
+
+ simple_map_init(&scx200_docflash_map);
+
+ scx200_docflash_map.phys = docmem.start;
+ scx200_docflash_map.virt = ioremap(docmem.start, scx200_docflash_map.size);
+ if (!scx200_docflash_map.virt) {
+ printk(KERN_ERR NAME ": failed to ioremap the flash\n");
+ release_resource(&docmem);
+ return -EIO;
+ }
+
+ mymtd = do_map_probe(flashtype, &scx200_docflash_map);
+ if (!mymtd) {
+ printk(KERN_ERR NAME ": unable to detect flash\n");
+ iounmap(scx200_docflash_map.virt);
+ release_resource(&docmem);
+ return -ENXIO;
+ }
+
+ if (size < mymtd->size)
+ printk(KERN_WARNING NAME ": warning, flash mapping is smaller than flash size\n");
+
+ mymtd->owner = THIS_MODULE;
+
+ partition_info[3].offset = mymtd->size-partition_info[3].size;
+ partition_info[2].size = partition_info[3].offset-partition_info[2].offset;
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
+
+ return 0;
+}
+
+static void __exit cleanup_scx200_docflash(void)
+{
+ if (mymtd) {
+ mtd_device_unregister(mymtd);
+ map_destroy(mymtd);
+ }
+ if (scx200_docflash_map.virt) {
+ iounmap(scx200_docflash_map.virt);
+ release_resource(&docmem);
+ }
+}
+
+module_init(init_scx200_docflash);
+module_exit(cleanup_scx200_docflash);
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
new file mode 100644
index 0000000000..c07f21b204
--- /dev/null
+++ b/drivers/mtd/maps/solutionengine.c
@@ -0,0 +1,95 @@
+/*
+ * Flash and EPROM on Hitachi Solution Engine and similar boards.
+ *
+ * (C) 2001 Red Hat, Inc.
+ *
+ * GPL'd
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/errno.h>
+
+static struct mtd_info *flash_mtd;
+static struct mtd_info *eprom_mtd;
+
+struct map_info soleng_eprom_map = {
+ .name = "Solution Engine EPROM",
+ .size = 0x400000,
+ .bankwidth = 4,
+};
+
+struct map_info soleng_flash_map = {
+ .name = "Solution Engine FLASH",
+ .size = 0x400000,
+ .bankwidth = 4,
+};
+
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
+
+static int __init init_soleng_maps(void)
+{
+ /* First probe at offset 0 */
+ soleng_flash_map.phys = 0;
+ soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0);
+ soleng_eprom_map.phys = 0x01000000;
+ soleng_eprom_map.virt = (void __iomem *)P1SEGADDR(0x01000000);
+ simple_map_init(&soleng_eprom_map);
+ simple_map_init(&soleng_flash_map);
+
+ printk(KERN_NOTICE "Probing for flash chips at 0x00000000:\n");
+ flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map);
+ if (!flash_mtd) {
+ /* Not there. Try swapping */
+ printk(KERN_NOTICE "Probing for flash chips at 0x01000000:\n");
+ soleng_flash_map.phys = 0x01000000;
+ soleng_flash_map.virt = P2SEGADDR(0x01000000);
+ soleng_eprom_map.phys = 0;
+ soleng_eprom_map.virt = P1SEGADDR(0);
+ flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map);
+ if (!flash_mtd) {
+ /* Eep. */
+ printk(KERN_NOTICE "Flash chips not detected at either possible location.\n");
+ return -ENXIO;
+ }
+ }
+ printk(KERN_NOTICE "Solution Engine: Flash at 0x%pap, EPROM at 0x%pap\n",
+ &soleng_flash_map.phys,
+ &soleng_eprom_map.phys);
+ flash_mtd->owner = THIS_MODULE;
+
+ eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
+ if (eprom_mtd) {
+ eprom_mtd->owner = THIS_MODULE;
+ mtd_device_register(eprom_mtd, NULL, 0);
+ }
+
+ mtd_device_parse_register(flash_mtd, probes, NULL, NULL, 0);
+
+ return 0;
+}
+
+static void __exit cleanup_soleng_maps(void)
+{
+ if (eprom_mtd) {
+ mtd_device_unregister(eprom_mtd);
+ map_destroy(eprom_mtd);
+ }
+
+ mtd_device_unregister(flash_mtd);
+ map_destroy(flash_mtd);
+}
+
+module_init(init_soleng_maps);
+module_exit(cleanup_soleng_maps);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("MTD map driver for Hitachi SolutionEngine (and similar) boards");
+
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
new file mode 100644
index 0000000000..2bfdf1b7e1
--- /dev/null
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* sun_uflash.c - Driver for user-programmable flash on
+ * Sun Microsystems SME boardsets.
+ *
+ * This driver does NOT provide access to the OBP-flash for
+ * safety reasons-- use <linux>/drivers/sbus/char/flash.c instead.
+ *
+ * Copyright (c) 2001 Eric Brower (ebrower@usa.net)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <asm/prom.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+
+#define UFLASH_OBPNAME "flashprom"
+#define DRIVER_NAME "sun_uflash"
+#define PFX DRIVER_NAME ": "
+
+#define UFLASH_WINDOW_SIZE 0x200000
+#define UFLASH_BUSWIDTH 1 /* EBus is 8-bit */
+
+MODULE_AUTHOR("Eric Brower <ebrower@usa.net>");
+MODULE_DESCRIPTION("User-programmable flash device on Sun Microsystems boardsets");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("2.1");
+
+struct uflash_dev {
+ const char *name; /* device name */
+ struct map_info map; /* mtd map info */
+ struct mtd_info *mtd; /* mtd info */
+};
+
+struct map_info uflash_map_templ = {
+ .name = "SUNW,???-????",
+ .size = UFLASH_WINDOW_SIZE,
+ .bankwidth = UFLASH_BUSWIDTH,
+};
+
+int uflash_devinit(struct platform_device *op, struct device_node *dp)
+{
+ struct uflash_dev *up;
+
+ if (op->resource[1].flags) {
+ /* Non-CFI userflash device-- once I find one we
+ * can work on supporting it.
+ */
+ printk(KERN_ERR PFX "Unsupported device at %pOF, 0x%llx\n",
+ dp, (unsigned long long)op->resource[0].start);
+
+ return -ENODEV;
+ }
+
+ up = kzalloc(sizeof(struct uflash_dev), GFP_KERNEL);
+ if (!up)
+ return -ENOMEM;
+
+ /* copy defaults and tweak parameters */
+ memcpy(&up->map, &uflash_map_templ, sizeof(uflash_map_templ));
+
+ up->map.size = resource_size(&op->resource[0]);
+
+ up->name = of_get_property(dp, "model", NULL);
+ if (up->name && 0 < strlen(up->name))
+ up->map.name = up->name;
+
+ up->map.phys = op->resource[0].start;
+
+ up->map.virt = of_ioremap(&op->resource[0], 0, up->map.size,
+ DRIVER_NAME);
+ if (!up->map.virt) {
+ printk(KERN_ERR PFX "Failed to map device.\n");
+ kfree(up);
+
+ return -EINVAL;
+ }
+
+ simple_map_init(&up->map);
+
+ /* MTD registration */
+ up->mtd = do_map_probe("cfi_probe", &up->map);
+ if (!up->mtd) {
+ of_iounmap(&op->resource[0], up->map.virt, up->map.size);
+ kfree(up);
+
+ return -ENXIO;
+ }
+
+ up->mtd->owner = THIS_MODULE;
+
+ mtd_device_register(up->mtd, NULL, 0);
+
+ dev_set_drvdata(&op->dev, up);
+
+ return 0;
+}
+
+static int uflash_probe(struct platform_device *op)
+{
+ struct device_node *dp = op->dev.of_node;
+
+ /* Flashprom must have the "user" property in order to
+ * be used by this driver.
+ */
+ if (!of_property_read_bool(dp, "user"))
+ return -ENODEV;
+
+ return uflash_devinit(op, dp);
+}
+
+static int uflash_remove(struct platform_device *op)
+{
+ struct uflash_dev *up = dev_get_drvdata(&op->dev);
+
+ if (up->mtd) {
+ mtd_device_unregister(up->mtd);
+ map_destroy(up->mtd);
+ }
+ if (up->map.virt) {
+ of_iounmap(&op->resource[0], up->map.virt, up->map.size);
+ up->map.virt = NULL;
+ }
+
+ kfree(up);
+
+ return 0;
+}
+
+static const struct of_device_id uflash_match[] = {
+ {
+ .name = UFLASH_OBPNAME,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, uflash_match);
+
+static struct platform_driver uflash_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = uflash_match,
+ },
+ .probe = uflash_probe,
+ .remove = uflash_remove,
+};
+
+module_platform_driver(uflash_driver);
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
new file mode 100644
index 0000000000..70d6e865f5
--- /dev/null
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ts5500_flash.c -- MTD map driver for Technology Systems TS-5500 board
+ *
+ * Copyright (C) 2004 Sean Young <sean@mess.org>
+ *
+ * Note:
+ * - In order for detection to work, jumper 3 must be set.
+ * - Drive A and B use the resident flash disk (RFD) flash translation layer.
+ * - If you have created your own jffs file system and the bios overwrites
+ * it during boot, try disabling Drive A: and B: in the boot order.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/types.h>
+
+
+#define WINDOW_ADDR 0x09400000
+#define WINDOW_SIZE 0x00200000
+
+static struct map_info ts5500_map = {
+ .name = "TS-5500 Flash",
+ .size = WINDOW_SIZE,
+ .bankwidth = 1,
+ .phys = WINDOW_ADDR
+};
+
+static const struct mtd_partition ts5500_partitions[] = {
+ {
+ .name = "Drive A",
+ .offset = 0,
+ .size = 0x0e0000
+ },
+ {
+ .name = "BIOS",
+ .offset = 0x0e0000,
+ .size = 0x020000,
+ },
+ {
+ .name = "Drive B",
+ .offset = 0x100000,
+ .size = 0x100000
+ }
+};
+
+#define NUM_PARTITIONS ARRAY_SIZE(ts5500_partitions)
+
+static struct mtd_info *mymtd;
+
+static int __init init_ts5500_map(void)
+{
+ int rc = 0;
+
+ ts5500_map.virt = ioremap(ts5500_map.phys, ts5500_map.size);
+
+ if (!ts5500_map.virt) {
+ printk(KERN_ERR "Failed to ioremap\n");
+ rc = -EIO;
+ goto err2;
+ }
+
+ simple_map_init(&ts5500_map);
+
+ mymtd = do_map_probe("jedec_probe", &ts5500_map);
+ if (!mymtd)
+ mymtd = do_map_probe("map_rom", &ts5500_map);
+
+ if (!mymtd) {
+ rc = -ENXIO;
+ goto err1;
+ }
+
+ mymtd->owner = THIS_MODULE;
+ mtd_device_register(mymtd, ts5500_partitions, NUM_PARTITIONS);
+
+ return 0;
+
+err1:
+ iounmap(ts5500_map.virt);
+err2:
+ return rc;
+}
+
+static void __exit cleanup_ts5500_map(void)
+{
+ if (mymtd) {
+ mtd_device_unregister(mymtd);
+ map_destroy(mymtd);
+ }
+
+ if (ts5500_map.virt) {
+ iounmap(ts5500_map.virt);
+ ts5500_map.virt = NULL;
+ }
+}
+
+module_init(init_ts5500_map);
+module_exit(cleanup_ts5500_map);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sean Young <sean@mess.org>");
+MODULE_DESCRIPTION("MTD map driver for Technology Systems TS-5500 board");
+
diff --git a/drivers/mtd/maps/tsunami_flash.c b/drivers/mtd/maps/tsunami_flash.c
new file mode 100644
index 0000000000..9fc1f727aa
--- /dev/null
+++ b/drivers/mtd/maps/tsunami_flash.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * tsunami_flash.c
+ *
+ * flash chip on the Alpha DS10...
+ */
+#include <asm/io.h>
+#include <asm/core_tsunami.h>
+#include <linux/init.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+
+#define FLASH_ENABLE_PORT 0x00C00001
+#define FLASH_ENABLE_BYTE 0x01
+#define FLASH_DISABLE_BYTE 0x00
+
+#define MAX_TIG_FLASH_SIZE (12*1024*1024)
+static inline map_word tsunami_flash_read8(struct map_info *map, unsigned long offset)
+{
+ map_word val;
+ val.x[0] = tsunami_tig_readb(offset);
+ return val;
+}
+
+static void tsunami_flash_write8(struct map_info *map, map_word value, unsigned long offset)
+{
+ tsunami_tig_writeb(value.x[0], offset);
+}
+
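+/*
+ * The TIG bus is only byte-wide (bankwidth 1), so bulk copies are
+ * plain byte-at-a-time loops, clamped to the 12 MiB flash window.
+ */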
+static void tsunami_flash_copy_from(
+ struct map_info *map, void *addr, unsigned long offset, ssize_t len)
+{
+	unsigned char *dest = addr;
+
+	while (len && (offset < MAX_TIG_FLASH_SIZE)) {
+ *dest = tsunami_tig_readb(offset);
+ offset++;
+ dest++;
+ len--;
+ }
+}
+
+static void tsunami_flash_copy_to(
+ struct map_info *map, unsigned long offset,
+ const void *addr, ssize_t len)
+{
+	const unsigned char *src = addr;
+
+	while (len && (offset < MAX_TIG_FLASH_SIZE)) {
+ tsunami_tig_writeb(*src, offset);
+ offset++;
+ src++;
+ len--;
+ }
+}
+
+/*
+ * Deliberately don't provide operations wider than 8 bits. I don't
+ * have them, and it scares me to think how you could mess up if
+ * you tried to use them. The bankwidth is correctly set to 1, so I'm safe.
+ */
+static struct map_info tsunami_flash_map = {
+ .name = "flash chip on the Tsunami TIG bus",
+ .size = MAX_TIG_FLASH_SIZE,
+ .phys = NO_XIP,
+ .bankwidth = 1,
+ .read = tsunami_flash_read8,
+ .copy_from = tsunami_flash_copy_from,
+ .write = tsunami_flash_write8,
+ .copy_to = tsunami_flash_copy_to,
+};
+
+static struct mtd_info *tsunami_flash_mtd;
+
+static void __exit cleanup_tsunami_flash(void)
+{
+	struct mtd_info *mtd = tsunami_flash_mtd;
+
+	if (mtd) {
+		mtd_device_unregister(mtd);
+		map_destroy(mtd);
+	}
+	tsunami_flash_mtd = NULL;
+}
+
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", "map_rom", NULL };
+
+static int __init init_tsunami_flash(void)
+{
+ const char * const *type;
+
+ tsunami_tig_writeb(FLASH_ENABLE_BYTE, FLASH_ENABLE_PORT);
+
+	tsunami_flash_mtd = NULL;
+	type = rom_probe_types;
+	for (; !tsunami_flash_mtd && *type; type++) {
+ tsunami_flash_mtd = do_map_probe(*type, &tsunami_flash_map);
+ }
+ if (tsunami_flash_mtd) {
+ tsunami_flash_mtd->owner = THIS_MODULE;
+ mtd_device_register(tsunami_flash_mtd, NULL, 0);
+ return 0;
+ }
+ return -ENXIO;
+}
+
+module_init(init_tsunami_flash);
+module_exit(cleanup_tsunami_flash);
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
new file mode 100644
index 0000000000..de4c46318a
--- /dev/null
+++ b/drivers/mtd/maps/uclinux.c
@@ -0,0 +1,118 @@
+/****************************************************************************/
+
+/*
+ * uclinux.c -- generic memory mapped MTD driver for uclinux
+ *
+ * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
+ *
+ * License: GPL
+ */
+
+/****************************************************************************/
+
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/major.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <asm/sections.h>
+
+/****************************************************************************/
+
+#ifdef CONFIG_MTD_ROM
+#define MAP_NAME "rom"
+#else
+#define MAP_NAME "ram"
+#endif
+
+static struct map_info uclinux_ram_map = {
+ .name = MAP_NAME,
+ .size = 0,
+};
+
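+/*
+ * Where the filesystem image lives; can be overridden on the kernel
+ * command line (e.g. "uclinux.physaddr=0x10000000"), otherwise it
+ * defaults to the end of the kernel's bss, as computed below.
+ */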
+static unsigned long physaddr = -1;
+module_param(physaddr, ulong, S_IRUGO);
+
+static struct mtd_info *uclinux_ram_mtdinfo;
+
+/****************************************************************************/
+
+static const struct mtd_partition uclinux_romfs[] = {
+ { .name = "ROMfs" }
+};
+
+#define NUM_PARTITIONS ARRAY_SIZE(uclinux_romfs)
+
+/****************************************************************************/
+
+static int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ struct map_info *map = mtd->priv;
+ *virt = map->virt + from;
+ if (phys)
+ *phys = map->phys + from;
+ *retlen = len;
+	return 0;
+}
+
+/****************************************************************************/
+
+static int __init uclinux_mtd_init(void)
+{
+ struct mtd_info *mtd;
+ struct map_info *mapp;
+
+ mapp = &uclinux_ram_map;
+
+ if (physaddr == -1)
+ mapp->phys = (resource_size_t)__bss_stop;
+ else
+ mapp->phys = physaddr;
+
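+	/*
+	 * With no explicit size, take it from the ROMfs superblock:
+	 * the big-endian image size word sits at offset 8, right after
+	 * the "-rom1fs-" magic.
+	 */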
+ if (!mapp->size)
+ mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
+ mapp->bankwidth = 4;
+
+ printk("uclinux[mtd]: probe address=0x%x size=0x%x\n",
+ (int) mapp->phys, (int) mapp->size);
+
+ /*
+ * The filesystem is guaranteed to be in direct mapped memory. It is
+ * directly following the kernels own bss region. Following the same
+ * mechanism used by architectures setting up traditional initrds we
+ * use phys_to_virt to get the virtual address of its start.
+ */
+ mapp->virt = phys_to_virt(mapp->phys);
+
+	if (!mapp->virt) {
+		printk(KERN_ERR "uclinux[mtd]: no virtual mapping?\n");
+		return -EIO;
+	}
+
+ simple_map_init(mapp);
+
+ mtd = do_map_probe("map_" MAP_NAME, mapp);
+	if (!mtd) {
+		printk(KERN_ERR "uclinux[mtd]: failed to find a mapping?\n");
+		return -ENXIO;
+	}
+
+ mtd->owner = THIS_MODULE;
+ mtd->_point = uclinux_point;
+ mtd->priv = mapp;
+
+ uclinux_ram_mtdinfo = mtd;
+ mtd_device_register(mtd, uclinux_romfs, NUM_PARTITIONS);
+
+	return 0;
+}
+device_initcall(uclinux_mtd_init);
+
+/****************************************************************************/
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
new file mode 100644
index 0000000000..53019d313d
--- /dev/null
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -0,0 +1,817 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* vmu-flash.c
+ * Driver for SEGA Dreamcast Visual Memory Unit
+ *
+ * Copyright (c) Adrian McMenamin 2002 - 2009
+ * Copyright (c) Paul Mundt 2001
+ */
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/maple.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+
+struct vmu_cache {
+ unsigned char *buffer; /* Cache */
+ unsigned int block; /* Which block was cached */
+ unsigned long jiffies_atc; /* When was it cached? */
+ int valid;
+};
+
+struct mdev_part {
+ struct maple_device *mdev;
+ int partition;
+};
+
+struct vmupart {
+ u16 user_blocks;
+ u16 root_block;
+ u16 numblocks;
+ char *name;
+ struct vmu_cache *pcache;
+};
+
+struct memcard {
+ u16 tempA;
+ u16 tempB;
+ u32 partitions;
+ u32 blocklen;
+ u32 writecnt;
+ u32 readcnt;
+ u32 removable;
+ int partition;
+ int read;
+ unsigned char *blockread;
+ struct vmupart *parts;
+ struct mtd_info *mtd;
+};
+
+struct vmu_block {
+ unsigned int num; /* block number */
+ unsigned int ofs; /* block offset */
+};
+
+static struct vmu_block *ofs_to_block(unsigned long src_ofs,
+ struct mtd_info *mtd, int partition)
+{
+ struct vmu_block *vblock;
+ struct maple_device *mdev;
+ struct memcard *card;
+ struct mdev_part *mpart;
+ int num;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ card = maple_get_drvdata(mdev);
+
+ if (src_ofs >= card->parts[partition].numblocks * card->blocklen)
+ goto failed;
+
+ num = src_ofs / card->blocklen;
+ if (num > card->parts[partition].numblocks)
+ goto failed;
+
+ vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL);
+ if (!vblock)
+ goto failed;
+
+ vblock->num = num;
+ vblock->ofs = src_ofs % card->blocklen;
+ return vblock;
+
+failed:
+ return NULL;
+}
+
+/* Maple bus callback function for reads */
+static void vmu_blockread(struct mapleq *mq)
+{
+ struct maple_device *mdev;
+ struct memcard *card;
+
+ mdev = mq->dev;
+ card = maple_get_drvdata(mdev);
+ /* copy the read in data */
+
+ if (unlikely(!card->blockread))
+ return;
+
+ memcpy(card->blockread, mq->recvbuf->buf + 12,
+ card->blocklen/card->readcnt);
+
+}
+
+/* Interface with the maple bus to read blocks,
+ * caching the results so that other parts
+ * of the driver can access them */
+static int maple_vmu_read_block(unsigned int num, unsigned char *buf,
+ struct mtd_info *mtd)
+{
+ struct memcard *card;
+ struct mdev_part *mpart;
+ struct maple_device *mdev;
+ int partition, error = 0, x, wait;
+ unsigned char *blockread = NULL;
+ struct vmu_cache *pcache;
+ __be32 sendbuf;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ partition = mpart->partition;
+ card = maple_get_drvdata(mdev);
+ pcache = card->parts[partition].pcache;
+ pcache->valid = 0;
+
+ /* prepare the cache for this block */
+ if (!pcache->buffer) {
+ pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL);
+ if (!pcache->buffer) {
+ dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
+ " to lack of memory\n", mdev->port,
+ mdev->unit);
+ error = -ENOMEM;
+ goto outB;
+ }
+ }
+
+ /*
+	 * Reads may be phased - again the hardware spec
+	 * supports this - and though there may not be any
+	 * devices in the wild that implement it, we handle it here
+ */
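+	/*
+	 * Each phase is requested with a single big-endian word that
+	 * packs the partition into bits 31-24, the phase number into
+	 * bits 23-16 and the block number into bits 15-0.
+	 */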
+ for (x = 0; x < card->readcnt; x++) {
+ sendbuf = cpu_to_be32(partition << 24 | x << 16 | num);
+
+ if (atomic_read(&mdev->busy) == 1) {
+ wait_event_interruptible_timeout(mdev->maple_wait,
+ atomic_read(&mdev->busy) == 0, HZ);
+ if (atomic_read(&mdev->busy) == 1) {
+ dev_notice(&mdev->dev, "VMU at (%d, %d)"
+ " is busy\n", mdev->port, mdev->unit);
+ error = -EAGAIN;
+ goto outB;
+ }
+ }
+
+ atomic_set(&mdev->busy, 1);
+ blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL);
+ if (!blockread) {
+ error = -ENOMEM;
+ atomic_set(&mdev->busy, 0);
+ goto outB;
+ }
+ card->blockread = blockread;
+
+ maple_getcond_callback(mdev, vmu_blockread, 0,
+ MAPLE_FUNC_MEMCARD);
+ error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
+ MAPLE_COMMAND_BREAD, 2, &sendbuf);
+ /* Very long timeouts seem to be needed when box is stressed */
+ wait = wait_event_interruptible_timeout(mdev->maple_wait,
+ (atomic_read(&mdev->busy) == 0 ||
+ atomic_read(&mdev->busy) == 2), HZ * 3);
+ /*
+ * MTD layer does not handle hotplugging well
+ * so have to return errors when VMU is unplugged
+ * in the middle of a read (busy == 2)
+ */
+ if (error || atomic_read(&mdev->busy) == 2) {
+ if (atomic_read(&mdev->busy) == 2)
+ error = -ENXIO;
+ atomic_set(&mdev->busy, 0);
+ card->blockread = NULL;
+ goto outA;
+ }
+ if (wait == 0 || wait == -ERESTARTSYS) {
+ card->blockread = NULL;
+ atomic_set(&mdev->busy, 0);
+ error = -EIO;
+ list_del_init(&(mdev->mq->list));
+ kfree(mdev->mq->sendbuf);
+ mdev->mq->sendbuf = NULL;
+ if (wait == -ERESTARTSYS) {
+ dev_warn(&mdev->dev, "VMU read on (%d, %d)"
+ " interrupted on block 0x%X\n",
+ mdev->port, mdev->unit, num);
+ } else
+ dev_notice(&mdev->dev, "VMU read on (%d, %d)"
+ " timed out on block 0x%X\n",
+ mdev->port, mdev->unit, num);
+ goto outA;
+ }
+
+ memcpy(buf + (card->blocklen/card->readcnt) * x, blockread,
+ card->blocklen/card->readcnt);
+
+ memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x,
+ card->blockread, card->blocklen/card->readcnt);
+ card->blockread = NULL;
+ pcache->block = num;
+ pcache->jiffies_atc = jiffies;
+ pcache->valid = 1;
+ kfree(blockread);
+ }
+
+ return error;
+
+outA:
+ kfree(blockread);
+outB:
+ return error;
+}
+
+/* communicate with maple bus for phased writing */
+static int maple_vmu_write_block(unsigned int num, const unsigned char *buf,
+ struct mtd_info *mtd)
+{
+ struct memcard *card;
+ struct mdev_part *mpart;
+ struct maple_device *mdev;
+ int partition, error, locking, x, phaselen, wait;
+ __be32 *sendbuf;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ partition = mpart->partition;
+ card = maple_get_drvdata(mdev);
+
+ phaselen = card->blocklen/card->writecnt;
+
+ sendbuf = kmalloc(phaselen + 4, GFP_KERNEL);
+ if (!sendbuf) {
+ error = -ENOMEM;
+ goto fail_nosendbuf;
+ }
+ for (x = 0; x < card->writecnt; x++) {
+ sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num);
+ memcpy(&sendbuf[1], buf + phaselen * x, phaselen);
+		/* wait for the device to stop being busy, timing out
+		 * after one second */
+ if (atomic_read(&mdev->busy) == 1) {
+ wait_event_interruptible_timeout(mdev->maple_wait,
+ atomic_read(&mdev->busy) == 0, HZ);
+ if (atomic_read(&mdev->busy) == 1) {
+ error = -EBUSY;
+ dev_notice(&mdev->dev, "VMU write at (%d, %d)"
+ "failed - device is busy\n",
+ mdev->port, mdev->unit);
+ goto fail_nolock;
+ }
+ }
+ atomic_set(&mdev->busy, 1);
+
+ locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
+ MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf);
+ wait = wait_event_interruptible_timeout(mdev->maple_wait,
+ atomic_read(&mdev->busy) == 0, HZ/10);
+ if (locking) {
+ error = -EIO;
+ atomic_set(&mdev->busy, 0);
+ goto fail_nolock;
+ }
+ if (atomic_read(&mdev->busy) == 2) {
+ atomic_set(&mdev->busy, 0);
+ } else if (wait == 0 || wait == -ERESTARTSYS) {
+ error = -EIO;
+ dev_warn(&mdev->dev, "Write at (%d, %d) of block"
+ " 0x%X at phase %d failed: could not"
+ " communicate with VMU", mdev->port,
+ mdev->unit, num, x);
+ atomic_set(&mdev->busy, 0);
+ kfree(mdev->mq->sendbuf);
+ mdev->mq->sendbuf = NULL;
+ list_del_init(&(mdev->mq->list));
+ goto fail_nolock;
+ }
+ }
+ kfree(sendbuf);
+
+ return card->blocklen;
+
+fail_nolock:
+ kfree(sendbuf);
+fail_nosendbuf:
+ dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
+ mdev->unit);
+ return error;
+}
+
+/* mtd function to simulate reading byte by byte */
+static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval,
+ struct mtd_info *mtd)
+{
+ struct vmu_block *vblock;
+ struct memcard *card;
+ struct mdev_part *mpart;
+ struct maple_device *mdev;
+ unsigned char *buf, ret;
+ int partition, error;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ partition = mpart->partition;
+ card = maple_get_drvdata(mdev);
+ *retval = 0;
+
+ buf = kmalloc(card->blocklen, GFP_KERNEL);
+ if (!buf) {
+ *retval = 1;
+ ret = -ENOMEM;
+ goto finish;
+ }
+
+ vblock = ofs_to_block(ofs, mtd, partition);
+ if (!vblock) {
+ *retval = 3;
+ ret = -ENOMEM;
+ goto out_buf;
+ }
+
+ error = maple_vmu_read_block(vblock->num, buf, mtd);
+ if (error) {
+ ret = error;
+ *retval = 2;
+ goto out_vblock;
+ }
+
+ ret = buf[vblock->ofs];
+
+out_vblock:
+ kfree(vblock);
+out_buf:
+ kfree(buf);
+finish:
+ return ret;
+}
+
+/* mtd higher order function to read flash */
+static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct maple_device *mdev;
+ struct memcard *card;
+ struct mdev_part *mpart;
+ struct vmu_cache *pcache;
+ struct vmu_block *vblock;
+ int index = 0, retval, partition, leftover, numblocks;
+ unsigned char cx;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ partition = mpart->partition;
+ card = maple_get_drvdata(mdev);
+
+ numblocks = card->parts[partition].numblocks;
+ if (from + len > numblocks * card->blocklen)
+ len = numblocks * card->blocklen - from;
+ if (len == 0)
+ return -EIO;
+ /* Have we cached this bit already? */
+ pcache = card->parts[partition].pcache;
+ do {
+ vblock = ofs_to_block(from + index, mtd, partition);
+ if (!vblock)
+ return -ENOMEM;
+ /* Have we cached this and is the cache valid and timely? */
+ if (pcache->valid &&
+ time_before(jiffies, pcache->jiffies_atc + HZ) &&
+ (pcache->block == vblock->num)) {
+ /* we have cached it, so do necessary copying */
+ leftover = card->blocklen - vblock->ofs;
+ if (vblock->ofs + len - index < card->blocklen) {
+ /* only a bit of this block to copy */
+ memcpy(buf + index,
+ pcache->buffer + vblock->ofs,
+ len - index);
+ index = len;
+ } else {
+ /* otherwise copy remainder of whole block */
+ memcpy(buf + index, pcache->buffer +
+ vblock->ofs, leftover);
+ index += leftover;
+ }
+ } else {
+ /*
+ * Not cached so read one byte -
+ * but cache the rest of the block
+ */
+ cx = vmu_flash_read_char(from + index, &retval, mtd);
+ if (retval) {
+ *retlen = index;
+ kfree(vblock);
+ return cx;
+ }
+ memset(buf + index, cx, 1);
+ index++;
+ }
+ kfree(vblock);
+ } while (len > index);
+ *retlen = index;
+
+ return 0;
+}
+
+static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct maple_device *mdev;
+ struct memcard *card;
+ struct mdev_part *mpart;
+ int index = 0, partition, error = 0, numblocks;
+ struct vmu_cache *pcache;
+ struct vmu_block *vblock;
+ unsigned char *buffer;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ partition = mpart->partition;
+ card = maple_get_drvdata(mdev);
+
+ numblocks = card->parts[partition].numblocks;
+ if (to + len > numblocks * card->blocklen)
+ len = numblocks * card->blocklen - to;
+ if (len == 0) {
+ error = -EIO;
+ goto failed;
+ }
+
+ vblock = ofs_to_block(to, mtd, partition);
+ if (!vblock) {
+ error = -ENOMEM;
+ goto failed;
+ }
+
+ buffer = kmalloc(card->blocklen, GFP_KERNEL);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto fail_buffer;
+ }
+
+ do {
+ /* Read in the block we are to write to */
+ error = maple_vmu_read_block(vblock->num, buffer, mtd);
+ if (error)
+ goto fail_io;
+
+ do {
+ buffer[vblock->ofs] = buf[index];
+ vblock->ofs++;
+ index++;
+ if (index >= len)
+ break;
+ } while (vblock->ofs < card->blocklen);
+
+ /* write out new buffer */
+ error = maple_vmu_write_block(vblock->num, buffer, mtd);
+ /* invalidate the cache */
+ pcache = card->parts[partition].pcache;
+ pcache->valid = 0;
+
+ if (error != card->blocklen)
+ goto fail_io;
+
+ vblock->num++;
+ vblock->ofs = 0;
+ } while (len > index);
+
+ kfree(buffer);
+ *retlen = index;
+ kfree(vblock);
+ return 0;
+
+fail_io:
+ kfree(buffer);
+fail_buffer:
+ kfree(vblock);
+failed:
+ dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
+ return error;
+}
+
+static void vmu_flash_sync(struct mtd_info *mtd)
+{
+ /* Do nothing here */
+}
+
+/* Maple bus callback function to recursively query hardware details */
+static void vmu_queryblocks(struct mapleq *mq)
+{
+ struct maple_device *mdev;
+ unsigned short *res;
+ struct memcard *card;
+ __be32 partnum;
+ struct vmu_cache *pcache;
+ struct mdev_part *mpart;
+ struct mtd_info *mtd_cur;
+ struct vmupart *part_cur;
+ int error;
+
+ mdev = mq->dev;
+ card = maple_get_drvdata(mdev);
+ res = (unsigned short *) (mq->recvbuf->buf);
+ card->tempA = res[12];
+ card->tempB = res[6];
+
+ dev_info(&mdev->dev, "VMU device at partition %d has %d user "
+ "blocks with a root block at %d\n", card->partition,
+ card->tempA, card->tempB);
+
+ part_cur = &card->parts[card->partition];
+ part_cur->user_blocks = card->tempA;
+ part_cur->root_block = card->tempB;
+ part_cur->numblocks = card->tempB + 1;
+ part_cur->name = kmalloc(12, GFP_KERNEL);
+ if (!part_cur->name)
+ goto fail_name;
+
+ sprintf(part_cur->name, "vmu%d.%d.%d",
+ mdev->port, mdev->unit, card->partition);
+ mtd_cur = &card->mtd[card->partition];
+ mtd_cur->name = part_cur->name;
+ mtd_cur->type = 8;
+ mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
+ mtd_cur->size = part_cur->numblocks * card->blocklen;
+ mtd_cur->erasesize = card->blocklen;
+ mtd_cur->_write = vmu_flash_write;
+ mtd_cur->_read = vmu_flash_read;
+ mtd_cur->_sync = vmu_flash_sync;
+ mtd_cur->writesize = card->blocklen;
+
+ mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
+ if (!mpart)
+ goto fail_mpart;
+
+ mpart->mdev = mdev;
+ mpart->partition = card->partition;
+ mtd_cur->priv = mpart;
+ mtd_cur->owner = THIS_MODULE;
+
+ pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL);
+ if (!pcache)
+ goto fail_cache_create;
+ part_cur->pcache = pcache;
+
+ error = mtd_device_register(mtd_cur, NULL, 0);
+ if (error)
+ goto fail_mtd_register;
+
+ maple_getcond_callback(mdev, NULL, 0,
+ MAPLE_FUNC_MEMCARD);
+
+ /*
+ * Set up a recursive call to the (probably theoretical)
+ * second or more partition
+ */
+ if (++card->partition < card->partitions) {
+ partnum = cpu_to_be32(card->partition << 24);
+ maple_getcond_callback(mdev, vmu_queryblocks, 0,
+ MAPLE_FUNC_MEMCARD);
+ maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
+ MAPLE_COMMAND_GETMINFO, 2, &partnum);
+ }
+ return;
+
+fail_mtd_register:
+ dev_err(&mdev->dev, "Could not register maple device at (%d, %d)"
+ "error is 0x%X\n", mdev->port, mdev->unit, error);
+ for (error = 0; error <= card->partition; error++) {
+ kfree(((card->parts)[error]).pcache);
+ ((card->parts)[error]).pcache = NULL;
+ }
+fail_cache_create:
+fail_mpart:
+ for (error = 0; error <= card->partition; error++) {
+ kfree(((card->mtd)[error]).priv);
+ ((card->mtd)[error]).priv = NULL;
+ }
+ maple_getcond_callback(mdev, NULL, 0,
+ MAPLE_FUNC_MEMCARD);
+ kfree(part_cur->name);
+fail_name:
+ return;
+}
+
+/* Handles very basic info about the flash, queries for details */
+static int vmu_connect(struct maple_device *mdev)
+{
+ unsigned long test_flash_data, basic_flash_data;
+ int c, error;
+ struct memcard *card;
+ u32 partnum = 0;
+
+ test_flash_data = be32_to_cpu(mdev->devinfo.function);
+ /* Need to count how many bits are set - to find out which
+ * function_data element has details of the memory card
+ */
+ c = hweight_long(test_flash_data);
+
+ basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);
+
+ card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
+ if (!card) {
+ error = -ENOMEM;
+ goto fail_nomem;
+ }
+
+ card->partitions = (basic_flash_data >> 24 & 0xFF) + 1;
+ card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
+ card->writecnt = basic_flash_data >> 12 & 0xF;
+ card->readcnt = basic_flash_data >> 8 & 0xF;
+ card->removable = basic_flash_data >> 7 & 1;
+
+ card->partition = 0;
+
+ /*
+ * Not sure there are actually any multi-partition devices in the
+	 * real world, but the hardware supports them, so we will too
+ */
+ card->parts = kmalloc_array(card->partitions, sizeof(struct vmupart),
+ GFP_KERNEL);
+ if (!card->parts) {
+ error = -ENOMEM;
+ goto fail_partitions;
+ }
+
+ card->mtd = kmalloc_array(card->partitions, sizeof(struct mtd_info),
+ GFP_KERNEL);
+ if (!card->mtd) {
+ error = -ENOMEM;
+ goto fail_mtd_info;
+ }
+
+ maple_set_drvdata(mdev, card);
+
+ /*
+ * We want to trap meminfo not get cond
+ * so set interval to zero, but rely on maple bus
+ * driver to pass back the results of the meminfo
+ */
+ maple_getcond_callback(mdev, vmu_queryblocks, 0,
+ MAPLE_FUNC_MEMCARD);
+
+ /* Make sure we are clear to go */
+ if (atomic_read(&mdev->busy) == 1) {
+ wait_event_interruptible_timeout(mdev->maple_wait,
+ atomic_read(&mdev->busy) == 0, HZ);
+ if (atomic_read(&mdev->busy) == 1) {
+ dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
+ mdev->port, mdev->unit);
+ error = -EAGAIN;
+ goto fail_device_busy;
+ }
+ }
+
+ atomic_set(&mdev->busy, 1);
+
+ /*
+ * Set up the minfo call: vmu_queryblocks will handle
+ * the information passed back
+ */
+ error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
+ MAPLE_COMMAND_GETMINFO, 2, &partnum);
+ if (error) {
+ dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)"
+ " error is 0x%X\n", mdev->port, mdev->unit, error);
+ goto fail_mtd_info;
+ }
+ return 0;
+
+fail_device_busy:
+ kfree(card->mtd);
+fail_mtd_info:
+ kfree(card->parts);
+fail_partitions:
+ kfree(card);
+fail_nomem:
+ return error;
+}
+
+static void vmu_disconnect(struct maple_device *mdev)
+{
+ struct memcard *card;
+ struct mdev_part *mpart;
+ int x;
+
+ mdev->callback = NULL;
+ card = maple_get_drvdata(mdev);
+ for (x = 0; x < card->partitions; x++) {
+ mpart = ((card->mtd)[x]).priv;
+ mpart->mdev = NULL;
+ mtd_device_unregister(&((card->mtd)[x]));
+ kfree(((card->parts)[x]).name);
+ }
+ kfree(card->parts);
+ kfree(card->mtd);
+ kfree(card);
+}
+
+/* Callback to handle eccentricities of both mtd subsystem
+ * and general flakyness of Dreamcast VMUs
+ */
+static int vmu_can_unload(struct maple_device *mdev)
+{
+ struct memcard *card;
+ int x;
+ struct mtd_info *mtd;
+
+ card = maple_get_drvdata(mdev);
+ for (x = 0; x < card->partitions; x++) {
+ mtd = &((card->mtd)[x]);
+ if (kref_read(&mtd->refcnt))
+ return 0;
+ }
+ return 1;
+}
+
+#define ERRSTR "VMU at (%d, %d) file error -"
+
+static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
+{
+ enum maple_file_errors error = ((int *)recvbuf)[1];
+
+ switch (error) {
+
+ case MAPLE_FILEERR_INVALID_PARTITION:
+ dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
+ mdev->port, mdev->unit);
+ break;
+
+ case MAPLE_FILEERR_PHASE_ERROR:
+ dev_notice(&mdev->dev, ERRSTR " phase error\n",
+ mdev->port, mdev->unit);
+ break;
+
+ case MAPLE_FILEERR_INVALID_BLOCK:
+ dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
+ mdev->port, mdev->unit);
+ break;
+
+ case MAPLE_FILEERR_WRITE_ERROR:
+ dev_notice(&mdev->dev, ERRSTR " write error\n",
+ mdev->port, mdev->unit);
+ break;
+
+ case MAPLE_FILEERR_INVALID_WRITE_LENGTH:
+ dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
+ mdev->port, mdev->unit);
+ break;
+
+ case MAPLE_FILEERR_BAD_CRC:
+ dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
+ mdev->port, mdev->unit);
+ break;
+
+ default:
+ dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
+ mdev->port, mdev->unit, error);
+ }
+}
+
+
+static int probe_maple_vmu(struct device *dev)
+{
+ struct maple_device *mdev = to_maple_dev(dev);
+ struct maple_driver *mdrv = to_maple_driver(dev->driver);
+
+ mdev->can_unload = vmu_can_unload;
+ mdev->fileerr_handler = vmu_file_error;
+ mdev->driver = mdrv;
+
+ return vmu_connect(mdev);
+}
+
+static int remove_maple_vmu(struct device *dev)
+{
+ struct maple_device *mdev = to_maple_dev(dev);
+
+ vmu_disconnect(mdev);
+ return 0;
+}
+
+static struct maple_driver vmu_flash_driver = {
+ .function = MAPLE_FUNC_MEMCARD,
+ .drv = {
+ .name = "Dreamcast_visual_memory",
+ .probe = probe_maple_vmu,
+ .remove = remove_maple_vmu,
+ },
+};
+
+static int __init vmu_flash_map_init(void)
+{
+ return maple_driver_register(&vmu_flash_driver);
+}
+
+static void __exit vmu_flash_map_exit(void)
+{
+ maple_driver_unregister(&vmu_flash_driver);
+}
+
+module_init(vmu_flash_map_init);
+module_exit(vmu_flash_map_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian McMenamin");
+MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
new file mode 100644
index 0000000000..5bc32108ca
--- /dev/null
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -0,0 +1,548 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Interface to Linux block layer for MTD 'translation layers'.
+ *
+ * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/mtd/mtd.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/blkpg.h>
+#include <linux/spinlock.h>
+#include <linux/hdreg.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+
+#include "mtdcore.h"
+
+static LIST_HEAD(blktrans_majors);
+
+static void blktrans_dev_release(struct kref *kref)
+{
+ struct mtd_blktrans_dev *dev =
+ container_of(kref, struct mtd_blktrans_dev, ref);
+
+ put_disk(dev->disk);
+ blk_mq_free_tag_set(dev->tag_set);
+ kfree(dev->tag_set);
+ list_del(&dev->list);
+ kfree(dev);
+}
+
+static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
+{
+ kref_put(&dev->ref, blktrans_dev_release);
+}
+
+
+static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
+ struct mtd_blktrans_dev *dev,
+ struct request *req)
+{
+ struct req_iterator iter;
+ struct bio_vec bvec;
+ unsigned long block, nsect;
+ char *buf;
+
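+	/* blk_rq_pos() counts 512-byte sectors; convert the position
+	 * and length into the translation layer's blksize units. */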
+ block = blk_rq_pos(req) << 9 >> tr->blkshift;
+ nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
+
+ switch (req_op(req)) {
+ case REQ_OP_FLUSH:
+ if (tr->flush(dev))
+ return BLK_STS_IOERR;
+ return BLK_STS_OK;
+ case REQ_OP_DISCARD:
+ if (tr->discard(dev, block, nsect))
+ return BLK_STS_IOERR;
+ return BLK_STS_OK;
+ case REQ_OP_READ:
+ buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
+ for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
+ if (tr->readsect(dev, block, buf)) {
+ kunmap(bio_page(req->bio));
+ return BLK_STS_IOERR;
+ }
+ }
+ kunmap(bio_page(req->bio));
+
+ rq_for_each_segment(bvec, req, iter)
+ flush_dcache_page(bvec.bv_page);
+ return BLK_STS_OK;
+ case REQ_OP_WRITE:
+ if (!tr->writesect)
+ return BLK_STS_IOERR;
+
+ rq_for_each_segment(bvec, req, iter)
+ flush_dcache_page(bvec.bv_page);
+
+ buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
+ for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
+ if (tr->writesect(dev, block, buf)) {
+ kunmap(bio_page(req->bio));
+ return BLK_STS_IOERR;
+ }
+ }
+ kunmap(bio_page(req->bio));
+ return BLK_STS_OK;
+ default:
+ return BLK_STS_IOERR;
+ }
+}
+
+int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
+{
+ return dev->bg_stop;
+}
+EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
+
+static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
+{
+ struct request *rq;
+
+ rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
+ if (rq) {
+ list_del_init(&rq->queuelist);
+ blk_mq_start_request(rq);
+ return rq;
+ }
+
+ return NULL;
+}
+
+static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
+ __releases(&dev->queue_lock)
+ __acquires(&dev->queue_lock)
+{
+ struct mtd_blktrans_ops *tr = dev->tr;
+ struct request *req = NULL;
+ int background_done = 0;
+
+ while (1) {
+ blk_status_t res;
+
+ dev->bg_stop = false;
+ if (!req && !(req = mtd_next_request(dev))) {
+ if (tr->background && !background_done) {
+ spin_unlock_irq(&dev->queue_lock);
+ mutex_lock(&dev->lock);
+ tr->background(dev);
+ mutex_unlock(&dev->lock);
+ spin_lock_irq(&dev->queue_lock);
+ /*
+ * Do background processing just once per idle
+ * period.
+ */
+ background_done = !dev->bg_stop;
+ continue;
+ }
+ break;
+ }
+
+ spin_unlock_irq(&dev->queue_lock);
+
+ mutex_lock(&dev->lock);
+ res = do_blktrans_request(dev->tr, dev, req);
+ mutex_unlock(&dev->lock);
+
+ if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
+ __blk_mq_end_request(req, res);
+ req = NULL;
+ }
+
+ background_done = 0;
+ cond_resched();
+ spin_lock_irq(&dev->queue_lock);
+ }
+}
+
+static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct mtd_blktrans_dev *dev;
+
+ dev = hctx->queue->queuedata;
+ if (!dev) {
+ blk_mq_start_request(bd->rq);
+ return BLK_STS_IOERR;
+ }
+
+ spin_lock_irq(&dev->queue_lock);
+ list_add_tail(&bd->rq->queuelist, &dev->rq_list);
+ mtd_blktrans_work(dev);
+ spin_unlock_irq(&dev->queue_lock);
+
+ return BLK_STS_OK;
+}
+
+static int blktrans_open(struct gendisk *disk, blk_mode_t mode)
+{
+ struct mtd_blktrans_dev *dev = disk->private_data;
+ int ret = 0;
+
+ kref_get(&dev->ref);
+
+ mutex_lock(&dev->lock);
+
+ if (dev->open)
+ goto unlock;
+
+ __module_get(dev->tr->owner);
+
+ if (!dev->mtd)
+ goto unlock;
+
+ if (dev->tr->open) {
+ ret = dev->tr->open(dev);
+ if (ret)
+ goto error_put;
+ }
+
+ ret = __get_mtd_device(dev->mtd);
+ if (ret)
+ goto error_release;
+ dev->writable = mode & BLK_OPEN_WRITE;
+
+unlock:
+ dev->open++;
+ mutex_unlock(&dev->lock);
+ return ret;
+
+error_release:
+ if (dev->tr->release)
+ dev->tr->release(dev);
+error_put:
+ module_put(dev->tr->owner);
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
+}
+
+static void blktrans_release(struct gendisk *disk)
+{
+ struct mtd_blktrans_dev *dev = disk->private_data;
+
+ mutex_lock(&dev->lock);
+
+ if (--dev->open)
+ goto unlock;
+
+ module_put(dev->tr->owner);
+
+ if (dev->mtd) {
+ if (dev->tr->release)
+ dev->tr->release(dev);
+ __put_mtd_device(dev->mtd);
+ }
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+}
+
+static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
+ int ret = -ENXIO;
+
+ mutex_lock(&dev->lock);
+
+ if (!dev->mtd)
+ goto unlock;
+
+ ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
+unlock:
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static const struct block_device_operations mtd_block_ops = {
+ .owner = THIS_MODULE,
+ .open = blktrans_open,
+ .release = blktrans_release,
+ .getgeo = blktrans_getgeo,
+};
+
+static const struct blk_mq_ops mtd_mq_ops = {
+ .queue_rq = mtd_queue_rq,
+};
+
+int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
+{
+ struct mtd_blktrans_ops *tr = new->tr;
+ struct mtd_blktrans_dev *d;
+ int last_devnum = -1;
+ struct gendisk *gd;
+ int ret;
+
+ lockdep_assert_held(&mtd_table_mutex);
+
+ list_for_each_entry(d, &tr->devs, list) {
+ if (new->devnum == -1) {
+ /* Use first free number */
+ if (d->devnum != last_devnum+1) {
+ /* Found a free devnum. Plug it in here */
+ new->devnum = last_devnum+1;
+ list_add_tail(&new->list, &d->list);
+ goto added;
+ }
+ } else if (d->devnum == new->devnum) {
+ /* Required number taken */
+ return -EBUSY;
+ } else if (d->devnum > new->devnum) {
+ /* Required number was free */
+ list_add_tail(&new->list, &d->list);
+ goto added;
+ }
+ last_devnum = d->devnum;
+ }
+
+ ret = -EBUSY;
+ if (new->devnum == -1)
+ new->devnum = last_devnum+1;
+
+ /* Check that the device and any partitions will get valid
+ * minor numbers and that the disk naming code below can cope
+ * with this number. */
+ if (new->devnum > (MINORMASK >> tr->part_bits) ||
+ (tr->part_bits && new->devnum >= 27 * 26))
+ return ret;
+
+ list_add_tail(&new->list, &tr->devs);
+ added:
+
+ mutex_init(&new->lock);
+ kref_init(&new->ref);
+ if (!tr->writesect)
+ new->readonly = 1;
+
+ ret = -ENOMEM;
+ new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
+ if (!new->tag_set)
+ goto out_list_del;
+
+ ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ if (ret)
+ goto out_kfree_tag_set;
+
+ /* Create gendisk */
+ gd = blk_mq_alloc_disk(new->tag_set, new);
+ if (IS_ERR(gd)) {
+ ret = PTR_ERR(gd);
+ goto out_free_tag_set;
+ }
+
+ new->disk = gd;
+ new->rq = new->disk->queue;
+ gd->private_data = new;
+ gd->major = tr->major;
+ gd->first_minor = (new->devnum) << tr->part_bits;
+ gd->minors = 1 << tr->part_bits;
+ gd->fops = &mtd_block_ops;
+
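+	/*
+	 * Translation layers with partition bits get floppy-style
+	 * letter suffixes ("a".."z", then "aa" onwards); the rest get
+	 * the plain device number, e.g. "mtdblock0".
+	 */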
+ if (tr->part_bits) {
+ if (new->devnum < 26)
+ snprintf(gd->disk_name, sizeof(gd->disk_name),
+ "%s%c", tr->name, 'a' + new->devnum);
+ else
+ snprintf(gd->disk_name, sizeof(gd->disk_name),
+ "%s%c%c", tr->name,
+ 'a' - 1 + new->devnum / 26,
+ 'a' + new->devnum % 26);
+ } else {
+ snprintf(gd->disk_name, sizeof(gd->disk_name),
+ "%s%d", tr->name, new->devnum);
+ gd->flags |= GENHD_FL_NO_PART;
+ }
+
+ set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);
+
+ /* Create the request queue */
+ spin_lock_init(&new->queue_lock);
+ INIT_LIST_HEAD(&new->rq_list);
+
+ if (tr->flush)
+ blk_queue_write_cache(new->rq, true, false);
+
+ blk_queue_logical_block_size(new->rq, tr->blksize);
+
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
+ blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
+
+ if (tr->discard) {
+ blk_queue_max_discard_sectors(new->rq, UINT_MAX);
+ new->rq->limits.discard_granularity = tr->blksize;
+ }
+
+ gd->queue = new->rq;
+
+ if (new->readonly)
+ set_disk_ro(gd, 1);
+
+ ret = device_add_disk(&new->mtd->dev, gd, NULL);
+ if (ret)
+ goto out_cleanup_disk;
+
+ if (new->disk_attributes) {
+ ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
+ new->disk_attributes);
+ WARN_ON(ret);
+ }
+ return 0;
+
+out_cleanup_disk:
+ put_disk(new->disk);
+out_free_tag_set:
+ blk_mq_free_tag_set(new->tag_set);
+out_kfree_tag_set:
+ kfree(new->tag_set);
+out_list_del:
+ list_del(&new->list);
+ return ret;
+}
+
+int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
+{
+ unsigned long flags;
+
+ lockdep_assert_held(&mtd_table_mutex);
+
+ if (old->disk_attributes)
+ sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
+ old->disk_attributes);
+
+	/* Stop new requests from arriving */
+ del_gendisk(old->disk);
+
+ /* Kill current requests */
+ spin_lock_irqsave(&old->queue_lock, flags);
+ old->rq->queuedata = NULL;
+ spin_unlock_irqrestore(&old->queue_lock, flags);
+
+ /* freeze+quiesce queue to ensure all requests are flushed */
+ blk_mq_freeze_queue(old->rq);
+ blk_mq_quiesce_queue(old->rq);
+ blk_mq_unquiesce_queue(old->rq);
+ blk_mq_unfreeze_queue(old->rq);
+
+	/* If the device is currently open, tell the trans driver to close it,
+	then put the MTD device, and don't touch it again */
+ mutex_lock(&old->lock);
+ if (old->open) {
+ if (old->tr->release)
+ old->tr->release(old);
+ __put_mtd_device(old->mtd);
+ }
+
+ old->mtd = NULL;
+
+ mutex_unlock(&old->lock);
+ blktrans_dev_put(old);
+ return 0;
+}
+
+static void blktrans_notify_remove(struct mtd_info *mtd)
+{
+ struct mtd_blktrans_ops *tr;
+ struct mtd_blktrans_dev *dev, *next;
+
+ list_for_each_entry(tr, &blktrans_majors, list)
+ list_for_each_entry_safe(dev, next, &tr->devs, list)
+ if (dev->mtd == mtd)
+ tr->remove_dev(dev);
+}
+
+static void blktrans_notify_add(struct mtd_info *mtd)
+{
+ struct mtd_blktrans_ops *tr;
+
+ if (mtd->type == MTD_ABSENT || mtd->type == MTD_UBIVOLUME)
+ return;
+
+ list_for_each_entry(tr, &blktrans_majors, list)
+ tr->add_mtd(tr, mtd);
+}
+
+static struct mtd_notifier blktrans_notifier = {
+ .add = blktrans_notify_add,
+ .remove = blktrans_notify_remove,
+};
+
+int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
+{
+ struct mtd_info *mtd;
+ int ret;
+
+ /* Register the notifier if/when the first device type is
+	   registered, to prevent the link/init ordering from messing
+	   us up. */
+ if (!blktrans_notifier.list.next)
+ register_mtd_user(&blktrans_notifier);
+
+ ret = register_blkdev(tr->major, tr->name);
+ if (ret < 0) {
+ printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
+ tr->name, tr->major, ret);
+ return ret;
+ }
+
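+	/* A major of 0 requests a dynamic major; register_blkdev()
+	 * returns the number it allocated. */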
+ if (ret)
+ tr->major = ret;
+
+ tr->blkshift = ffs(tr->blksize) - 1;
+
+ INIT_LIST_HEAD(&tr->devs);
+
+ mutex_lock(&mtd_table_mutex);
+ list_add(&tr->list, &blktrans_majors);
+ mtd_for_each_device(mtd)
+ if (mtd->type != MTD_ABSENT && mtd->type != MTD_UBIVOLUME)
+ tr->add_mtd(tr, mtd);
+ mutex_unlock(&mtd_table_mutex);
+ return 0;
+}
+
+int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
+{
+ struct mtd_blktrans_dev *dev, *next;
+
+ mutex_lock(&mtd_table_mutex);
+
+ /* Remove it from the list of active majors */
+ list_del(&tr->list);
+
+ list_for_each_entry_safe(dev, next, &tr->devs, list)
+ tr->remove_dev(dev);
+
+ mutex_unlock(&mtd_table_mutex);
+ unregister_blkdev(tr->major, tr->name);
+
+ BUG_ON(!list_empty(&tr->devs));
+ return 0;
+}
+
+static void __exit mtd_blktrans_exit(void)
+{
+ /* No race here -- if someone's currently in register_mtd_blktrans
+ we're screwed anyway. */
+ if (blktrans_notifier.list.next)
+ unregister_mtd_user(&blktrans_notifier);
+}
+
+module_exit(mtd_blktrans_exit);
+
+EXPORT_SYMBOL_GPL(register_mtd_blktrans);
+EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
+EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
+EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);
+
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
new file mode 100644
index 0000000000..9751416c2a
--- /dev/null
+++ b/drivers/mtd/mtdblock.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Direct MTD block device access
+ *
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ * Copyright © 2000-2003 Nicolas Pitre <nico@fluxnic.net>
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/mutex.h>
+#include <linux/major.h>
+
+
+struct mtdblk_dev {
+ struct mtd_blktrans_dev mbd;
+ int count;
+ struct mutex cache_mutex;
+ unsigned char *cache_data;
+ unsigned long cache_offset;
+ unsigned int cache_size;
+ enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
+};
+
+/*
+ * Cache stuff...
+ *
+ * Since typical flash erasable sectors are much larger than what Linux's
+ * buffer cache can handle, we must implement read-modify-write on flash
+ * sectors for each block write request. To avoid over-erasing flash sectors
+ * and to speed things up, we locally cache a whole flash sector while it is
+ * being written to until a different sector is required.
+ */
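+
+/*
+ * For example (sizes are illustrative): with a 64 KiB erase block and
+ * 512-byte requests, a write to one sector reads the enclosing 64 KiB
+ * into cache_data, patches 512 bytes in place and marks the cache
+ * STATE_DIRTY; the erase-and-rewrite is deferred until a write lands
+ * in a different sector or the device is flushed or released.
+ */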
+
+static int erase_write (struct mtd_info *mtd, unsigned long pos,
+ unsigned int len, const char *buf)
+{
+ struct erase_info erase;
+ size_t retlen;
+ int ret;
+
+ /*
+ * First, let's erase the flash block.
+ */
+ erase.addr = pos;
+ erase.len = len;
+
+ ret = mtd_erase(mtd, &erase);
+ if (ret) {
+ printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
+ "on \"%s\" failed\n",
+ pos, len, mtd->name);
+ return ret;
+ }
+
+ /*
+ * Next, write the data to flash.
+ */
+
+ ret = mtd_write(mtd, pos, len, &retlen, buf);
+ if (ret)
+ return ret;
+ if (retlen != len)
+ return -EIO;
+ return 0;
+}
+
+
+static int write_cached_data (struct mtdblk_dev *mtdblk)
+{
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
+ int ret;
+
+ if (mtdblk->cache_state != STATE_DIRTY)
+ return 0;
+
+ pr_debug("mtdblock: writing cached data for \"%s\" "
+ "at 0x%lx, size 0x%x\n", mtd->name,
+ mtdblk->cache_offset, mtdblk->cache_size);
+
+ ret = erase_write (mtd, mtdblk->cache_offset,
+ mtdblk->cache_size, mtdblk->cache_data);
+
+ /*
+ * Here we could arguably set the cache state to STATE_CLEAN.
+ * However this could lead to inconsistency since we will not
+ * be notified if this content is altered on the flash by other
+ * means. Let's declare it empty and leave buffering tasks to
+ * the buffer cache instead.
+ *
+ * If this cache_offset points to a bad block, data cannot be
+ * written to the device. Clear cache_state to avoid writing to
+ * bad blocks repeatedly.
+ */
+ if (ret == 0 || ret == -EIO)
+ mtdblk->cache_state = STATE_EMPTY;
+ return ret;
+}
+
+
+static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
+ int len, const char *buf)
+{
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
+ unsigned int sect_size = mtdblk->cache_size;
+ size_t retlen;
+ int ret;
+
+ pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
+ mtd->name, pos, len);
+
+ if (!sect_size)
+ return mtd_write(mtd, pos, len, &retlen, buf);
+
+ while (len > 0) {
+ unsigned long sect_start = (pos/sect_size)*sect_size;
+ unsigned int offset = pos - sect_start;
+ unsigned int size = sect_size - offset;
+		if (size > len)
+ size = len;
+
+ if (size == sect_size) {
+ /*
+ * We are covering a whole sector. Thus there is no
+ * need to bother with the cache while it may still be
+ * useful for other partial writes.
+ */
+ ret = erase_write (mtd, pos, size, buf);
+ if (ret)
+ return ret;
+ } else {
+ /* Partial sector: need to use the cache */
+
+ if (mtdblk->cache_state == STATE_DIRTY &&
+ mtdblk->cache_offset != sect_start) {
+ ret = write_cached_data(mtdblk);
+ if (ret)
+ return ret;
+ }
+
+ if (mtdblk->cache_state == STATE_EMPTY ||
+ mtdblk->cache_offset != sect_start) {
+ /* fill the cache with the current sector */
+ mtdblk->cache_state = STATE_EMPTY;
+ ret = mtd_read(mtd, sect_start, sect_size,
+ &retlen, mtdblk->cache_data);
+ if (ret && !mtd_is_bitflip(ret))
+ return ret;
+ if (retlen != sect_size)
+ return -EIO;
+
+ mtdblk->cache_offset = sect_start;
+ mtdblk->cache_size = sect_size;
+ mtdblk->cache_state = STATE_CLEAN;
+ }
+
+ /* write data to our local cache */
+ memcpy (mtdblk->cache_data + offset, buf, size);
+ mtdblk->cache_state = STATE_DIRTY;
+ }
+
+ buf += size;
+ pos += size;
+ len -= size;
+ }
+
+ return 0;
+}
+
+
+static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
+ int len, char *buf)
+{
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
+ unsigned int sect_size = mtdblk->cache_size;
+ size_t retlen;
+ int ret;
+
+ pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
+ mtd->name, pos, len);
+
+ if (!sect_size) {
+ ret = mtd_read(mtd, pos, len, &retlen, buf);
+ if (ret && !mtd_is_bitflip(ret))
+ return ret;
+ return 0;
+ }
+
+ while (len > 0) {
+ unsigned long sect_start = (pos/sect_size)*sect_size;
+ unsigned int offset = pos - sect_start;
+ unsigned int size = sect_size - offset;
+ if (size > len)
+ size = len;
+
+ /*
+		 * Check if the requested data is already cached:
+		 * read the requested amount of data from our internal cache if it
+		 * contains what we want, otherwise read the data directly
+		 * from flash.
+ */
+ if (mtdblk->cache_state != STATE_EMPTY &&
+ mtdblk->cache_offset == sect_start) {
+ memcpy (buf, mtdblk->cache_data + offset, size);
+ } else {
+ ret = mtd_read(mtd, pos, size, &retlen, buf);
+ if (ret && !mtd_is_bitflip(ret))
+ return ret;
+ if (retlen != size)
+ return -EIO;
+ }
+
+ buf += size;
+ pos += size;
+ len -= size;
+ }
+
+ return 0;
+}
+
+static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
+ return do_cached_read(mtdblk, block<<9, 512, buf);
+}
+
+static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
+ if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
+ mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
+		/* -EINTR is not really correct, but it is the best match
+		 * documented in man 2 write for all cases. We could also
+		 * return -EAGAIN sometimes, but why bother?
+		 */
+		if (!mtdblk->cache_data)
+			return -EINTR;
+ }
+ return do_cached_write(mtdblk, block<<9, 512, buf);
+}
+
+static int mtdblock_open(struct mtd_blktrans_dev *mbd)
+{
+ struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
+
+ pr_debug("mtdblock_open\n");
+
+ if (mtdblk->count) {
+ mtdblk->count++;
+ return 0;
+ }
+
+ if (mtd_type_is_nand(mbd->mtd))
+ pr_warn_ratelimited("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
+ mbd->tr->name, mbd->mtd->name);
+
+ /* OK, it's not open. Create cache info for it */
+ mtdblk->count = 1;
+ mutex_init(&mtdblk->cache_mutex);
+ mtdblk->cache_state = STATE_EMPTY;
+ if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
+ mtdblk->cache_size = mbd->mtd->erasesize;
+ mtdblk->cache_data = NULL;
+ }
+
+ pr_debug("ok\n");
+
+ return 0;
+}
+
+static void mtdblock_release(struct mtd_blktrans_dev *mbd)
+{
+ struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
+
+ pr_debug("mtdblock_release\n");
+
+ mutex_lock(&mtdblk->cache_mutex);
+ write_cached_data(mtdblk);
+ mutex_unlock(&mtdblk->cache_mutex);
+
+ if (!--mtdblk->count) {
+ /*
+ * It was the last usage. Free the cache, but only sync if
+ * opened for writing.
+ */
+ if (mbd->writable)
+ mtd_sync(mbd->mtd);
+ vfree(mtdblk->cache_data);
+ }
+
+ pr_debug("ok\n");
+}
+
+static int mtdblock_flush(struct mtd_blktrans_dev *dev)
+{
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
+ int ret;
+
+ mutex_lock(&mtdblk->cache_mutex);
+ ret = write_cached_data(mtdblk);
+ mutex_unlock(&mtdblk->cache_mutex);
+ mtd_sync(dev->mtd);
+ return ret;
+}
+
+static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+ if (!dev)
+ return;
+
+ dev->mbd.mtd = mtd;
+ dev->mbd.devnum = mtd->index;
+
+ dev->mbd.size = mtd->size >> 9;
+ dev->mbd.tr = tr;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ dev->mbd.readonly = 1;
+
+ if (add_mtd_blktrans_dev(&dev->mbd))
+ kfree(dev);
+}
+
+static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ del_mtd_blktrans_dev(dev);
+}
+
+static struct mtd_blktrans_ops mtdblock_tr = {
+ .name = "mtdblock",
+ .major = MTD_BLOCK_MAJOR,
+ .part_bits = 0,
+ .blksize = 512,
+ .open = mtdblock_open,
+ .flush = mtdblock_flush,
+ .release = mtdblock_release,
+ .readsect = mtdblock_readsect,
+ .writesect = mtdblock_writesect,
+ .add_mtd = mtdblock_add_mtd,
+ .remove_dev = mtdblock_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+module_mtd_blktrans(mtdblock_tr);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net> et al.");
+MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
new file mode 100644
index 0000000000..ef6299af60
--- /dev/null
+++ b/drivers/mtd/mtdblock_ro.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Simple read-only (writable only for RAM) mtdblock driver
+ *
+ * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/module.h>
+#include <linux/major.h>
+
+static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ size_t retlen;
+ int err;
+
+ err = mtd_read(dev->mtd, (block * 512), 512, &retlen, buf);
+ if (err && !mtd_is_bitflip(err))
+ return 1;
+ return 0;
+}
+
+static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ size_t retlen;
+
+ if (mtd_write(dev->mtd, (block * 512), 512, &retlen, buf))
+ return 1;
+ return 0;
+}
+
+static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+ if (!dev)
+ return;
+
+ dev->mtd = mtd;
+ dev->devnum = mtd->index;
+
+ dev->size = mtd->size >> 9;
+ dev->tr = tr;
+ dev->readonly = 1;
+
+ if (mtd_type_is_nand(mtd))
+ pr_warn_ratelimited("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
+ tr->name, mtd->name);
+
+ if (add_mtd_blktrans_dev(dev))
+ kfree(dev);
+}
+
+static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ del_mtd_blktrans_dev(dev);
+}
+
+static struct mtd_blktrans_ops mtdblock_tr = {
+ .name = "mtdblock",
+ .major = MTD_BLOCK_MAJOR,
+ .part_bits = 0,
+ .blksize = 512,
+ .readsect = mtdblock_readsect,
+ .writesect = mtdblock_writesect,
+ .add_mtd = mtdblock_add_mtd,
+ .remove_dev = mtdblock_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+module_mtd_blktrans(mtdblock_tr);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Simple read-only block device emulation access to MTD devices");
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
new file mode 100644
index 0000000000..8dc4f5c493
--- /dev/null
+++ b/drivers/mtd/mtdchar.c
@@ -0,0 +1,1439 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/backing-dev.h>
+#include <linux/compat.h>
+#include <linux/mount.h>
+#include <linux/blkpg.h>
+#include <linux/magic.h>
+#include <linux/major.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/map.h>
+
+#include <linux/uaccess.h>
+
+#include "mtdcore.h"
+
+/*
+ * Data structure to hold the pointer to the mtd device as well
+ * as mode information of various use cases.
+ */
+struct mtd_file_info {
+ struct mtd_info *mtd;
+ enum mtd_file_modes mode;
+};
+
+static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
+}
+
+static int mtdchar_open(struct inode *inode, struct file *file)
+{
+ int minor = iminor(inode);
+ int devnum = minor >> 1;
+ int ret = 0;
+ struct mtd_info *mtd;
+ struct mtd_file_info *mfi;
+
+ pr_debug("MTD_open\n");
+
+ /* You can't open the RO devices RW */
+ if ((file->f_mode & FMODE_WRITE) && (minor & 1))
+ return -EACCES;
+
+ mtd = get_mtd_device(NULL, devnum);
+
+ if (IS_ERR(mtd))
+ return PTR_ERR(mtd);
+
+ if (mtd->type == MTD_ABSENT) {
+ ret = -ENODEV;
+ goto out1;
+ }
+
+ /* You can't open it RW if it's not a writeable device */
+ if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
+ ret = -EACCES;
+ goto out1;
+ }
+
+ mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
+ if (!mfi) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+ mfi->mtd = mtd;
+ file->private_data = mfi;
+ return 0;
+
+out1:
+ put_mtd_device(mtd);
+ return ret;
+} /* mtdchar_open */
+
+/*====================================================================*/
+
+static int mtdchar_close(struct inode *inode, struct file *file)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+
+ pr_debug("MTD_close\n");
+
+ /* Only sync if opened RW */
+ if ((file->f_mode & FMODE_WRITE))
+ mtd_sync(mtd);
+
+ put_mtd_device(mtd);
+ file->private_data = NULL;
+ kfree(mfi);
+
+ return 0;
+} /* mtdchar_close */
+
+/* Back in June 2001, dwmw2 wrote:
+ *
+ * FIXME: This _really_ needs to die. In 2.5, we should lock the
+ * userspace buffer down and use it directly with readv/writev.
+ *
+ * The implementation below, using mtd_kmalloc_up_to, mitigates
+ * allocation failures when the system is under low-memory situations
+ * or if memory is highly fragmented at the cost of reducing the
+ * performance of the requested transfer due to a smaller buffer size.
+ *
+ * A more complex but more memory-efficient implementation based on
+ * get_user_pages and iovecs to cover extents of those pages is a
+ * longer-term goal, as intimated by dwmw2 above. However, for the
+ * write case, this requires yet more complex head and tail transfer
+ * handling when those head and tail offsets and sizes are such that
+ * alignment requirements are not met in the NAND subdriver.
+ */
+
+static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ size_t retlen;
+	size_t total_retlen = 0;
+	int ret = 0;
+ int len;
+ size_t size = count;
+ char *kbuf;
+
+ pr_debug("MTD_read\n");
+
+ if (*ppos + count > mtd->size) {
+ if (*ppos < mtd->size)
+ count = mtd->size - *ppos;
+ else
+ count = 0;
+ }
+
+ if (!count)
+ return 0;
+
+ kbuf = mtd_kmalloc_up_to(mtd, &size);
+ if (!kbuf)
+ return -ENOMEM;
+
+ while (count) {
+ len = min_t(size_t, count, size);
+
+ switch (mfi->mode) {
+ case MTD_FILE_MODE_OTP_FACTORY:
+ ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
+ break;
+ case MTD_FILE_MODE_OTP_USER:
+ ret = mtd_read_user_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
+ break;
+ case MTD_FILE_MODE_RAW:
+ {
+ struct mtd_oob_ops ops = {};
+
+ ops.mode = MTD_OPS_RAW;
+ ops.datbuf = kbuf;
+ ops.oobbuf = NULL;
+ ops.len = len;
+
+ ret = mtd_read_oob(mtd, *ppos, &ops);
+ retlen = ops.retlen;
+ break;
+ }
+ default:
+ ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
+ }
+		/* NAND returns -EBADMSG on ECC errors, but it still returns
+ * the data. For our userspace tools it is important
+ * to dump areas with ECC errors!
+ * For kernel internal usage it also might return -EUCLEAN
+ * to signal the caller that a bitflip has occurred and has
+ * been corrected by the ECC algorithm.
+ * Userspace software which accesses NAND this way
+		 * must be aware of the fact that it deals with NAND.
+ */
+ if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
+ *ppos += retlen;
+ if (copy_to_user(buf, kbuf, retlen)) {
+ kfree(kbuf);
+ return -EFAULT;
+			} else {
+				total_retlen += retlen;
+			}
+
+ count -= retlen;
+ buf += retlen;
+ if (retlen == 0)
+ count = 0;
+		} else {
+ kfree(kbuf);
+ return ret;
+ }
+
+ }
+
+ kfree(kbuf);
+ return total_retlen;
+} /* mtdchar_read */
+
+static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ size_t size = count;
+ char *kbuf;
+ size_t retlen;
+ size_t total_retlen=0;
+ int ret=0;
+ int len;
+
+ pr_debug("MTD_write\n");
+
+ if (*ppos >= mtd->size)
+ return -ENOSPC;
+
+ if (*ppos + count > mtd->size)
+ count = mtd->size - *ppos;
+
+ if (!count)
+ return 0;
+
+ kbuf = mtd_kmalloc_up_to(mtd, &size);
+ if (!kbuf)
+ return -ENOMEM;
+
+ while (count) {
+ len = min_t(size_t, count, size);
+
+ if (copy_from_user(kbuf, buf, len)) {
+ kfree(kbuf);
+ return -EFAULT;
+ }
+
+ switch (mfi->mode) {
+ case MTD_FILE_MODE_OTP_FACTORY:
+ ret = -EROFS;
+ break;
+ case MTD_FILE_MODE_OTP_USER:
+ ret = mtd_write_user_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
+ break;
+
+ case MTD_FILE_MODE_RAW:
+ {
+ struct mtd_oob_ops ops = {};
+
+ ops.mode = MTD_OPS_RAW;
+ ops.datbuf = kbuf;
+ ops.oobbuf = NULL;
+ ops.ooboffs = 0;
+ ops.len = len;
+
+ ret = mtd_write_oob(mtd, *ppos, &ops);
+ retlen = ops.retlen;
+ break;
+ }
+
+ default:
+ ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
+ }
+
+ /*
+ * Return -ENOSPC only if no data could be written at all.
+ * Otherwise just return the number of bytes that actually
+ * have been written.
+ */
+ if ((ret == -ENOSPC) && (total_retlen))
+ break;
+
+ if (!ret) {
+ *ppos += retlen;
+ total_retlen += retlen;
+ count -= retlen;
+ buf += retlen;
+ }
+ else {
+ kfree(kbuf);
+ return ret;
+ }
+ }
+
+ kfree(kbuf);
+ return total_retlen;
+} /* mtdchar_write */
+
+/*======================================================================
+
+ IOCTL calls for getting device parameters.
+
+======================================================================*/
+
+static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
+{
+ struct mtd_info *mtd = mfi->mtd;
+ size_t retlen;
+
+ switch (mode) {
+ case MTD_OTP_FACTORY:
+ if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
+ -EOPNOTSUPP)
+ return -EOPNOTSUPP;
+
+ mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
+ break;
+ case MTD_OTP_USER:
+ if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
+ -EOPNOTSUPP)
+ return -EOPNOTSUPP;
+
+ mfi->mode = MTD_FILE_MODE_OTP_USER;
+ break;
+ case MTD_OTP_OFF:
+ mfi->mode = MTD_FILE_MODE_NORMAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
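+
+/*
+ * Hedged usage sketch: selecting user OTP mode from userspace. The ioctl
+ * takes a pointer to an int holding one of the MTD_OTP_* constants from
+ * <mtd/mtd-abi.h>; subsequent read()/write() calls on the descriptor then
+ * target the OTP region instead of the main array.
+ *
+ *	int mode = MTD_OTP_USER;
+ *
+ *	if (ioctl(fd, OTPSELECT, &mode) < 0)
+ *		perror("OTPSELECT");
+ */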
+
+static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
+ uint64_t start, uint32_t length, void __user *ptr,
+ uint32_t __user *retp)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_oob_ops ops = {};
+ uint32_t retlen;
+ int ret = 0;
+
+ if (length > 4096)
+ return -EINVAL;
+
+ if (!master->_write_oob)
+ return -EOPNOTSUPP;
+
+ ops.ooblen = length;
+ ops.ooboffs = start & (mtd->writesize - 1);
+ ops.datbuf = NULL;
+ ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
+ MTD_OPS_PLACE_OOB;
+
+ if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
+ return -EINVAL;
+
+ ops.oobbuf = memdup_user(ptr, length);
+ if (IS_ERR(ops.oobbuf))
+ return PTR_ERR(ops.oobbuf);
+
+ start &= ~((uint64_t)mtd->writesize - 1);
+ ret = mtd_write_oob(mtd, start, &ops);
+
+ if (ops.oobretlen > 0xFFFFFFFFU)
+ ret = -EOVERFLOW;
+ retlen = ops.oobretlen;
+ if (copy_to_user(retp, &retlen, sizeof(retlen)))
+ ret = -EFAULT;
+
+ kfree(ops.oobbuf);
+ return ret;
+}
+
+static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
+ uint64_t start, uint32_t length, void __user *ptr,
+ uint32_t __user *retp)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_oob_ops ops = {};
+ int ret = 0;
+
+ if (length > 4096)
+ return -EINVAL;
+
+ ops.ooblen = length;
+ ops.ooboffs = start & (mtd->writesize - 1);
+ ops.datbuf = NULL;
+ ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
+ MTD_OPS_PLACE_OOB;
+
+ if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
+ return -EINVAL;
+
+ ops.oobbuf = kmalloc(length, GFP_KERNEL);
+ if (!ops.oobbuf)
+ return -ENOMEM;
+
+ start &= ~((uint64_t)mtd->writesize - 1);
+ ret = mtd_read_oob(mtd, start, &ops);
+
+ if (put_user(ops.oobretlen, retp))
+ ret = -EFAULT;
+ else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
+ ops.oobretlen))
+ ret = -EFAULT;
+
+ kfree(ops.oobbuf);
+
+ /*
+ * NAND returns -EBADMSG on ECC errors, but it returns the OOB
+ * data. For our userspace tools it is important to dump areas
+ * with ECC errors!
+ * For kernel internal usage it also might return -EUCLEAN
+ * to signal the caller that a bitflip has occurred and has
+ * been corrected by the ECC algorithm.
+ *
+ * Note: currently the standard NAND function, nand_read_oob_std,
+ * does not calculate ECC for the OOB area, so do not rely on
+ * this behavior unless you have replaced it with your own.
+ */
+ if (mtd_is_bitflip_or_eccerr(ret))
+ return 0;
+
+ return ret;
+}
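+
+/*
+ * Hedged usage sketch for the MEMREADOOB ioctl serviced above. The
+ * page_offset variable and OOB buffer size are assumptions; on return the
+ * kernel writes the transferred byte count back into the struct (see the
+ * buf_user->start note in mtdchar_ioctl() below).
+ *
+ *	unsigned char oob[64];
+ *	struct mtd_oob_buf req = {
+ *		.start = page_offset,
+ *		.length = sizeof(oob),
+ *		.ptr = oob,
+ *	};
+ *
+ *	if (ioctl(fd, MEMREADOOB, &req) < 0)
+ *		perror("MEMREADOOB");
+ */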
+
+/*
+ * Copies (and truncates, if necessary) OOB layout information to the
+ * deprecated layout struct, nand_ecclayout_user. This is necessary only to
+ * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
+ * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
+ * can describe any kind of OOB layout with almost zero overhead from a
+ * memory usage point of view).
+ */
+static int shrink_ecclayout(struct mtd_info *mtd,
+ struct nand_ecclayout_user *to)
+{
+ struct mtd_oob_region oobregion;
+ int i, section = 0, ret;
+
+ if (!mtd || !to)
+ return -EINVAL;
+
+ memset(to, 0, sizeof(*to));
+
+ to->eccbytes = 0;
+ for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
+ u32 eccpos;
+
+ ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
+ if (ret < 0) {
+ if (ret != -ERANGE)
+ return ret;
+
+ break;
+ }
+
+ eccpos = oobregion.offset;
+ for (; i < MTD_MAX_ECCPOS_ENTRIES &&
+ eccpos < oobregion.offset + oobregion.length; i++) {
+ to->eccpos[i] = eccpos++;
+ to->eccbytes++;
+ }
+ }
+
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
+ ret = mtd_ooblayout_free(mtd, i, &oobregion);
+ if (ret < 0) {
+ if (ret != -ERANGE)
+ return ret;
+
+ break;
+ }
+
+ to->oobfree[i].offset = oobregion.offset;
+ to->oobfree[i].length = oobregion.length;
+ to->oobavail += to->oobfree[i].length;
+ }
+
+ return 0;
+}
+
+static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
+{
+ struct mtd_oob_region oobregion;
+ int i, section = 0, ret;
+
+ if (!mtd || !to)
+ return -EINVAL;
+
+ memset(to, 0, sizeof(*to));
+
+ to->eccbytes = 0;
+ for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
+ u32 eccpos;
+
+ ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
+ if (ret < 0) {
+ if (ret != -ERANGE)
+ return ret;
+
+ break;
+ }
+
+ if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
+ return -EINVAL;
+
+ eccpos = oobregion.offset;
+ for (; eccpos < oobregion.offset + oobregion.length; i++) {
+ to->eccpos[i] = eccpos++;
+ to->eccbytes++;
+ }
+ }
+
+ for (i = 0; i < 8; i++) {
+ ret = mtd_ooblayout_free(mtd, i, &oobregion);
+ if (ret < 0) {
+ if (ret != -ERANGE)
+ return ret;
+
+ break;
+ }
+
+ to->oobfree[i][0] = oobregion.offset;
+ to->oobfree[i][1] = oobregion.length;
+ }
+
+ to->useecc = MTD_NANDECC_AUTOPLACE;
+
+ return 0;
+}
+
+static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
+ struct blkpg_ioctl_arg *arg)
+{
+ struct blkpg_partition p;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&p, arg->data, sizeof(p)))
+ return -EFAULT;
+
+ switch (arg->op) {
+ case BLKPG_ADD_PARTITION:
+
+ /* Only the master mtd device may be used to add partitions */
+ if (mtd_is_partition(mtd))
+ return -EINVAL;
+
+ /* Sanitize user input */
+ p.devname[BLKPG_DEVNAMELTH - 1] = '\0';
+
+ return mtd_add_partition(mtd, p.devname, p.start, p.length);
+
+ case BLKPG_DEL_PARTITION:
+
+ if (p.pno < 0)
+ return -EINVAL;
+
+ return mtd_del_partition(mtd, p.pno);
+
+ default:
+ return -EINVAL;
+ }
+}
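+
+/*
+ * Hedged usage sketch for BLKPG partition manipulation on an MTD master
+ * device (requires CAP_SYS_ADMIN). The start/length values are
+ * assumptions; they should be erase-block aligned so the resulting
+ * partition remains erasable.
+ *
+ *	struct blkpg_partition part = {
+ *		.start = 0,
+ *		.length = 4 * 128 * 1024,
+ *		.devname = "example-part",
+ *	};
+ *	struct blkpg_ioctl_arg arg = {
+ *		.op = BLKPG_ADD_PARTITION,
+ *		.datalen = sizeof(part),
+ *		.data = &part,
+ *	};
+ *
+ *	if (ioctl(fd, BLKPG, &arg) < 0)
+ *		perror("BLKPG");
+ */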
+
+static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
+ struct mtd_oob_ops *ops)
+{
+ uint32_t start_page, end_page;
+ u32 oob_per_page;
+
+ if (ops->len == 0 || ops->ooblen == 0)
+ return;
+
+ start_page = mtd_div_by_ws(start, mtd);
+ end_page = mtd_div_by_ws(start + ops->len - 1, mtd);
+ oob_per_page = mtd_oobavail(mtd, ops);
+
+ ops->ooblen = min_t(size_t, ops->ooblen,
+ (end_page - start_page + 1) * oob_per_page);
+}
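+
+/*
+ * Worked example (values assumed): with writesize = 2048 and 64 OOB bytes
+ * available per page, a request of len = 6000 starting at offset 1024
+ * touches pages 0 through 3, so ooblen is clamped to
+ * (3 - 0 + 1) * 64 = 256 bytes. Any surplus OOB data the caller supplied
+ * is carried over to the next loop iteration in mtdchar_write_ioctl()
+ * below.
+ */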
+
+static noinline_for_stack int
+mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct mtd_write_req req;
+ const void __user *usr_data, *usr_oob;
+ uint8_t *datbuf = NULL, *oobbuf = NULL;
+ size_t datbuf_len, oobbuf_len;
+ int ret = 0;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ usr_data = (const void __user *)(uintptr_t)req.usr_data;
+ usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
+
+ if (!master->_write_oob)
+ return -EOPNOTSUPP;
+
+ if (!usr_data)
+ req.len = 0;
+
+ if (!usr_oob)
+ req.ooblen = 0;
+
+ req.len &= 0xffffffff;
+ req.ooblen &= 0xffffffff;
+
+ if (req.start + req.len > mtd->size)
+ return -EINVAL;
+
+ datbuf_len = min_t(size_t, req.len, mtd->erasesize);
+ if (datbuf_len > 0) {
+ datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
+ if (!datbuf)
+ return -ENOMEM;
+ }
+
+ oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
+ if (oobbuf_len > 0) {
+ oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
+ if (!oobbuf) {
+ kvfree(datbuf);
+ return -ENOMEM;
+ }
+ }
+
+ while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
+ struct mtd_oob_ops ops = {
+ .mode = req.mode,
+ .len = min_t(size_t, req.len, datbuf_len),
+ .ooblen = min_t(size_t, req.ooblen, oobbuf_len),
+ .datbuf = datbuf,
+ .oobbuf = oobbuf,
+ };
+
+ /*
+ * Shorten non-page-aligned, eraseblock-sized writes so that
+ * the write ends on an eraseblock boundary. This is necessary
+ * for adjust_oob_length() to properly handle non-page-aligned
+ * writes.
+ */
+ if (ops.len == mtd->erasesize)
+ ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
+
+ /*
+ * For writes which are not OOB-only, adjust the amount of OOB
+ * data written according to the number of data pages written.
+ * This is necessary to prevent OOB data from being skipped
+ * over in data+OOB writes requiring multiple mtd_write_oob()
+ * calls to be completed.
+ */
+ adjust_oob_length(mtd, req.start, &ops);
+
+ if (copy_from_user(datbuf, usr_data, ops.len) ||
+ copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = mtd_write_oob(mtd, req.start, &ops);
+ if (ret)
+ break;
+
+ req.start += ops.retlen;
+ req.len -= ops.retlen;
+ usr_data += ops.retlen;
+
+ req.ooblen -= ops.oobretlen;
+ usr_oob += ops.oobretlen;
+ }
+
+ kvfree(datbuf);
+ kvfree(oobbuf);
+
+ return ret;
+}
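+
+/*
+ * Hedged usage sketch for the MEMWRITE ioctl (requires a writable open).
+ * The buffer size and mode are assumptions; data and OOB may be supplied
+ * together or individually, since a NULL user pointer disables that half
+ * of the transfer in the handler above.
+ *
+ *	unsigned char data[2048];
+ *	struct mtd_write_req req = {
+ *		.start = 0,
+ *		.len = sizeof(data),
+ *		.ooblen = 0,
+ *		.usr_data = (uintptr_t)data,
+ *		.usr_oob = 0,
+ *		.mode = MTD_OPS_PLACE_OOB,
+ *	};
+ *
+ *	if (ioctl(fd, MEMWRITE, &req) < 0)
+ *		perror("MEMWRITE");
+ */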
+
+static noinline_for_stack int
+mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct mtd_read_req req;
+ void __user *usr_data, *usr_oob;
+ uint8_t *datbuf = NULL, *oobbuf = NULL;
+ size_t datbuf_len, oobbuf_len;
+ size_t orig_len, orig_ooblen;
+ int ret = 0;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ orig_len = req.len;
+ orig_ooblen = req.ooblen;
+
+ usr_data = (void __user *)(uintptr_t)req.usr_data;
+ usr_oob = (void __user *)(uintptr_t)req.usr_oob;
+
+ if (!master->_read_oob)
+ return -EOPNOTSUPP;
+
+ if (!usr_data)
+ req.len = 0;
+
+ if (!usr_oob)
+ req.ooblen = 0;
+
+ req.ecc_stats.uncorrectable_errors = 0;
+ req.ecc_stats.corrected_bitflips = 0;
+ req.ecc_stats.max_bitflips = 0;
+
+ req.len &= 0xffffffff;
+ req.ooblen &= 0xffffffff;
+
+ if (req.start + req.len > mtd->size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ datbuf_len = min_t(size_t, req.len, mtd->erasesize);
+ if (datbuf_len > 0) {
+ datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
+ if (!datbuf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
+ if (oobbuf_len > 0) {
+ oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
+ if (!oobbuf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
+ struct mtd_req_stats stats;
+ struct mtd_oob_ops ops = {
+ .mode = req.mode,
+ .len = min_t(size_t, req.len, datbuf_len),
+ .ooblen = min_t(size_t, req.ooblen, oobbuf_len),
+ .datbuf = datbuf,
+ .oobbuf = oobbuf,
+ .stats = &stats,
+ };
+
+ /*
+ * Shorten non-page-aligned, eraseblock-sized reads so that the
+ * read ends on an eraseblock boundary. This is necessary in
+ * order to prevent OOB data for some pages from being
+ * duplicated in the output of non-page-aligned reads requiring
+ * multiple mtd_read_oob() calls to be completed.
+ */
+ if (ops.len == mtd->erasesize)
+ ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
+
+ ret = mtd_read_oob(mtd, (loff_t)req.start, &ops);
+
+ req.ecc_stats.uncorrectable_errors +=
+ stats.uncorrectable_errors;
+ req.ecc_stats.corrected_bitflips += stats.corrected_bitflips;
+ req.ecc_stats.max_bitflips =
+ max(req.ecc_stats.max_bitflips, stats.max_bitflips);
+
+ if (ret && !mtd_is_bitflip_or_eccerr(ret))
+ break;
+
+ if (copy_to_user(usr_data, ops.datbuf, ops.retlen) ||
+ copy_to_user(usr_oob, ops.oobbuf, ops.oobretlen)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ req.start += ops.retlen;
+ req.len -= ops.retlen;
+ usr_data += ops.retlen;
+
+ req.ooblen -= ops.oobretlen;
+ usr_oob += ops.oobretlen;
+ }
+
+ /*
+ * As multiple iterations of the above loop (and therefore multiple
+ * mtd_read_oob() calls) may be necessary to complete the read request,
+ * adjust the final return code to ensure it accounts for all detected
+ * ECC errors.
+ */
+ if (!ret || mtd_is_bitflip(ret)) {
+ if (req.ecc_stats.uncorrectable_errors > 0)
+ ret = -EBADMSG;
+ else if (req.ecc_stats.corrected_bitflips > 0)
+ ret = -EUCLEAN;
+ }
+
+out:
+ req.len = orig_len - req.len;
+ req.ooblen = orig_ooblen - req.ooblen;
+
+ if (copy_to_user(argp, &req, sizeof(req)))
+ ret = -EFAULT;
+
+ kvfree(datbuf);
+ kvfree(oobbuf);
+
+ return ret;
+}
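+
+/*
+ * Hedged usage sketch for the MEMREAD ioctl. Unlike plain read(), the
+ * request structure reports ECC statistics, which the loop above
+ * accumulates across internal mtd_read_oob() calls. Note that corrected
+ * bitflips surface as an EUCLEAN error even though the data was
+ * transferred. The buffer size is an assumption.
+ *
+ *	unsigned char data[2048];
+ *	struct mtd_read_req req = {
+ *		.start = 0,
+ *		.len = sizeof(data),
+ *		.usr_data = (uintptr_t)data,
+ *		.mode = MTD_OPS_PLACE_OOB,
+ *	};
+ *
+ *	if (ioctl(fd, MEMREAD, &req) < 0 && errno == EUCLEAN)
+ *		fprintf(stderr, "corrected bitflips: %u\n",
+ *			req.ecc_stats.corrected_bitflips);
+ */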
+
+static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ struct mtd_info *master = mtd_get_master(mtd);
+ void __user *argp = (void __user *)arg;
+ int ret = 0;
+ struct mtd_info_user info;
+
+ pr_debug("MTD_ioctl\n");
+
+ /*
+ * Check the file mode to require "dangerous" commands to have write
+ * permissions.
+ */
+ switch (cmd) {
+ /* "safe" commands */
+ case MEMGETREGIONCOUNT:
+ case MEMGETREGIONINFO:
+ case MEMGETINFO:
+ case MEMREADOOB:
+ case MEMREADOOB64:
+ case MEMREAD:
+ case MEMISLOCKED:
+ case MEMGETOOBSEL:
+ case MEMGETBADBLOCK:
+ case OTPSELECT:
+ case OTPGETREGIONCOUNT:
+ case OTPGETREGIONINFO:
+ case ECCGETLAYOUT:
+ case ECCGETSTATS:
+ case MTDFILEMODE:
+ case BLKPG:
+ case BLKRRPART:
+ break;
+
+ /* "dangerous" commands */
+ case MEMERASE:
+ case MEMERASE64:
+ case MEMLOCK:
+ case MEMUNLOCK:
+ case MEMSETBADBLOCK:
+ case MEMWRITEOOB:
+ case MEMWRITEOOB64:
+ case MEMWRITE:
+ case OTPLOCK:
+ case OTPERASE:
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EPERM;
+ break;
+
+ default:
+ return -ENOTTY;
+ }
+
+ switch (cmd) {
+ case MEMGETREGIONCOUNT:
+ if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
+ return -EFAULT;
+ break;
+
+ case MEMGETREGIONINFO:
+ {
+ uint32_t ur_idx;
+ struct mtd_erase_region_info *kr;
+ struct region_info_user __user *ur = argp;
+
+ if (get_user(ur_idx, &(ur->regionindex)))
+ return -EFAULT;
+
+ if (ur_idx >= mtd->numeraseregions)
+ return -EINVAL;
+
+ kr = &(mtd->eraseregions[ur_idx]);
+
+ if (put_user(kr->offset, &(ur->offset))
+ || put_user(kr->erasesize, &(ur->erasesize))
+ || put_user(kr->numblocks, &(ur->numblocks)))
+ return -EFAULT;
+
+ break;
+ }
+
+ case MEMGETINFO:
+ memset(&info, 0, sizeof(info));
+ info.type = mtd->type;
+ info.flags = mtd->flags;
+ info.size = mtd->size;
+ info.erasesize = mtd->erasesize;
+ info.writesize = mtd->writesize;
+ info.oobsize = mtd->oobsize;
+ /* The below field is obsolete */
+ info.padding = 0;
+ if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
+ return -EFAULT;
+ break;
+
+ case MEMERASE:
+ case MEMERASE64:
+ {
+ struct erase_info *erase;
+
+ erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
+ if (!erase)
+ ret = -ENOMEM;
+ else {
+ if (cmd == MEMERASE64) {
+ struct erase_info_user64 einfo64;
+
+ if (copy_from_user(&einfo64, argp,
+ sizeof(struct erase_info_user64))) {
+ kfree(erase);
+ return -EFAULT;
+ }
+ erase->addr = einfo64.start;
+ erase->len = einfo64.length;
+ } else {
+ struct erase_info_user einfo32;
+
+ if (copy_from_user(&einfo32, argp,
+ sizeof(struct erase_info_user))) {
+ kfree(erase);
+ return -EFAULT;
+ }
+ erase->addr = einfo32.start;
+ erase->len = einfo32.length;
+ }
+
+ ret = mtd_erase(mtd, erase);
+ kfree(erase);
+ }
+ break;
+ }
+
+ case MEMWRITEOOB:
+ {
+ struct mtd_oob_buf buf;
+ struct mtd_oob_buf __user *buf_user = argp;
+
+ /* NOTE: writes return length to buf_user->length */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
+ buf.ptr, &buf_user->length);
+ break;
+ }
+
+ case MEMREADOOB:
+ {
+ struct mtd_oob_buf buf;
+ struct mtd_oob_buf __user *buf_user = argp;
+
+ /* NOTE: writes return length to buf_user->start */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
+ buf.ptr, &buf_user->start);
+ break;
+ }
+
+ case MEMWRITEOOB64:
+ {
+ struct mtd_oob_buf64 buf;
+ struct mtd_oob_buf64 __user *buf_user = argp;
+
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
+ (void __user *)(uintptr_t)buf.usr_ptr,
+ &buf_user->length);
+ break;
+ }
+
+ case MEMREADOOB64:
+ {
+ struct mtd_oob_buf64 buf;
+ struct mtd_oob_buf64 __user *buf_user = argp;
+
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
+ (void __user *)(uintptr_t)buf.usr_ptr,
+ &buf_user->length);
+ break;
+ }
+
+ case MEMWRITE:
+ {
+ ret = mtdchar_write_ioctl(mtd,
+ (struct mtd_write_req __user *)arg);
+ break;
+ }
+
+ case MEMREAD:
+ {
+ ret = mtdchar_read_ioctl(mtd,
+ (struct mtd_read_req __user *)arg);
+ break;
+ }
+
+ case MEMLOCK:
+ {
+ struct erase_info_user einfo;
+
+ if (copy_from_user(&einfo, argp, sizeof(einfo)))
+ return -EFAULT;
+
+ ret = mtd_lock(mtd, einfo.start, einfo.length);
+ break;
+ }
+
+ case MEMUNLOCK:
+ {
+ struct erase_info_user einfo;
+
+ if (copy_from_user(&einfo, argp, sizeof(einfo)))
+ return -EFAULT;
+
+ ret = mtd_unlock(mtd, einfo.start, einfo.length);
+ break;
+ }
+
+ case MEMISLOCKED:
+ {
+ struct erase_info_user einfo;
+
+ if (copy_from_user(&einfo, argp, sizeof(einfo)))
+ return -EFAULT;
+
+ ret = mtd_is_locked(mtd, einfo.start, einfo.length);
+ break;
+ }
+
+ /* Legacy interface */
+ case MEMGETOOBSEL:
+ {
+ struct nand_oobinfo oi;
+
+ if (!master->ooblayout)
+ return -EOPNOTSUPP;
+
+ ret = get_oobinfo(mtd, &oi);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
+ return -EFAULT;
+ break;
+ }
+
+ case MEMGETBADBLOCK:
+ {
+ loff_t offs;
+
+ if (copy_from_user(&offs, argp, sizeof(loff_t)))
+ return -EFAULT;
+ return mtd_block_isbad(mtd, offs);
+ }
+
+ case MEMSETBADBLOCK:
+ {
+ loff_t offs;
+
+ if (copy_from_user(&offs, argp, sizeof(loff_t)))
+ return -EFAULT;
+ return mtd_block_markbad(mtd, offs);
+ }
+
+ case OTPSELECT:
+ {
+ int mode;
+ if (copy_from_user(&mode, argp, sizeof(int)))
+ return -EFAULT;
+
+ mfi->mode = MTD_FILE_MODE_NORMAL;
+
+ ret = otp_select_filemode(mfi, mode);
+
+ file->f_pos = 0;
+ break;
+ }
+
+ case OTPGETREGIONCOUNT:
+ case OTPGETREGIONINFO:
+ {
+ struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
+ size_t retlen;
+ if (!buf)
+ return -ENOMEM;
+ switch (mfi->mode) {
+ case MTD_FILE_MODE_OTP_FACTORY:
+ ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
+ break;
+ case MTD_FILE_MODE_OTP_USER:
+ ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (!ret) {
+ if (cmd == OTPGETREGIONCOUNT) {
+ int nbr = retlen / sizeof(struct otp_info);
+ ret = copy_to_user(argp, &nbr, sizeof(int));
+ } else
+ ret = copy_to_user(argp, buf, retlen);
+ if (ret)
+ ret = -EFAULT;
+ }
+ kfree(buf);
+ break;
+ }
+
+ case OTPLOCK:
+ case OTPERASE:
+ {
+ struct otp_info oinfo;
+
+ if (mfi->mode != MTD_FILE_MODE_OTP_USER)
+ return -EINVAL;
+ if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
+ return -EFAULT;
+ if (cmd == OTPLOCK)
+ ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
+ else
+ ret = mtd_erase_user_prot_reg(mtd, oinfo.start, oinfo.length);
+ break;
+ }
+
+ /* This ioctl is being deprecated - it truncates the ECC layout */
+ case ECCGETLAYOUT:
+ {
+ struct nand_ecclayout_user *usrlay;
+
+ if (!master->ooblayout)
+ return -EOPNOTSUPP;
+
+ usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
+ if (!usrlay)
+ return -ENOMEM;
+
+ shrink_ecclayout(mtd, usrlay);
+
+ if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
+ ret = -EFAULT;
+ kfree(usrlay);
+ break;
+ }
+
+ case ECCGETSTATS:
+ {
+ if (copy_to_user(argp, &mtd->ecc_stats,
+ sizeof(struct mtd_ecc_stats)))
+ return -EFAULT;
+ break;
+ }
+
+ case MTDFILEMODE:
+ {
+ mfi->mode = 0;
+
+ switch(arg) {
+ case MTD_FILE_MODE_OTP_FACTORY:
+ case MTD_FILE_MODE_OTP_USER:
+ ret = otp_select_filemode(mfi, arg);
+ break;
+
+ case MTD_FILE_MODE_RAW:
+ if (!mtd_has_oob(mtd))
+ return -EOPNOTSUPP;
+ mfi->mode = arg;
+ break;
+
+ case MTD_FILE_MODE_NORMAL:
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ file->f_pos = 0;
+ break;
+ }
+
+ case BLKPG:
+ {
+ struct blkpg_ioctl_arg __user *blk_arg = argp;
+ struct blkpg_ioctl_arg a;
+
+ if (copy_from_user(&a, blk_arg, sizeof(a)))
+ ret = -EFAULT;
+ else
+ ret = mtdchar_blkpg_ioctl(mtd, &a);
+ break;
+ }
+
+ case BLKRRPART:
+ {
+ /* No re-read partition feature. Just return OK. */
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+} /* mtdchar_ioctl */
+
+static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ mutex_lock(&master->master.chrdev_lock);
+ ret = mtdchar_ioctl(file, cmd, arg);
+ mutex_unlock(&master->master.chrdev_lock);
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+
+struct mtd_oob_buf32 {
+ u_int32_t start;
+ u_int32_t length;
+ compat_caddr_t ptr; /* unsigned char* */
+};
+
+#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
+#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
+
+static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ struct mtd_info *master = mtd_get_master(mtd);
+ void __user *argp = compat_ptr(arg);
+ int ret = 0;
+
+ mutex_lock(&master->master.chrdev_lock);
+
+ switch (cmd) {
+ case MEMWRITEOOB32:
+ {
+ struct mtd_oob_buf32 buf;
+ struct mtd_oob_buf32 __user *buf_user = argp;
+
+ if (!(file->f_mode & FMODE_WRITE)) {
+ ret = -EPERM;
+ break;
+ }
+
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtdchar_writeoob(file, mtd, buf.start,
+ buf.length, compat_ptr(buf.ptr),
+ &buf_user->length);
+ break;
+ }
+
+ case MEMREADOOB32:
+ {
+ struct mtd_oob_buf32 buf;
+ struct mtd_oob_buf32 __user *buf_user = argp;
+
+ /* NOTE: writes return length to buf_user->start */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtdchar_readoob(file, mtd, buf.start,
+ buf.length, compat_ptr(buf.ptr),
+ &buf_user->start);
+ break;
+ }
+
+ case BLKPG:
+ {
+ /* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
+ struct blkpg_compat_ioctl_arg __user *uarg = argp;
+ struct blkpg_compat_ioctl_arg compat_arg;
+ struct blkpg_ioctl_arg a;
+
+ if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ memset(&a, 0, sizeof(a));
+ a.op = compat_arg.op;
+ a.flags = compat_arg.flags;
+ a.datalen = compat_arg.datalen;
+ a.data = compat_ptr(compat_arg.data);
+
+ ret = mtdchar_blkpg_ioctl(mtd, &a);
+ break;
+ }
+
+ default:
+ ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
+ }
+
+ mutex_unlock(&master->master.chrdev_lock);
+
+ return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
+/*
+ * try to determine where a shared mapping can be made
+ * - only supported for NOMMU at the moment (an MMU kernel doesn't copy
+ * private mappings)
+ */
+#ifndef CONFIG_MMU
+static unsigned long mtdchar_get_unmapped_area(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ unsigned long offset;
+ int ret;
+
+ if (addr != 0)
+ return (unsigned long) -EINVAL;
+
+ if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
+ return (unsigned long) -EINVAL;
+
+ offset = pgoff << PAGE_SHIFT;
+ if (offset > mtd->size - len)
+ return (unsigned long) -EINVAL;
+
+ ret = mtd_get_unmapped_area(mtd, len, offset, flags);
+ return ret == -EOPNOTSUPP ? -ENODEV : ret;
+}
+
+static unsigned mtdchar_mmap_capabilities(struct file *file)
+{
+ struct mtd_file_info *mfi = file->private_data;
+
+ return mtd_mmap_capabilities(mfi->mtd);
+}
+#endif
+
+/*
+ * set up a mapping for shared memory segments
+ */
+static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
+{
+#ifdef CONFIG_MMU
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ struct map_info *map = mtd->priv;
+
+ /* This is broken because it assumes the MTD device is map-based
+ and that mtd->priv is a valid struct map_info. It should be
+ replaced with something that uses the mtd_get_unmapped_area()
+ operation properly. */
+ if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
+#ifdef pgprot_noncached
+ if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
+ return vm_iomap_memory(vma, map->phys, map->size);
+ }
+ return -ENODEV;
+#else
+ return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
+#endif
+}
+
+static const struct file_operations mtd_fops = {
+ .owner = THIS_MODULE,
+ .llseek = mtdchar_lseek,
+ .read = mtdchar_read,
+ .write = mtdchar_write,
+ .unlocked_ioctl = mtdchar_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mtdchar_compat_ioctl,
+#endif
+ .open = mtdchar_open,
+ .release = mtdchar_close,
+ .mmap = mtdchar_mmap,
+#ifndef CONFIG_MMU
+ .get_unmapped_area = mtdchar_get_unmapped_area,
+ .mmap_capabilities = mtdchar_mmap_capabilities,
+#endif
+};
+
+int __init init_mtdchar(void)
+{
+ int ret;
+
+ ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
+ "mtd", &mtd_fops);
+ if (ret < 0) {
+ pr_err("Can't allocate major number %d for MTD\n",
+ MTD_CHAR_MAJOR);
+ return ret;
+ }
+
+ return ret;
+}
+
+void __exit cleanup_mtdchar(void)
+{
+ __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
+}
+
+MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
new file mode 100644
index 0000000000..193428de6a
--- /dev/null
+++ b/drivers/mtd/mtdconcat.c
@@ -0,0 +1,914 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MTD device concatenation layer
+ *
+ * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
+ * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * NAND support by Christian Gan <cgan@iders.ca>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/backing-dev.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/concat.h>
+
+#include <asm/div64.h>
+
+/*
+ * Our storage structure:
+ * subdev points to an array of pointers to struct mtd_info objects,
+ * which is allocated along with this structure.
+ */
+struct mtd_concat {
+ struct mtd_info mtd;
+ int num_subdev;
+ struct mtd_info **subdev;
+};
+
+/*
+ * how to calculate the size required for the above structure,
+ * including the pointer array subdev points to:
+ */
+#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev) \
+ ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
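+
+/*
+ * Worked example: SIZEOF_STRUCT_MTD_CONCAT(2) reserves room for the
+ * struct itself plus two struct mtd_info pointers. mtd_concat_create()
+ * below points concat->subdev at the memory immediately following the
+ * structure (concat + 1), so a single allocation covers both.
+ */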
+
+/*
+ * Given a pointer to the MTD object in the mtd_concat structure,
+ * we can retrieve the pointer to that structure with this macro.
+ */
+#define CONCAT(x) ((struct mtd_concat *)(x))
+
+/*
+ * MTD methods which look up the relevant subdevice, translate the
+ * effective address and pass through to the subdevice.
+ */
+
+static int
+concat_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int ret = 0, err;
+ int i;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (from >= subdev->size) {
+ /* Not destined for this subdev */
+ size = 0;
+ from -= subdev->size;
+ continue;
+ }
+ if (from + len > subdev->size)
+ /* First part goes into this subdev */
+ size = subdev->size - from;
+ else
+ /* Entire transaction goes into this subdev */
+ size = len;
+
+ err = mtd_read(subdev, from, size, &retsize, buf);
+
+ /* Save information about bitflips! */
+ if (unlikely(err)) {
+ if (mtd_is_eccerr(err)) {
+ mtd->ecc_stats.failed++;
+ ret = err;
+ } else if (mtd_is_bitflip(err)) {
+ mtd->ecc_stats.corrected++;
+ /* Do not overwrite -EBADMSG !! */
+ if (!ret)
+ ret = err;
+ } else
+ return err;
+ }
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ return ret;
+
+ buf += size;
+ from = 0;
+ }
+ return -EINVAL;
+}
+
+static int
+concat_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int err = -EINVAL;
+ int i;
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (to >= subdev->size) {
+ to -= subdev->size;
+ continue;
+ }
+ if (to + len > subdev->size)
+ size = subdev->size - to;
+ else
+ size = len;
+
+ err = mtd_panic_write(subdev, to, size, &retsize, buf);
+ if (err == -EOPNOTSUPP) {
+ printk(KERN_ERR "mtdconcat: Cannot write from panic without panic_write\n");
+ return err;
+ }
+ if (err)
+ break;
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ buf += size;
+ to = 0;
+ }
+ return err;
+}
+
+
+static int
+concat_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int err = -EINVAL;
+ int i;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (to >= subdev->size) {
+ size = 0;
+ to -= subdev->size;
+ continue;
+ }
+ if (to + len > subdev->size)
+ size = subdev->size - to;
+ else
+ size = len;
+
+ err = mtd_write(subdev, to, size, &retsize, buf);
+ if (err)
+ break;
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ buf += size;
+ to = 0;
+ }
+ return err;
+}
+
+static int
+concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t * retlen)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct kvec *vecs_copy;
+ unsigned long entry_low, entry_high;
+ size_t total_len = 0;
+ int i;
+ int err = -EINVAL;
+
+ /* Calculate total length of data */
+ for (i = 0; i < count; i++)
+ total_len += vecs[i].iov_len;
+
+ /* Check alignment */
+ if (mtd->writesize > 1) {
+ uint64_t __to = to;
+ if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
+ return -EINVAL;
+ }
+
+ /* make a copy of vecs */
+ vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
+ if (!vecs_copy)
+ return -ENOMEM;
+
+ entry_low = 0;
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, wsize, retsize, old_iov_len;
+
+ if (to >= subdev->size) {
+ to -= subdev->size;
+ continue;
+ }
+
+ size = min_t(uint64_t, total_len, subdev->size - to);
+ wsize = size; /* store for future use */
+
+ entry_high = entry_low;
+ while (entry_high < count) {
+ if (size <= vecs_copy[entry_high].iov_len)
+ break;
+ size -= vecs_copy[entry_high++].iov_len;
+ }
+
+ old_iov_len = vecs_copy[entry_high].iov_len;
+ vecs_copy[entry_high].iov_len = size;
+
+ err = mtd_writev(subdev, &vecs_copy[entry_low],
+ entry_high - entry_low + 1, to, &retsize);
+
+ vecs_copy[entry_high].iov_len = old_iov_len - size;
+ vecs_copy[entry_high].iov_base += size;
+
+ entry_low = entry_high;
+
+ if (err)
+ break;
+
+ *retlen += retsize;
+ total_len -= wsize;
+
+ if (total_len == 0)
+ break;
+
+ err = -EINVAL;
+ to = 0;
+ }
+
+ kfree(vecs_copy);
+ return err;
+}
+
+static int
+concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_oob_ops devops = *ops;
+ int i, err, ret = 0;
+
+ ops->retlen = ops->oobretlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (from >= subdev->size) {
+ from -= subdev->size;
+ continue;
+ }
+
+ /* partial read? */
+ if (from + devops.len > subdev->size)
+ devops.len = subdev->size - from;
+
+ err = mtd_read_oob(subdev, from, &devops);
+ ops->retlen += devops.retlen;
+ ops->oobretlen += devops.oobretlen;
+
+ /* Save information about bitflips! */
+ if (unlikely(err)) {
+ if (mtd_is_eccerr(err)) {
+ mtd->ecc_stats.failed++;
+ ret = err;
+ } else if (mtd_is_bitflip(err)) {
+ mtd->ecc_stats.corrected++;
+ /* Do not overwrite -EBADMSG !! */
+ if (!ret)
+ ret = err;
+ } else
+ return err;
+ }
+
+ if (devops.datbuf) {
+ devops.len = ops->len - ops->retlen;
+ if (!devops.len)
+ return ret;
+ devops.datbuf += devops.retlen;
+ }
+ if (devops.oobbuf) {
+ devops.ooblen = ops->ooblen - ops->oobretlen;
+ if (!devops.ooblen)
+ return ret;
+ devops.oobbuf += ops->oobretlen;
+ }
+
+ from = 0;
+ }
+ return -EINVAL;
+}
+
+static int
+concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_oob_ops devops = *ops;
+ int i, err;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ ops->retlen = ops->oobretlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (to >= subdev->size) {
+ to -= subdev->size;
+ continue;
+ }
+
+ /* partial write? */
+ if (to + devops.len > subdev->size)
+ devops.len = subdev->size - to;
+
+ err = mtd_write_oob(subdev, to, &devops);
+ ops->retlen += devops.retlen;
+ ops->oobretlen += devops.oobretlen;
+ if (err)
+ return err;
+
+ if (devops.datbuf) {
+ devops.len = ops->len - ops->retlen;
+ if (!devops.len)
+ return 0;
+ devops.datbuf += devops.retlen;
+ }
+ if (devops.oobbuf) {
+ devops.ooblen = ops->ooblen - ops->oobretlen;
+ if (!devops.ooblen)
+ return 0;
+ devops.oobbuf += devops.oobretlen;
+ }
+ to = 0;
+ }
+ return -EINVAL;
+}
+
+static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_info *subdev;
+ int i, err;
+ uint64_t length, offset = 0;
+ struct erase_info *erase;
+
+ /*
+ * Check for proper erase block alignment of the to-be-erased area.
+ * It is easier to do this based on the super device's erase
+ * region info rather than looking at each particular sub-device
+ * in turn.
+ */
+ if (!concat->mtd.numeraseregions) {
+ /* the easy case: device has uniform erase block size */
+ if (instr->addr & (concat->mtd.erasesize - 1))
+ return -EINVAL;
+ if (instr->len & (concat->mtd.erasesize - 1))
+ return -EINVAL;
+ } else {
+ /* device has variable erase size */
+ struct mtd_erase_region_info *erase_regions =
+ concat->mtd.eraseregions;
+
+ /*
+ * Find the erase region where the to-be-erased area begins:
+ */
+ for (i = 0; i < concat->mtd.numeraseregions &&
+ instr->addr >= erase_regions[i].offset; i++) ;
+ --i;
+
+ /*
+ * Now erase_regions[i] is the region in which the
+ * to-be-erased area begins. Verify that the starting
+ * offset is aligned to this region's erase size:
+ */
+ if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
+ return -EINVAL;
+
+ /*
+ * now find the erase region where the to-be-erased area ends:
+ */
+ for (; i < concat->mtd.numeraseregions &&
+ (instr->addr + instr->len) >= erase_regions[i].offset;
+ ++i) ;
+ --i;
+ /*
+ * check if the ending offset is aligned to this region's erase size
+ */
+ if (i < 0 || ((instr->addr + instr->len) &
+ (erase_regions[i].erasesize - 1)))
+ return -EINVAL;
+ }
+
+ /* make a local copy of instr to avoid modifying the caller's struct */
+ erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
+
+ if (!erase)
+ return -ENOMEM;
+
+ *erase = *instr;
+ length = instr->len;
+
+ /*
+ * find the subdevice where the to-be-erased area begins, adjust
+ * starting offset to be relative to the subdevice start
+ */
+ for (i = 0; i < concat->num_subdev; i++) {
+ subdev = concat->subdev[i];
+ if (subdev->size <= erase->addr) {
+ erase->addr -= subdev->size;
+ offset += subdev->size;
+ } else {
+ break;
+ }
+ }
+
+ /* must never happen since size limit has been verified above */
+ BUG_ON(i >= concat->num_subdev);
+
+ /* now do the erase: */
+ err = 0;
+ for (; length > 0; i++) {
+ /* loop for all subdevices affected by this request */
+ subdev = concat->subdev[i]; /* get current subdevice */
+
+ /* limit length to subdevice's size: */
+ if (erase->addr + length > subdev->size)
+ erase->len = subdev->size - erase->addr;
+ else
+ erase->len = length;
+
+ length -= erase->len;
+ if ((err = mtd_erase(subdev, erase))) {
+ /* sanity check: should never happen since
+ * block alignment has been checked above */
+ BUG_ON(err == -EINVAL);
+ if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr = erase->fail_addr + offset;
+ break;
+ }
+ /*
+ * erase->addr specifies the offset of the area to be
+ * erased *within the current subdevice*. It can be
+ * non-zero only the first time through this loop, i.e.
+ * for the first subdevice where blocks need to be erased.
+ * All the following erases must begin at the start of the
+ * current subdevice, i.e. at offset zero.
+ */
+ erase->addr = 0;
+ offset += subdev->size;
+ }
+ kfree(erase);
+
+ return err;
+}
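+
+/*
+ * Worked example (sizes assumed): concatenating two 8 MiB subdevices with
+ * a uniform 128 KiB erase block and erasing 4 MiB at offset 6 MiB, the
+ * search loop above leaves erase->addr at 6 MiB within subdevice 0; the
+ * first mtd_erase() covers that subdevice's final 2 MiB, and the second
+ * iteration erases the first 2 MiB of subdevice 1 with erase->addr reset
+ * to zero.
+ */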
+
+static int concat_xxlock(struct mtd_info *mtd, loff_t ofs, uint64_t len,
+ bool is_lock)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = -EINVAL;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ uint64_t size;
+
+ if (ofs >= subdev->size) {
+ size = 0;
+ ofs -= subdev->size;
+ continue;
+ }
+ if (ofs + len > subdev->size)
+ size = subdev->size - ofs;
+ else
+ size = len;
+
+ if (is_lock)
+ err = mtd_lock(subdev, ofs, size);
+ else
+ err = mtd_unlock(subdev, ofs, size);
+ if (err)
+ break;
+
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ ofs = 0;
+ }
+
+ return err;
+}
+
+static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ return concat_xxlock(mtd, ofs, len, true);
+}
+
+static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ return concat_xxlock(mtd, ofs, len, false);
+}
+
+static int concat_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = -EINVAL;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (ofs >= subdev->size) {
+ ofs -= subdev->size;
+ continue;
+ }
+
+ if (ofs + len > subdev->size)
+ break;
+
+ return mtd_is_locked(subdev, ofs, len);
+ }
+
+ return err;
+}
+
+static void concat_sync(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ mtd_sync(subdev);
+ }
+}
+
+static int concat_suspend(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, rc = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ if ((rc = mtd_suspend(subdev)) < 0)
+ return rc;
+ }
+ return rc;
+}
+
+static void concat_resume(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ mtd_resume(subdev);
+ }
+}
+
+static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, res = 0;
+
+ if (!mtd_can_have_bb(concat->subdev[0]))
+ return res;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (ofs >= subdev->size) {
+ ofs -= subdev->size;
+ continue;
+ }
+
+ res = mtd_block_isbad(subdev, ofs);
+ break;
+ }
+
+ return res;
+}
+
+static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = -EINVAL;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (ofs >= subdev->size) {
+ ofs -= subdev->size;
+ continue;
+ }
+
+ err = mtd_block_markbad(subdev, ofs);
+ if (!err)
+ mtd->ecc_stats.badblocks++;
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * This function constructs a virtual MTD device by concatenating
+ * num_devs MTD devices. A pointer to the new device object is
+ * returned on success, or NULL on failure. This function does _not_
+ * register any devices: this is the caller's responsibility.
+ */
+struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to concatenate */
+ int num_devs, /* number of subdevices */
+ const char *name) /* name for the new device */
+{
+ int i;
+ size_t size;
+ struct mtd_concat *concat;
+ struct mtd_info *subdev_master = NULL;
+ uint32_t max_erasesize, curr_erasesize;
+ int num_erase_region;
+ int max_writebufsize = 0;
+
+ printk(KERN_NOTICE "Concatenating MTD devices:\n");
+ for (i = 0; i < num_devs; i++)
+ printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
+ printk(KERN_NOTICE "into device \"%s\"\n", name);
+
+ /* allocate the device structure */
+ size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
+ concat = kzalloc(size, GFP_KERNEL);
+ if (!concat) {
+ printk
+ ("memory allocation error while creating concatenated device \"%s\"\n",
+ name);
+ return NULL;
+ }
+ concat->subdev = (struct mtd_info **) (concat + 1);
+
+ /*
+ * Set up the new "super" device's MTD object structure, check for
+ * incompatibilities between the subdevices.
+ */
+ concat->mtd.type = subdev[0]->type;
+ concat->mtd.flags = subdev[0]->flags;
+ concat->mtd.size = subdev[0]->size;
+ concat->mtd.erasesize = subdev[0]->erasesize;
+ concat->mtd.writesize = subdev[0]->writesize;
+
+ for (i = 0; i < num_devs; i++)
+ if (max_writebufsize < subdev[i]->writebufsize)
+ max_writebufsize = subdev[i]->writebufsize;
+ concat->mtd.writebufsize = max_writebufsize;
+
+ concat->mtd.subpage_sft = subdev[0]->subpage_sft;
+ concat->mtd.oobsize = subdev[0]->oobsize;
+ concat->mtd.oobavail = subdev[0]->oobavail;
+
+ subdev_master = mtd_get_master(subdev[0]);
+ if (subdev_master->_writev)
+ concat->mtd._writev = concat_writev;
+ if (subdev_master->_read_oob)
+ concat->mtd._read_oob = concat_read_oob;
+ if (subdev_master->_write_oob)
+ concat->mtd._write_oob = concat_write_oob;
+ if (subdev_master->_block_isbad)
+ concat->mtd._block_isbad = concat_block_isbad;
+ if (subdev_master->_block_markbad)
+ concat->mtd._block_markbad = concat_block_markbad;
+ if (subdev_master->_panic_write)
+ concat->mtd._panic_write = concat_panic_write;
+ if (subdev_master->_read)
+ concat->mtd._read = concat_read;
+ if (subdev_master->_write)
+ concat->mtd._write = concat_write;
+
+ concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
+
+ concat->subdev[0] = subdev[0];
+
+ for (i = 1; i < num_devs; i++) {
+ if (concat->mtd.type != subdev[i]->type) {
+ kfree(concat);
+ printk("Incompatible device type on \"%s\"\n",
+ subdev[i]->name);
+ return NULL;
+ }
+ if (concat->mtd.flags != subdev[i]->flags) {
+ /*
+ * Expect all flags except MTD_WRITEABLE to be
+ * equal on all subdevices.
+ */
+ if ((concat->mtd.flags ^ subdev[i]->
+ flags) & ~MTD_WRITEABLE) {
+ kfree(concat);
+ printk("Incompatible device flags on \"%s\"\n",
+ subdev[i]->name);
+ return NULL;
+ } else
+ /* if writeable attribute differs,
+ make super device writeable */
+ concat->mtd.flags |=
+ subdev[i]->flags & MTD_WRITEABLE;
+ }
+
+ subdev_master = mtd_get_master(subdev[i]);
+ concat->mtd.size += subdev[i]->size;
+ concat->mtd.ecc_stats.badblocks +=
+ subdev[i]->ecc_stats.badblocks;
+ if (concat->mtd.writesize != subdev[i]->writesize ||
+ concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
+ concat->mtd.oobsize != subdev[i]->oobsize ||
+ !concat->mtd._read_oob != !subdev_master->_read_oob ||
+ !concat->mtd._write_oob != !subdev_master->_write_oob) {
+ /*
+ * Check against subdev[i] for data members, because
+ * subdev's attributes may be different from master
+ * mtd device. Check against subdev's master mtd
+ * device for callbacks, because the existence of
+ * subdev's callbacks is decided by master mtd device.
+ */
+ kfree(concat);
+ printk("Incompatible OOB or ECC data on \"%s\"\n",
+ subdev[i]->name);
+ return NULL;
+ }
+ concat->subdev[i] = subdev[i];
+
+ }
+
+ mtd_set_ooblayout(&concat->mtd, subdev[0]->ooblayout);
+
+ concat->num_subdev = num_devs;
+ concat->mtd.name = name;
+
+ concat->mtd._erase = concat_erase;
+ concat->mtd._sync = concat_sync;
+ concat->mtd._lock = concat_lock;
+ concat->mtd._unlock = concat_unlock;
+ concat->mtd._is_locked = concat_is_locked;
+ concat->mtd._suspend = concat_suspend;
+ concat->mtd._resume = concat_resume;
+
+ /*
+ * Combine the erase block size info of the subdevices:
+ *
+ * first, walk the map of the new device and see how
+ * many changes in erase size we have
+ */
+ max_erasesize = curr_erasesize = subdev[0]->erasesize;
+ num_erase_region = 1;
+ for (i = 0; i < num_devs; i++) {
+ if (subdev[i]->numeraseregions == 0) {
+ /* current subdevice has uniform erase size */
+ if (subdev[i]->erasesize != curr_erasesize) {
+ /* if it differs from the last subdevice's erase size, count it */
+ ++num_erase_region;
+ curr_erasesize = subdev[i]->erasesize;
+ if (curr_erasesize > max_erasesize)
+ max_erasesize = curr_erasesize;
+ }
+ } else {
+ /* current subdevice has variable erase size */
+ int j;
+ for (j = 0; j < subdev[i]->numeraseregions; j++) {
+
+ /* walk the list of erase regions, count any changes */
+ if (subdev[i]->eraseregions[j].erasesize !=
+ curr_erasesize) {
+ ++num_erase_region;
+ curr_erasesize =
+ subdev[i]->eraseregions[j].
+ erasesize;
+ if (curr_erasesize > max_erasesize)
+ max_erasesize = curr_erasesize;
+ }
+ }
+ }
+ }
+
+ if (num_erase_region == 1) {
+ /*
+ * All subdevices have the same uniform erase size.
+ * This is easy:
+ */
+ concat->mtd.erasesize = curr_erasesize;
+ concat->mtd.numeraseregions = 0;
+ } else {
+ uint64_t tmp64;
+
+ /*
+ * erase block size varies across the subdevices: allocate
+ * space to store the data describing the variable erase regions
+ */
+ struct mtd_erase_region_info *erase_region_p;
+ uint64_t begin, position;
+
+ concat->mtd.erasesize = max_erasesize;
+ concat->mtd.numeraseregions = num_erase_region;
+ concat->mtd.eraseregions = erase_region_p =
+ kmalloc_array(num_erase_region,
+ sizeof(struct mtd_erase_region_info),
+ GFP_KERNEL);
+ if (!erase_region_p) {
+ kfree(concat);
+ printk
+ ("memory allocation error while creating erase region list"
+ " for device \"%s\"\n", name);
+ return NULL;
+ }
+
+ /*
+ * walk the map of the new device once more and fill in
+ * erase region info:
+ */
+ curr_erasesize = subdev[0]->erasesize;
+ begin = position = 0;
+ for (i = 0; i < num_devs; i++) {
+ if (subdev[i]->numeraseregions == 0) {
+ /* current subdevice has uniform erase size */
+ if (subdev[i]->erasesize != curr_erasesize) {
+ /*
+ * fill in an mtd_erase_region_info structure for the area
+ * we have walked so far:
+ */
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize =
+ curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
+ begin = position;
+
+ curr_erasesize = subdev[i]->erasesize;
+ ++erase_region_p;
+ }
+ position += subdev[i]->size;
+ } else {
+ /* current subdevice has variable erase size */
+ int j;
+ for (j = 0; j < subdev[i]->numeraseregions; j++) {
+ /* walk the list of erase regions, count any changes */
+ if (subdev[i]->eraseregions[j].
+ erasesize != curr_erasesize) {
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize =
+ curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
+ begin = position;
+
+ curr_erasesize =
+ subdev[i]->eraseregions[j].
+ erasesize;
+ ++erase_region_p;
+ }
+ position +=
+ subdev[i]->eraseregions[j].
+ numblocks * (uint64_t)curr_erasesize;
+ }
+ }
+ }
+ /* Now write the final entry */
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize = curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
+ }
+
+ return &concat->mtd;
+}
+
+/* Cleans the context obtained from mtd_concat_create() */
+void mtd_concat_destroy(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ if (concat->mtd.numeraseregions)
+ kfree(concat->mtd.eraseregions);
+ kfree(concat);
+}
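+
+/*
+ * Hedged usage sketch for a board driver (identifiers assumed): the
+ * caller owns registration and teardown, as noted above
+ * mtd_concat_create().
+ *
+ *	struct mtd_info *parts[2] = { mtd_a, mtd_b };
+ *	struct mtd_info *merged;
+ *
+ *	merged = mtd_concat_create(parts, 2, "board-concat");
+ *	if (merged)
+ *		mtd_device_register(merged, NULL, 0);
+ *
+ * and later, on teardown:
+ *
+ *	mtd_device_unregister(merged);
+ *	mtd_concat_destroy(merged);
+ */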
+
+EXPORT_SYMBOL(mtd_concat_create);
+EXPORT_SYMBOL(mtd_concat_destroy);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
+MODULE_DESCRIPTION("Generic support for concatenating MTD devices");
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
new file mode 100644
index 0000000000..9bd661be3a
--- /dev/null
+++ b/drivers/mtd/mtdcore.c
@@ -0,0 +1,2560 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Core registration and callback routines for MTD
+ * drivers and users.
+ *
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ * Copyright © 2006 Red Hat UK Limited
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/ioctl.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/proc_fs.h>
+#include <linux/idr.h>
+#include <linux/backing-dev.h>
+#include <linux/gfp.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/reboot.h>
+#include <linux/leds.h>
+#include <linux/debugfs.h>
+#include <linux/nvmem-provider.h>
+#include <linux/root_dev.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include "mtdcore.h"
+
+struct backing_dev_info *mtd_bdi;
+
+#ifdef CONFIG_PM_SLEEP
+
+static int mtd_cls_suspend(struct device *dev)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return mtd ? mtd_suspend(mtd) : 0;
+}
+
+static int mtd_cls_resume(struct device *dev)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ if (mtd)
+ mtd_resume(mtd);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
+#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
+#else
+#define MTD_CLS_PM_OPS NULL
+#endif
+
+static struct class mtd_class = {
+ .name = "mtd",
+ .pm = MTD_CLS_PM_OPS,
+};
+
+static DEFINE_IDR(mtd_idr);
+
+/* These are exported solely for the purpose of mtd_blkdevs.c. You
+ should not use them for _anything_ else */
+DEFINE_MUTEX(mtd_table_mutex);
+EXPORT_SYMBOL_GPL(mtd_table_mutex);
+
+struct mtd_info *__mtd_next_device(int i)
+{
+ return idr_get_next(&mtd_idr, &i);
+}
+EXPORT_SYMBOL_GPL(__mtd_next_device);
+
+static LIST_HEAD(mtd_notifiers);
+
+
+#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
+
+/* REVISIT once MTD uses the driver model better, whoever allocates
+ * the mtd_info will probably want to use the release() hook...
+ */
+static void mtd_release(struct device *dev)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ dev_t index = MTD_DEVT(mtd->index);
+
+ idr_remove(&mtd_idr, mtd->index);
+ of_node_put(mtd_get_of_node(mtd));
+
+ if (mtd_is_partition(mtd))
+ release_mtd_partition(mtd);
+
+ /* remove /dev/mtdXro node */
+ device_destroy(&mtd_class, index + 1);
+}
+
+static void mtd_device_release(struct kref *kref)
+{
+ struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
+ bool is_partition = mtd_is_partition(mtd);
+
+ debugfs_remove_recursive(mtd->dbg.dfs_dir);
+
+ /* Try to remove the NVMEM provider */
+ nvmem_unregister(mtd->nvmem);
+
+ device_unregister(&mtd->dev);
+
+ /*
+ * Clear dev so mtd can be safely re-registered later if desired.
+ * Should not be done for partition,
+ * as it was already destroyed in device_unregister().
+ */
+ if (!is_partition)
+ memset(&mtd->dev, 0, sizeof(mtd->dev));
+
+ module_put(THIS_MODULE);
+}
+
+#define MTD_DEVICE_ATTR_RO(name) \
+static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
+
+#define MTD_DEVICE_ATTR_RW(name) \
+static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
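+
+/*
+ * For reference, MTD_DEVICE_ATTR_RO(type) expands to
+ *
+ *	static DEVICE_ATTR(type, 0444, mtd_type_show, NULL);
+ *
+ * pairing each sysfs attribute below with its mtd_*_show (and, for the RW
+ * variant, mtd_*_store) callback.
+ */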
+
+static ssize_t mtd_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ char *type;
+
+ switch (mtd->type) {
+ case MTD_ABSENT:
+ type = "absent";
+ break;
+ case MTD_RAM:
+ type = "ram";
+ break;
+ case MTD_ROM:
+ type = "rom";
+ break;
+ case MTD_NORFLASH:
+ type = "nor";
+ break;
+ case MTD_NANDFLASH:
+ type = "nand";
+ break;
+ case MTD_DATAFLASH:
+ type = "dataflash";
+ break;
+ case MTD_UBIVOLUME:
+ type = "ubi";
+ break;
+ case MTD_MLCNANDFLASH:
+ type = "mlc-nand";
+ break;
+ default:
+ type = "unknown";
+ }
+
+ return sysfs_emit(buf, "%s\n", type);
+}
+MTD_DEVICE_ATTR_RO(type);
+
+static ssize_t mtd_flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
+}
+MTD_DEVICE_ATTR_RO(flags);
+
+static ssize_t mtd_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
+}
+MTD_DEVICE_ATTR_RO(size);
+
+static ssize_t mtd_erasesize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
+}
+MTD_DEVICE_ATTR_RO(erasesize);
+
+static ssize_t mtd_writesize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
+}
+MTD_DEVICE_ATTR_RO(writesize);
+
+static ssize_t mtd_subpagesize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
+
+ return sysfs_emit(buf, "%u\n", subpagesize);
+}
+MTD_DEVICE_ATTR_RO(subpagesize);
+
+static ssize_t mtd_oobsize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
+}
+MTD_DEVICE_ATTR_RO(oobsize);
+
+static ssize_t mtd_oobavail_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", mtd->oobavail);
+}
+MTD_DEVICE_ATTR_RO(oobavail);
+
+static ssize_t mtd_numeraseregions_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
+}
+MTD_DEVICE_ATTR_RO(numeraseregions);
+
+static ssize_t mtd_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", mtd->name);
+}
+MTD_DEVICE_ATTR_RO(name);
+
+static ssize_t mtd_ecc_strength_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
+}
+MTD_DEVICE_ATTR_RO(ecc_strength);
+
+static ssize_t mtd_bitflip_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
+}
+
+static ssize_t mtd_bitflip_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ unsigned int bitflip_threshold;
+ int retval;
+
+ retval = kstrtouint(buf, 0, &bitflip_threshold);
+ if (retval)
+ return retval;
+
+ mtd->bitflip_threshold = bitflip_threshold;
+ return count;
+}
+MTD_DEVICE_ATTR_RW(bitflip_threshold);
+
+static ssize_t mtd_ecc_step_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
+
+}
+MTD_DEVICE_ATTR_RO(ecc_step_size);
+
+static ssize_t mtd_corrected_bits_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
+
+ return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
+}
+MTD_DEVICE_ATTR_RO(corrected_bits); /* ecc stats corrected */
+
+static ssize_t mtd_ecc_failures_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
+
+ return sysfs_emit(buf, "%u\n", ecc_stats->failed);
+}
+MTD_DEVICE_ATTR_RO(ecc_failures); /* ecc stats errors */
+
+static ssize_t mtd_bad_blocks_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
+
+ return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
+}
+MTD_DEVICE_ATTR_RO(bad_blocks);
+
+static ssize_t mtd_bbt_blocks_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
+
+ return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
+}
+MTD_DEVICE_ATTR_RO(bbt_blocks);
+
+static struct attribute *mtd_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_size.attr,
+ &dev_attr_erasesize.attr,
+ &dev_attr_writesize.attr,
+ &dev_attr_subpagesize.attr,
+ &dev_attr_oobsize.attr,
+ &dev_attr_oobavail.attr,
+ &dev_attr_numeraseregions.attr,
+ &dev_attr_name.attr,
+ &dev_attr_ecc_strength.attr,
+ &dev_attr_ecc_step_size.attr,
+ &dev_attr_corrected_bits.attr,
+ &dev_attr_ecc_failures.attr,
+ &dev_attr_bad_blocks.attr,
+ &dev_attr_bbt_blocks.attr,
+ &dev_attr_bitflip_threshold.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mtd);
+
+static const struct device_type mtd_devtype = {
+ .name = "mtd",
+ .groups = mtd_groups,
+ .release = mtd_release,
+};
+
+static bool mtd_expert_analysis_mode;
+
+#ifdef CONFIG_DEBUG_FS
+bool mtd_check_expert_analysis_mode(void)
+{
+ const char *mtd_expert_analysis_warning =
+ "Bad block checks have been entirely disabled.\n"
+ "This is only reserved for post-mortem forensics and debug purposes.\n"
+ "Never enable this mode if you do not know what you are doing!\n";
+
+ return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
+}
+EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
+#endif
+
+static struct dentry *dfs_dir_mtd;
+
+static void mtd_debugfs_populate(struct mtd_info *mtd)
+{
+ struct device *dev = &mtd->dev;
+
+ if (IS_ERR_OR_NULL(dfs_dir_mtd))
+ return;
+
+ mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
+}
+
+#ifndef CONFIG_MMU
+unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
+{
+ switch (mtd->type) {
+ case MTD_RAM:
+ return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
+ NOMMU_MAP_READ | NOMMU_MAP_WRITE;
+ case MTD_ROM:
+ return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
+ NOMMU_MAP_READ;
+ default:
+ return NOMMU_MAP_COPY;
+ }
+}
+EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
+#endif
+
+static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
+ void *cmd)
+{
+ struct mtd_info *mtd;
+
+ mtd = container_of(n, struct mtd_info, reboot_notifier);
+ mtd->_reboot(mtd);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * mtd_wunit_to_pairing_info - get pairing information of a wunit
+ * @mtd: pointer to the MTD device info structure
+ * @wunit: write unit we are interested in
+ * @info: returned pairing information
+ *
+ * Retrieve pairing information associated to the wunit.
+ * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
+ * paired together, and where programming a page may influence the page it is
+ * paired with.
+ * The notion of page is replaced by the term wunit (write-unit) to stay
+ * consistent with the ->writesize field.
+ *
+ * The @wunit argument can be extracted from an absolute offset using
+ * mtd_offset_to_wunit(). @info is filled with the pairing information attached
+ * to @wunit.
+ *
+ * From the pairing info the MTD user can find all the wunits paired with
+ * @wunit using the following loop:
+ *
+ * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
+ * info.group = i;
+ * mtd_pairing_info_to_wunit(mtd, &info);
+ * ...
+ * }
+ */
+int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
+ struct mtd_pairing_info *info)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
+
+ if (wunit < 0 || wunit >= npairs)
+ return -EINVAL;
+
+ if (master->pairing && master->pairing->get_info)
+ return master->pairing->get_info(master, wunit, info);
+
+ info->group = 0;
+ info->pair = wunit;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
+
+/**
+ * mtd_pairing_info_to_wunit - get wunit from pairing information
+ * @mtd: pointer to the MTD device info structure
+ * @info: pairing information struct
+ *
+ * Returns a positive number representing the wunit associated to the info
+ * struct, or a negative error code.
+ *
+ * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
+ * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
+ * doc).
+ *
+ * It can also be used to only program the first page of each pair (i.e.
+ * page attached to group 0), which allows one to use an MLC NAND in
+ * software-emulated SLC mode:
+ *
+ * info.group = 0;
+ * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
+ * for (info.pair = 0; info.pair < npairs; info.pair++) {
+ * wunit = mtd_pairing_info_to_wunit(mtd, &info);
+ * mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
+ * mtd->writesize, &retlen, buf + (info.pair * mtd->writesize));
+ * }
+ */
+int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
+ const struct mtd_pairing_info *info)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ngroups = mtd_pairing_groups(master);
+ int npairs = mtd_wunit_per_eb(master) / ngroups;
+
+ if (!info || info->pair < 0 || info->pair >= npairs ||
+ info->group < 0 || info->group >= ngroups)
+ return -EINVAL;
+
+ if (master->pairing && master->pairing->get_wunit)
+ return master->pairing->get_wunit(master, info);
+
+ return info->pair;
+}
+EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
+
+/**
+ * mtd_pairing_groups - get the number of pairing groups
+ * @mtd: pointer to the MTD device info structure
+ *
+ * Returns the number of pairing groups.
+ *
+ * This number is usually equal to the number of bits exposed by a single
+ * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
+ * to iterate over all pages of a given pair.
+ */
+int mtd_pairing_groups(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->pairing || !master->pairing->ngroups)
+ return 1;
+
+ return master->pairing->ngroups;
+}
+EXPORT_SYMBOL_GPL(mtd_pairing_groups);
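+
+/*
+ * Editorial usage sketch (not part of the original code): deriving how many
+ * write units of an eraseblock remain usable when an MLC device is driven in
+ * SLC-emulation mode, i.e. when only one wunit of each pair is programmed.
+ * The variable names are hypothetical.
+ *
+ *    int ngroups = mtd_pairing_groups(mtd);
+ *    int wunits = mtd_wunit_per_eb(mtd);
+ *    int slc_wunits = wunits / ngroups; // one wunit per pair
+ */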
+
+static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct mtd_info *mtd = priv;
+ size_t retlen;
+ int err;
+
+ err = mtd_read(mtd, offset, bytes, &retlen, val);
+ if (err && err != -EUCLEAN)
+ return err;
+
+ return retlen == bytes ? 0 : -EIO;
+}
+
+static int mtd_nvmem_add(struct mtd_info *mtd)
+{
+ struct device_node *node = mtd_get_of_node(mtd);
+ struct nvmem_config config = {};
+
+ config.id = NVMEM_DEVID_NONE;
+ config.dev = &mtd->dev;
+ config.name = dev_name(&mtd->dev);
+ config.owner = THIS_MODULE;
+ config.reg_read = mtd_nvmem_reg_read;
+ config.size = mtd->size;
+ config.word_size = 1;
+ config.stride = 1;
+ config.read_only = true;
+ config.root_only = true;
+ config.ignore_wp = true;
+ config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
+ config.priv = mtd;
+
+ mtd->nvmem = nvmem_register(&config);
+ if (IS_ERR(mtd->nvmem)) {
+ /* Just ignore if there is no NVMEM support in the kernel */
+ if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP)
+ mtd->nvmem = NULL;
+ else
+ return dev_err_probe(&mtd->dev, PTR_ERR(mtd->nvmem),
+ "Failed to register NVMEM device\n");
+ }
+
+ return 0;
+}
+
+static void mtd_check_of_node(struct mtd_info *mtd)
+{
+ struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
+ const char *pname, *prefix = "partition-";
+ int plen, mtd_name_len, offset, prefix_len;
+
+ /* Check if MTD already has a device node */
+ if (mtd_get_of_node(mtd))
+ return;
+
+ if (!mtd_is_partition(mtd))
+ return;
+
+ parent_dn = of_node_get(mtd_get_of_node(mtd->parent));
+ if (!parent_dn)
+ return;
+
+ if (mtd_is_partition(mtd->parent))
+ partitions = of_node_get(parent_dn);
+ else
+ partitions = of_get_child_by_name(parent_dn, "partitions");
+ if (!partitions)
+ goto exit_parent;
+
+ prefix_len = strlen(prefix);
+ mtd_name_len = strlen(mtd->name);
+
+ /* Search if a partition is defined with the same name */
+ for_each_child_of_node(partitions, mtd_dn) {
+ /* Skip partition with no/wrong prefix */
+ if (!of_node_name_prefix(mtd_dn, prefix))
+ continue;
+
+ /* Labels have priority, so check them first */
+ if (!of_property_read_string(mtd_dn, "label", &pname)) {
+ offset = 0;
+ } else {
+ pname = mtd_dn->name;
+ offset = prefix_len;
+ }
+
+ plen = strlen(pname) - offset;
+ if (plen == mtd_name_len &&
+ !strncmp(mtd->name, pname + offset, plen)) {
+ mtd_set_of_node(mtd, mtd_dn);
+ break;
+ }
+ }
+
+ of_node_put(partitions);
+exit_parent:
+ of_node_put(parent_dn);
+}
+
+/**
+ * add_mtd_device - register an MTD device
+ * @mtd: pointer to new MTD device info structure
+ *
+ * Add a device to the list of MTD devices present in the system, and
+ * notify each currently active MTD 'user' of its arrival. Returns
+ * zero on success or non-zero on failure.
+ */
+
+int add_mtd_device(struct mtd_info *mtd)
+{
+ struct device_node *np = mtd_get_of_node(mtd);
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct mtd_notifier *not;
+ int i, error, ofidx;
+
+ /*
+ * May occur, for instance, on buggy drivers which call
+ * mtd_device_parse_register() multiple times on the same master MTD,
+ * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
+ */
+ if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
+ return -EEXIST;
+
+ BUG_ON(mtd->writesize == 0);
+
+ /*
+ * MTD drivers should implement ->_{write,read}() or
+ * ->_{write,read}_oob(), but not both.
+ */
+ if (WARN_ON((mtd->_write && mtd->_write_oob) ||
+ (mtd->_read && mtd->_read_oob)))
+ return -EINVAL;
+
+ if (WARN_ON((!mtd->erasesize || !master->_erase) &&
+ !(mtd->flags & MTD_NO_ERASE)))
+ return -EINVAL;
+
+ /*
+ * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
+ * master is an MLC NAND and has a proper pairing scheme defined.
+ * We also reject masters that implement ->_writev() for now, because
+ * NAND controller drivers don't implement this hook, and adding the
+ * SLC -> MLC address/length conversion to this path is useless if we
+ * don't have a user.
+ */
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
+ (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
+ !master->pairing || master->_writev))
+ return -EINVAL;
+
+ mutex_lock(&mtd_table_mutex);
+
+ ofidx = -1;
+ if (np)
+ ofidx = of_alias_get_id(np, "mtd");
+ if (ofidx >= 0)
+ i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
+ else
+ i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
+ if (i < 0) {
+ error = i;
+ goto fail_locked;
+ }
+
+ mtd->index = i;
+ kref_init(&mtd->refcnt);
+
+ /* default value if not set by driver */
+ if (mtd->bitflip_threshold == 0)
+ mtd->bitflip_threshold = mtd->ecc_strength;
+
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+ int ngroups = mtd_pairing_groups(master);
+
+ mtd->erasesize /= ngroups;
+ mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
+ mtd->erasesize;
+ }
+
+ if (is_power_of_2(mtd->erasesize))
+ mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
+ else
+ mtd->erasesize_shift = 0;
+
+ if (is_power_of_2(mtd->writesize))
+ mtd->writesize_shift = ffs(mtd->writesize) - 1;
+ else
+ mtd->writesize_shift = 0;
+
+ mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
+ mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
+
+ /* Some chips always power up locked. Unlock them now */
+ if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
+ error = mtd_unlock(mtd, 0, mtd->size);
+ if (error && error != -EOPNOTSUPP)
+ printk(KERN_WARNING
+ "%s: unlock failed, writes may not work\n",
+ mtd->name);
+ /* Ignore unlock failures? */
+ error = 0;
+ }
+
+ /* Caller should have set dev.parent to match the
+ * physical device, if appropriate.
+ */
+ mtd->dev.type = &mtd_devtype;
+ mtd->dev.class = &mtd_class;
+ mtd->dev.devt = MTD_DEVT(i);
+ dev_set_name(&mtd->dev, "mtd%d", i);
+ dev_set_drvdata(&mtd->dev, mtd);
+ mtd_check_of_node(mtd);
+ of_node_get(mtd_get_of_node(mtd));
+ error = device_register(&mtd->dev);
+ if (error) {
+ put_device(&mtd->dev);
+ goto fail_added;
+ }
+
+ /* Add the nvmem provider */
+ error = mtd_nvmem_add(mtd);
+ if (error)
+ goto fail_nvmem_add;
+
+ mtd_debugfs_populate(mtd);
+
+ device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
+ "mtd%dro", i);
+
+ pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each_entry(not, &mtd_notifiers, list)
+ not->add(mtd);
+
+ mutex_unlock(&mtd_table_mutex);
+
+ if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
+ if (IS_BUILTIN(CONFIG_MTD)) {
+ pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
+ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
+ } else {
+ pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
+ mtd->index, mtd->name);
+ }
+ }
+
+ /* We _know_ we aren't being removed, because
+ our caller is still holding us here. So none
+ of this try_ nonsense, and no bitching about it
+ either. :) */
+ __module_get(THIS_MODULE);
+ return 0;
+
+fail_nvmem_add:
+ device_unregister(&mtd->dev);
+fail_added:
+ of_node_put(mtd_get_of_node(mtd));
+ idr_remove(&mtd_idr, i);
+fail_locked:
+ mutex_unlock(&mtd_table_mutex);
+ return error;
+}
+
+/**
+ * del_mtd_device - unregister an MTD device
+ * @mtd: pointer to MTD device info structure
+ *
+ * Remove a device from the list of MTD devices present in the system,
+ * and notify each currently active MTD 'user' of its departure.
+ * Returns zero on success or -ENODEV on failure, which currently happens
+ * if the requested device is not present in the list.
+ */
+
+int del_mtd_device(struct mtd_info *mtd)
+{
+ int ret;
+ struct mtd_notifier *not;
+
+ mutex_lock(&mtd_table_mutex);
+
+ if (idr_find(&mtd_idr, mtd->index) != mtd) {
+ ret = -ENODEV;
+ goto out_error;
+ }
+
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each_entry(not, &mtd_notifiers, list)
+ not->remove(mtd);
+
+ kref_put(&mtd->refcnt, mtd_device_release);
+ ret = 0;
+
+out_error:
+ mutex_unlock(&mtd_table_mutex);
+ return ret;
+}
+
+/*
+ * Set a few defaults based on the parent devices, if not provided by the
+ * driver
+ */
+static void mtd_set_dev_defaults(struct mtd_info *mtd)
+{
+ if (mtd->dev.parent) {
+ if (!mtd->owner && mtd->dev.parent->driver)
+ mtd->owner = mtd->dev.parent->driver->owner;
+ if (!mtd->name)
+ mtd->name = dev_name(mtd->dev.parent);
+ } else {
+ pr_debug("mtd device won't show a device symlink in sysfs\n");
+ }
+
+ INIT_LIST_HEAD(&mtd->partitions);
+ mutex_init(&mtd->master.partitions_lock);
+ mutex_init(&mtd->master.chrdev_lock);
+}
+
+static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
+{
+ struct otp_info *info;
+ ssize_t size = 0;
+ unsigned int i;
+ size_t retlen;
+ int ret;
+
+ info = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ if (is_user)
+ ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
+ else
+ ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < retlen / sizeof(*info); i++)
+ size += info[i].length;
+
+ kfree(info);
+ return size;
+
+err:
+ kfree(info);
+
+ /* ENODATA means there is no OTP region. */
+ return ret == -ENODATA ? 0 : ret;
+}
+
+static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
+ const char *compatible,
+ int size,
+ nvmem_reg_read_t reg_read)
+{
+ struct nvmem_device *nvmem = NULL;
+ struct nvmem_config config = {};
+ struct device_node *np;
+
+ /* DT binding is optional */
+ np = of_get_compatible_child(mtd->dev.of_node, compatible);
+
+ /* OTP nvmem will be registered on the physical device */
+ config.dev = mtd->dev.parent;
+ config.name = compatible;
+ config.id = NVMEM_DEVID_AUTO;
+ config.owner = THIS_MODULE;
+ config.type = NVMEM_TYPE_OTP;
+ config.root_only = true;
+ config.ignore_wp = true;
+ config.reg_read = reg_read;
+ config.size = size;
+ config.of_node = np;
+ config.priv = mtd;
+
+ nvmem = nvmem_register(&config);
+ /* Just ignore if there is no NVMEM support in the kernel */
+ if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
+ nvmem = NULL;
+
+ of_node_put(np);
+
+ return nvmem;
+}
+
+static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct mtd_info *mtd = priv;
+ size_t retlen;
+ int ret;
+
+ ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
+ if (ret)
+ return ret;
+
+ return retlen == bytes ? 0 : -EIO;
+}
+
+static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct mtd_info *mtd = priv;
+ size_t retlen;
+ int ret;
+
+ ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
+ if (ret)
+ return ret;
+
+ return retlen == bytes ? 0 : -EIO;
+}
+
+static int mtd_otp_nvmem_add(struct mtd_info *mtd)
+{
+ struct device *dev = mtd->dev.parent;
+ struct nvmem_device *nvmem;
+ ssize_t size;
+ int err;
+
+ if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
+ size = mtd_otp_size(mtd, true);
+ if (size < 0)
+ return size;
+
+ if (size > 0) {
+ nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
+ mtd_nvmem_user_otp_reg_read);
+ if (IS_ERR(nvmem)) {
+ err = PTR_ERR(nvmem);
+ goto err;
+ }
+ mtd->otp_user_nvmem = nvmem;
+ }
+ }
+
+ if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
+ size = mtd_otp_size(mtd, false);
+ if (size < 0) {
+ err = size;
+ goto err;
+ }
+
+ if (size > 0) {
+ /*
+ * The factory OTP contains things such as a unique serial
+ * number and is small, so let's read it out and put it
+ * into the entropy pool.
+ */
+ void *otp;
+
+ otp = kmalloc(size, GFP_KERNEL);
+ if (!otp) {
+ err = -ENOMEM;
+ goto err;
+ }
+ err = mtd_nvmem_fact_otp_reg_read(mtd, 0, otp, size);
+ if (err < 0) {
+ kfree(otp);
+ goto err;
+ }
+ add_device_randomness(otp, err);
+ kfree(otp);
+
+ nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
+ mtd_nvmem_fact_otp_reg_read);
+ if (IS_ERR(nvmem)) {
+ err = PTR_ERR(nvmem);
+ goto err;
+ }
+ mtd->otp_factory_nvmem = nvmem;
+ }
+ }
+
+ return 0;
+
+err:
+ nvmem_unregister(mtd->otp_user_nvmem);
+ return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n");
+}
+
+/**
+ * mtd_device_parse_register - parse partitions and register an MTD device.
+ *
+ * @mtd: the MTD device to register
+ * @types: the list of MTD partition probes to try, see
+ * 'parse_mtd_partitions()' for more information
+ * @parser_data: MTD partition parser-specific data
+ * @parts: fallback partition information to register, if parsing fails;
+ * only valid if %nr_parts > %0
+ * @nr_parts: the number of partitions in parts, if zero then the full
+ * MTD device is registered if no partition info is found
+ *
+ * This function aggregates MTD partitions parsing (done by
+ * 'parse_mtd_partitions()') and MTD device and partitions registering. It
+ * basically follows the most common pattern found in many MTD drivers:
+ *
+ * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
+ * registered first.
+ * * Then it tries to probe partitions on MTD device @mtd using parsers
+ * specified in @types (if @types is %NULL, then the default list of parsers
+ * is used, see 'parse_mtd_partitions()' for more information). If none are
+ * found, this function tries to fall back to the information specified in
+ * @parts/@nr_parts.
+ * * If no partitions were found this function just registers the MTD device
+ * @mtd and exits.
+ *
+ * Returns zero in case of success and a negative error code in case of failure.
+ */
+int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
+ struct mtd_part_parser_data *parser_data,
+ const struct mtd_partition *parts,
+ int nr_parts)
+{
+ int ret;
+
+ mtd_set_dev_defaults(mtd);
+
+ ret = mtd_otp_nvmem_add(mtd);
+ if (ret)
+ goto out;
+
+ if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
+ ret = add_mtd_device(mtd);
+ if (ret)
+ goto out;
+ }
+
+ /* Prefer parsed partitions over driver-provided fallback */
+ ret = parse_mtd_partitions(mtd, types, parser_data);
+ if (ret == -EPROBE_DEFER)
+ goto out;
+
+ if (ret > 0)
+ ret = 0;
+ else if (nr_parts)
+ ret = add_mtd_partitions(mtd, parts, nr_parts);
+ else if (!device_is_registered(&mtd->dev))
+ ret = add_mtd_device(mtd);
+ else
+ ret = 0;
+
+ if (ret)
+ goto out;
+
+ /*
+ * FIXME: some drivers unfortunately call this function more than once.
+ * So we have to check if we've already assigned the reboot notifier.
+ *
+ * Generally, we can make multiple calls work for most cases, but it
+ * does cause problems with parse_mtd_partitions() above (e.g.,
+ * cmdlineparts will register partitions more than once).
+ */
+ WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
+ "MTD already registered\n");
+ if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
+ mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
+ register_reboot_notifier(&mtd->reboot_notifier);
+ }
+
+out:
+ if (ret) {
+ nvmem_unregister(mtd->otp_user_nvmem);
+ nvmem_unregister(mtd->otp_factory_nvmem);
+ }
+
+ if (ret && device_is_registered(&mtd->dev))
+ del_mtd_device(mtd);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtd_device_parse_register);
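+
+/*
+ * Editorial usage sketch (not part of the original code): the typical call
+ * site in a flash driver's probe path. The "example_parts" fallback table
+ * and all names in it are hypothetical; @mtd is assumed to be fully
+ * initialized by the driver.
+ *
+ *    static const struct mtd_partition example_parts[] = {
+ *        { .name = "boot", .offset = 0, .size = SZ_1M },
+ *        { .name = "data", .offset = MTDPART_OFS_APPEND,
+ *          .size = MTDPART_SIZ_FULL },
+ *    };
+ *
+ *    ret = mtd_device_parse_register(mtd, NULL, NULL, example_parts,
+ *                                    ARRAY_SIZE(example_parts));
+ *    if (ret)
+ *        dev_err(dev, "failed to register MTD device: %d\n", ret);
+ */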
+
+/**
+ * mtd_device_unregister - unregister an existing MTD device.
+ *
+ * @master: the MTD device to unregister. This will unregister both the master
+ * and any partitions if registered.
+ */
+int mtd_device_unregister(struct mtd_info *master)
+{
+ int err;
+
+ if (master->_reboot) {
+ unregister_reboot_notifier(&master->reboot_notifier);
+ memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
+ }
+
+ nvmem_unregister(master->otp_user_nvmem);
+ nvmem_unregister(master->otp_factory_nvmem);
+
+ err = del_mtd_partitions(master);
+ if (err)
+ return err;
+
+ if (!device_is_registered(&master->dev))
+ return 0;
+
+ return del_mtd_device(master);
+}
+EXPORT_SYMBOL_GPL(mtd_device_unregister);
+
+/**
+ * register_mtd_user - register a 'user' of MTD devices.
+ * @new: pointer to notifier info structure
+ *
+ * Registers a pair of callback functions to be called upon addition
+ * or removal of MTD devices. Causes the 'add' callback to be immediately
+ * invoked for each MTD device currently present in the system.
+ */
+void register_mtd_user (struct mtd_notifier *new)
+{
+ struct mtd_info *mtd;
+
+ mutex_lock(&mtd_table_mutex);
+
+ list_add(&new->list, &mtd_notifiers);
+
+ __module_get(THIS_MODULE);
+
+ mtd_for_each_device(mtd)
+ new->add(mtd);
+
+ mutex_unlock(&mtd_table_mutex);
+}
+EXPORT_SYMBOL_GPL(register_mtd_user);
+
+/**
+ * unregister_mtd_user - unregister a 'user' of MTD devices.
+ * @old: pointer to notifier info structure
+ *
+ * Removes a callback function pair from the list of 'users' to be
+ * notified upon addition or removal of MTD devices. Causes the
+ * 'remove' callback to be immediately invoked for each MTD device
+ * currently present in the system.
+ */
+int unregister_mtd_user (struct mtd_notifier *old)
+{
+ struct mtd_info *mtd;
+
+ mutex_lock(&mtd_table_mutex);
+
+ module_put(THIS_MODULE);
+
+ mtd_for_each_device(mtd)
+ old->remove(mtd);
+
+ list_del(&old->list);
+ mutex_unlock(&mtd_table_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(unregister_mtd_user);
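+
+/*
+ * Editorial usage sketch (not part of the original code): a minimal MTD
+ * 'user' that logs devices as they come and go. The callback and notifier
+ * names are hypothetical.
+ *
+ *    static void example_add(struct mtd_info *mtd)
+ *    {
+ *        pr_info("mtd%d (%s) added\n", mtd->index, mtd->name);
+ *    }
+ *
+ *    static void example_remove(struct mtd_info *mtd)
+ *    {
+ *        pr_info("mtd%d (%s) removed\n", mtd->index, mtd->name);
+ *    }
+ *
+ *    static struct mtd_notifier example_notifier = {
+ *        .add = example_add,
+ *        .remove = example_remove,
+ *    };
+ *
+ *    register_mtd_user(&example_notifier);
+ *    ...
+ *    unregister_mtd_user(&example_notifier);
+ */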
+
+/**
+ * get_mtd_device - obtain a validated handle for an MTD device
+ * @mtd: last known address of the required MTD device
+ * @num: internal device number of the required MTD device
+ *
+ * Given a number and NULL address, return the num'th entry in the device
+ * table, if any. Given an address and num == -1, search the device table
+ * for a device with that address and return it if it's still present. Given
+ * both, return the num'th driver only if its address matches. Return
+ * error code if not.
+ */
+struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
+{
+ struct mtd_info *ret = NULL, *other;
+ int err = -ENODEV;
+
+ mutex_lock(&mtd_table_mutex);
+
+ if (num == -1) {
+ mtd_for_each_device(other) {
+ if (other == mtd) {
+ ret = mtd;
+ break;
+ }
+ }
+ } else if (num >= 0) {
+ ret = idr_find(&mtd_idr, num);
+ if (mtd && mtd != ret)
+ ret = NULL;
+ }
+
+ if (!ret) {
+ ret = ERR_PTR(err);
+ goto out;
+ }
+
+ err = __get_mtd_device(ret);
+ if (err)
+ ret = ERR_PTR(err);
+out:
+ mutex_unlock(&mtd_table_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(get_mtd_device);
+
+
+int __get_mtd_device(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int err;
+
+ if (master->_get_device) {
+ err = master->_get_device(mtd);
+ if (err)
+ return err;
+ }
+
+ if (!try_module_get(master->owner)) {
+ if (master->_put_device)
+ master->_put_device(master);
+ return -ENODEV;
+ }
+
+ while (mtd) {
+ if (mtd != master)
+ kref_get(&mtd->refcnt);
+ mtd = mtd->parent;
+ }
+
+ if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ kref_get(&master->refcnt);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__get_mtd_device);
+
+/**
+ * of_get_mtd_device_by_node - obtain an MTD device associated with a given node
+ *
+ * @np: device tree node
+ */
+struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
+{
+ struct mtd_info *mtd = NULL;
+ struct mtd_info *tmp;
+ int err;
+
+ mutex_lock(&mtd_table_mutex);
+
+ err = -EPROBE_DEFER;
+ mtd_for_each_device(tmp) {
+ if (mtd_get_of_node(tmp) == np) {
+ mtd = tmp;
+ err = __get_mtd_device(mtd);
+ break;
+ }
+ }
+
+ mutex_unlock(&mtd_table_mutex);
+
+ return err ? ERR_PTR(err) : mtd;
+}
+EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);
+
+/**
+ * get_mtd_device_nm - obtain a validated handle for an MTD device by
+ * device name
+ * @name: MTD device name to open
+ *
+ * This function returns MTD device description structure in case of
+ * success and an error code in case of failure.
+ */
+struct mtd_info *get_mtd_device_nm(const char *name)
+{
+ int err = -ENODEV;
+ struct mtd_info *mtd = NULL, *other;
+
+ mutex_lock(&mtd_table_mutex);
+
+ mtd_for_each_device(other) {
+ if (!strcmp(name, other->name)) {
+ mtd = other;
+ break;
+ }
+ }
+
+ if (!mtd)
+ goto out_unlock;
+
+ err = __get_mtd_device(mtd);
+ if (err)
+ goto out_unlock;
+
+ mutex_unlock(&mtd_table_mutex);
+ return mtd;
+
+out_unlock:
+ mutex_unlock(&mtd_table_mutex);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(get_mtd_device_nm);
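+
+/*
+ * Editorial usage sketch (not part of the original code): looking a device
+ * up by name and dropping the reference when done. The device name is
+ * hypothetical.
+ *
+ *    struct mtd_info *mtd = get_mtd_device_nm("spi0.0");
+ *
+ *    if (IS_ERR(mtd))
+ *        return PTR_ERR(mtd);
+ *    ... use the device ...
+ *    put_mtd_device(mtd);
+ */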
+
+void put_mtd_device(struct mtd_info *mtd)
+{
+ mutex_lock(&mtd_table_mutex);
+ __put_mtd_device(mtd);
+ mutex_unlock(&mtd_table_mutex);
+
+}
+EXPORT_SYMBOL_GPL(put_mtd_device);
+
+void __put_mtd_device(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ while (mtd) {
+ /* kref_put() can release mtd, so keep a reference to mtd->parent */
+ struct mtd_info *parent = mtd->parent;
+
+ if (mtd != master)
+ kref_put(&mtd->refcnt, mtd_device_release);
+ mtd = parent;
+ }
+
+ if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ kref_put(&master->refcnt, mtd_device_release);
+
+ module_put(master->owner);
+
+ /* must be the last as master can be freed in the _put_device */
+ if (master->_put_device)
+ master->_put_device(master);
+}
+EXPORT_SYMBOL_GPL(__put_mtd_device);
+
+/*
+ * Erase is a synchronous operation. Device drivers are expected to return a
+ * negative error code if the operation failed and to update instr->fail_addr
+ * to point to the portion that was not properly erased.
+ */
+int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
+ struct erase_info adjinstr;
+ int ret;
+
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ adjinstr = *instr;
+
+ if (!mtd->erasesize || !master->_erase)
+ return -ENOTSUPP;
+
+ if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
+ return -EINVAL;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ if (!instr->len)
+ return 0;
+
+ ledtrig_mtd_activity();
+
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+ adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
+ master->erasesize;
+ adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
+ master->erasesize) -
+ adjinstr.addr;
+ }
+
+ adjinstr.addr += mst_ofs;
+
+ ret = master->_erase(master, &adjinstr);
+
+ if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
+ instr->fail_addr = adjinstr.fail_addr - mst_ofs;
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+ instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
+ master);
+ instr->fail_addr *= mtd->erasesize;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtd_erase);
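+
+/*
+ * Editorial usage sketch (not part of the original code): erasing the single
+ * eraseblock containing offset "ofs". "mtd" and "ofs" are assumed valid;
+ * error handling is reduced to a log message.
+ *
+ *    struct erase_info instr = {
+ *        .addr = (loff_t)mtd_div_by_eb(ofs, mtd) * mtd->erasesize,
+ *        .len = mtd->erasesize,
+ *    };
+ *    int err = mtd_erase(mtd, &instr);
+ *
+ *    if (err)
+ *        pr_err("erase failed at 0x%llx\n",
+ *               (unsigned long long)instr.fail_addr);
+ */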
+
+/*
+ * This is for eXecute-In-Place (XIP). phys is optional and may be set to NULL.
+ */
+int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+ void **virt, resource_size_t *phys)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ *retlen = 0;
+ *virt = NULL;
+ if (phys)
+ *phys = 0;
+ if (!master->_point)
+ return -EOPNOTSUPP;
+ if (from < 0 || from >= mtd->size || len > mtd->size - from)
+ return -EINVAL;
+ if (!len)
+ return 0;
+
+ from = mtd_get_master_ofs(mtd, from);
+ return master->_point(master, from, len, retlen, virt, phys);
+}
+EXPORT_SYMBOL_GPL(mtd_point);
+
+/* We probably shouldn't allow XIP if the unpoint handler isn't NULL */
+int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_unpoint)
+ return -EOPNOTSUPP;
+ if (from < 0 || from >= mtd->size || len > mtd->size - from)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
+}
+EXPORT_SYMBOL_GPL(mtd_unpoint);
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
+ unsigned long offset, unsigned long flags)
+{
+ size_t retlen;
+ void *virt;
+ int ret;
+
+ ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
+ if (ret)
+ return ret;
+ if (retlen != len) {
+ mtd_unpoint(mtd, offset, retlen);
+ return -ENOSYS;
+ }
+ return (unsigned long)virt;
+}
+EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
+
+static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
+ const struct mtd_ecc_stats *old_stats)
+{
+ struct mtd_ecc_stats diff;
+
+ if (master == mtd)
+ return;
+
+ diff = master->ecc_stats;
+ diff.failed -= old_stats->failed;
+ diff.corrected -= old_stats->corrected;
+
+ while (mtd->parent) {
+ mtd->ecc_stats.failed += diff.failed;
+ mtd->ecc_stats.corrected += diff.corrected;
+ mtd = mtd->parent;
+ }
+}
+
+int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+ u_char *buf)
+{
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = buf,
+ };
+ int ret;
+
+ ret = mtd_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtd_read);
+
+int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+ const u_char *buf)
+{
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = (u8 *)buf,
+ };
+ int ret;
+
+ ret = mtd_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtd_write);
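+
+/*
+ * Editorial usage sketch (not part of the original code): a read that treats
+ * -EUCLEAN (bitflips at or above the threshold, data still corrected) as
+ * success with a warning. "from" and "buf" are hypothetical.
+ *
+ *    size_t retlen;
+ *    int err = mtd_read(mtd, from, mtd->writesize, &retlen, buf);
+ *
+ *    if (mtd_is_bitflip(err))
+ *        pr_warn("correctable bitflips, consider scrubbing this block\n");
+ *    else if (err)
+ *        return err;
+ */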
+
+/*
+ * In blackbox flight-recorder-like scenarios we want writes to succeed even
+ * in interrupt context. panic_write() is only intended to be called when it
+ * is known the kernel is about to panic and we need the write to succeed.
+ * Since the kernel is not going to be running for much longer, this function
+ * can break locks and delay to ensure the write succeeds (but not sleep).
+ */
+int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+ const u_char *buf)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ *retlen = 0;
+ if (!master->_panic_write)
+ return -EOPNOTSUPP;
+ if (to < 0 || to >= mtd->size || len > mtd->size - to)
+ return -EINVAL;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (!len)
+ return 0;
+ if (!master->oops_panic_write)
+ master->oops_panic_write = true;
+
+ return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
+ retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_panic_write);
+
+static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
+ struct mtd_oob_ops *ops)
+{
+ /*
+ * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
+ * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
+ * this case.
+ */
+ if (!ops->datbuf)
+ ops->len = 0;
+
+ if (!ops->oobbuf)
+ ops->ooblen = 0;
+
+ if (offs < 0 || offs + ops->len > mtd->size)
+ return -EINVAL;
+
+ if (ops->ooblen) {
+ size_t maxooblen;
+
+ if (ops->ooboffs >= mtd_oobavail(mtd, ops))
+ return -EINVAL;
+
+ maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
+ mtd_div_by_ws(offs, mtd)) *
+ mtd_oobavail(mtd, ops)) - ops->ooboffs;
+ if (ops->ooblen > maxooblen)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ from = mtd_get_master_ofs(mtd, from);
+ if (master->_read_oob)
+ ret = master->_read_oob(master, from, ops);
+ else
+ ret = master->_read(master, from, ops->len, &ops->retlen,
+ ops->datbuf);
+
+ return ret;
+}
+
+static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ to = mtd_get_master_ofs(mtd, to);
+ if (master->_write_oob)
+ ret = master->_write_oob(master, to, ops);
+ else
+ ret = master->_write(master, to, ops->len, &ops->retlen,
+ ops->datbuf);
+
+ return ret;
+}
+
+static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ngroups = mtd_pairing_groups(master);
+ int npairs = mtd_wunit_per_eb(master) / ngroups;
+ struct mtd_oob_ops adjops = *ops;
+ unsigned int wunit, oobavail;
+ struct mtd_pairing_info info;
+ int max_bitflips = 0;
+ u32 ebofs, pageofs;
+ loff_t base, pos;
+
+ ebofs = mtd_mod_by_eb(start, mtd);
+ base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
+ info.group = 0;
+ info.pair = mtd_div_by_ws(ebofs, mtd);
+ pageofs = mtd_mod_by_ws(ebofs, mtd);
+ oobavail = mtd_oobavail(mtd, ops);
+
+ while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
+ int ret;
+
+ if (info.pair >= npairs) {
+ info.pair = 0;
+ base += master->erasesize;
+ }
+
+ wunit = mtd_pairing_info_to_wunit(master, &info);
+ pos = mtd_wunit_to_offset(mtd, base, wunit);
+
+ adjops.len = ops->len - ops->retlen;
+ if (adjops.len > mtd->writesize - pageofs)
+ adjops.len = mtd->writesize - pageofs;
+
+ adjops.ooblen = ops->ooblen - ops->oobretlen;
+ if (adjops.ooblen > oobavail - adjops.ooboffs)
+ adjops.ooblen = oobavail - adjops.ooboffs;
+
+ if (read) {
+ ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
+ if (ret > 0)
+ max_bitflips = max(max_bitflips, ret);
+ } else {
+ ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ max_bitflips = max(max_bitflips, ret);
+ ops->retlen += adjops.retlen;
+ ops->oobretlen += adjops.oobretlen;
+ adjops.datbuf += adjops.retlen;
+ adjops.oobbuf += adjops.oobretlen;
+ adjops.ooboffs = 0;
+ pageofs = 0;
+ info.pair++;
+ }
+
+ return max_bitflips;
+}
+
+int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct mtd_ecc_stats old_stats = master->ecc_stats;
+ int ret_code;
+
+ ops->retlen = ops->oobretlen = 0;
+
+ ret_code = mtd_check_oob_ops(mtd, from, ops);
+ if (ret_code)
+ return ret_code;
+
+ ledtrig_mtd_activity();
+
+ /* Check the validity of a potential fallback on mtd->_read */
+ if (!master->_read_oob && (!master->_read || ops->oobbuf))
+ return -EOPNOTSUPP;
+
+ if (ops->stats)
+ memset(ops->stats, 0, sizeof(*ops->stats));
+
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+ ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
+ else
+ ret_code = mtd_read_oob_std(mtd, from, ops);
+
+ mtd_update_ecc_stats(mtd, master, &old_stats);
+
+ /*
+ * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
+ * similar to mtd->_read(), returning a non-negative integer
+ * representing max bitflips. In other cases, mtd->_read_oob() may
+ * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
+ */
+ if (unlikely(ret_code < 0))
+ return ret_code;
+ if (mtd->ecc_strength == 0)
+ return 0; /* device lacks ecc */
+ if (ops->stats)
+ ops->stats->max_bitflips = ret_code;
+ return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+}
+EXPORT_SYMBOL_GPL(mtd_read_oob);
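+
+/*
+ * Editorial usage sketch (not part of the original code): reading one page
+ * of data together with its available OOB bytes. "page_offset", "databuf"
+ * and "oobbuf" are hypothetical, suitably sized.
+ *
+ *    struct mtd_oob_ops ops = {
+ *        .mode = MTD_OPS_AUTO_OOB,
+ *        .len = mtd->writesize,
+ *        .datbuf = databuf,
+ *        .ooblen = mtd->oobavail,
+ *        .oobbuf = oobbuf,
+ *    };
+ *    int err = mtd_read_oob(mtd, page_offset, &ops);
+ */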
+
+int mtd_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ ops->retlen = ops->oobretlen = 0;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ ret = mtd_check_oob_ops(mtd, to, ops);
+ if (ret)
+ return ret;
+
+ ledtrig_mtd_activity();
+
+ /* Check the validity of a potential fallback on mtd->_write */
+ if (!master->_write_oob && (!master->_write || ops->oobbuf))
+ return -EOPNOTSUPP;
+
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+ return mtd_io_emulated_slc(mtd, to, false, ops);
+
+ return mtd_write_oob_std(mtd, to, ops);
+}
+EXPORT_SYMBOL_GPL(mtd_write_oob);
+
+/**
+ * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
+ * @mtd: MTD device structure
+ * @section: ECC section. Depending on the layout you may have all the ECC
+ * bytes stored in a single contiguous section, or one section
+ * per ECC chunk (and sometimes several sections for a single
+ * ECC chunk)
+ * @oobecc: OOB region struct filled with the appropriate ECC position
+ * information
+ *
+ * This function returns ECC section information in the OOB area. If you want
+ * to get all the ECC bytes information, then you should call
+ * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobecc)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ memset(oobecc, 0, sizeof(*oobecc));
+
+ if (!master || section < 0)
+ return -EINVAL;
+
+ if (!master->ooblayout || !master->ooblayout->ecc)
+ return -ENOTSUPP;
+
+ return master->ooblayout->ecc(master, section, oobecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
+
+/**
+ * mtd_ooblayout_free - Get the OOB region definition of a specific free
+ * section
+ * @mtd: MTD device structure
+ * @section: Free section you are interested in. Depending on the layout
+ * you may have all the free bytes stored in a single contiguous
+ * section, or one section per ECC chunk plus an extra section
+ * for the remaining bytes (or other funky layout).
+ * @oobfree: OOB region struct filled with the appropriate free position
+ * information
+ *
+ * This function returns free bytes position in the OOB area. If you want
+ * to get all the free bytes information, then you should call
+ * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobfree)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ memset(oobfree, 0, sizeof(*oobfree));
+
+ if (!master || section < 0)
+ return -EINVAL;
+
+ if (!master->ooblayout || !master->ooblayout->free)
+ return -ENOTSUPP;
+
+ return master->ooblayout->free(master, section, oobfree);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
+
+/**
+ * mtd_ooblayout_find_region - Find the region attached to a specific byte
+ * @mtd: mtd info structure
+ * @byte: the byte we are searching for
+ * @sectionp: pointer where the section id will be stored
+ * @oobregion: used to retrieve the ECC position
+ * @iter: iterator function. Should be either mtd_ooblayout_free or
+ * mtd_ooblayout_ecc depending on the region type you're searching for
+ *
+ * This function returns the section id and oobregion information of a
+ * specific byte. For example, say you want to know where the 4th ECC byte is
+ * stored, you'll use:
+ *
+ * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
+ int *sectionp, struct mtd_oob_region *oobregion,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ int pos = 0, ret, section = 0;
+
+ memset(oobregion, 0, sizeof(*oobregion));
+
+ while (1) {
+ ret = iter(mtd, section, oobregion);
+ if (ret)
+ return ret;
+
+ if (pos + oobregion->length > byte)
+ break;
+
+ pos += oobregion->length;
+ section++;
+ }
+
+ /*
+ * Adjust region info to make it start at the beginning of the
+ * requested byte.
+ */
+ oobregion->offset += byte - pos;
+ oobregion->length -= byte - pos;
+ *sectionp = section;
+
+ return 0;
+}
+
+/**
+ * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
+ * ECC byte
+ * @mtd: mtd info structure
+ * @eccbyte: the byte we are searching for
+ * @section: pointer where the section id will be stored
+ * @oobregion: OOB region information
+ *
+ * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
+ * byte.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
+ int *section,
+ struct mtd_oob_region *oobregion)
+{
+ return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
+ mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
+
+/**
+ * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @buf: destination buffer to store OOB bytes
+ * @oobbuf: OOB buffer
+ * @start: first byte to retrieve
+ * @nbytes: number of bytes to retrieve
+ * @iter: section iterator
+ *
+ * Extract bytes attached to a specific category (ECC or free)
+ * from the OOB buffer and copy them into buf.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
+ const u8 *oobbuf, int start, int nbytes,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ struct mtd_oob_region oobregion;
+ int section, ret;
+
+ ret = mtd_ooblayout_find_region(mtd, start, &section,
+ &oobregion, iter);
+
+ while (!ret) {
+ int cnt;
+
+ cnt = min_t(int, nbytes, oobregion.length);
+ memcpy(buf, oobbuf + oobregion.offset, cnt);
+ buf += cnt;
+ nbytes -= cnt;
+
+ if (!nbytes)
+ break;
+
+ ret = iter(mtd, ++section, &oobregion);
+ }
+
+ return ret;
+}
+
+/**
+ * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @buf: source buffer to get OOB bytes from
+ * @oobbuf: OOB buffer
+ * @start: first OOB byte to set
+ * @nbytes: number of OOB bytes to set
+ * @iter: section iterator
+ *
+ * Fill the OOB buffer with data provided in buf. The category (ECC or free)
+ * is selected by passing the appropriate iterator.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
+ u8 *oobbuf, int start, int nbytes,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ struct mtd_oob_region oobregion;
+ int section, ret;
+
+ ret = mtd_ooblayout_find_region(mtd, start, &section,
+ &oobregion, iter);
+
+ while (!ret) {
+ int cnt;
+
+ cnt = min_t(int, nbytes, oobregion.length);
+ memcpy(oobbuf + oobregion.offset, buf, cnt);
+ buf += cnt;
+ nbytes -= cnt;
+
+ if (!nbytes)
+ break;
+
+ ret = iter(mtd, ++section, &oobregion);
+ }
+
+ return ret;
+}
+
+/**
+ * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
+ * @mtd: mtd info structure
+ * @iter: category iterator
+ *
+ * Count the number of bytes in a given category.
+ *
+ * Returns a positive value on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ struct mtd_oob_region oobregion;
+ int section = 0, ret, nbytes = 0;
+
+ while (1) {
+ ret = iter(mtd, section++, &oobregion);
+ if (ret) {
+ if (ret == -ERANGE)
+ ret = nbytes;
+ break;
+ }
+
+ nbytes += oobregion.length;
+ }
+
+ return ret;
+}
+
+/**
+ * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @eccbuf: destination buffer to store ECC bytes
+ * @oobbuf: OOB buffer
+ * @start: first ECC byte to retrieve
+ * @nbytes: number of ECC bytes to retrieve
+ *
+ * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
+ const u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
+ mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
+
+/**
+ * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @eccbuf: source buffer to get ECC bytes from
+ * @oobbuf: OOB buffer
+ * @start: first ECC byte to set
+ * @nbytes: number of ECC bytes to set
+ *
+ * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
+ u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
+ mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
+
+/**
+ * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @databuf: destination buffer to store data bytes
+ * @oobbuf: OOB buffer
+ * @start: first data byte to retrieve
+ * @nbytes: number of data bytes to retrieve
+ *
+ * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
+ const u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
+ mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
+
+/**
+ * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @databuf: source buffer to get data bytes from
+ * @oobbuf: OOB buffer
+ * @start: first data byte to set
+ * @nbytes: number of data bytes to set
+ *
+ * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
+ u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
+ mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
+
+/**
+ * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
+ * @mtd: mtd info structure
+ *
+ * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
+ *
+ * Returns the number of free bytes on success, a negative error code
+ * otherwise.
+ */
+int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
+{
+ return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
+
+/**
+ * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
+ * @mtd: mtd info structure
+ *
+ * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
+ *
+ * Returns the number of ECC bytes on success, a negative error code
+ * otherwise.
+ */
+int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
+{
+ return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
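+
+/*
+ * Editorial usage sketch (not part of the original code): dumping every ECC
+ * region of a layout by iterating sections until -ERANGE, as described in
+ * the mtd_ooblayout_ecc() documentation above.
+ *
+ *    struct mtd_oob_region region;
+ *    int section = 0;
+ *
+ *    while (!mtd_ooblayout_ecc(mtd, section, &region)) {
+ *        pr_info("ECC section %d: offset %u, length %u\n",
+ *                section, region.offset, region.length);
+ *        section++;
+ *    }
+ */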
+
+/*
+ * Method to access the protection register area, present in some flash
+ * devices. The user data is one time programmable but the factory data is read
+ * only.
+ */
+int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+ struct otp_info *buf)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_get_fact_prot_info)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return master->_get_fact_prot_info(master, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
+
+int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ *retlen = 0;
+ if (!master->_read_fact_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return master->_read_fact_prot_reg(master, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
+
+int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+ struct otp_info *buf)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_get_user_prot_info)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return master->_get_user_prot_info(master, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
+
+int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ *retlen = 0;
+ if (!master->_read_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return master->_read_user_prot_reg(master, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
+
+int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ *retlen = 0;
+ if (!master->_write_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
+ if (ret)
+ return ret;
+
+ /*
+ * If no data could be written at all, we are out of memory and
+ * must return -ENOSPC.
+ */
+ return (*retlen) ? 0 : -ENOSPC;
+}
+EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
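+
+/*
+ * Editorial usage sketch (not part of the original code): listing the user
+ * OTP regions before programming one. The size of "info" is an arbitrary
+ * assumption.
+ *
+ *    struct otp_info info[8];
+ *    size_t retlen;
+ *    int i, err;
+ *
+ *    err = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, info);
+ *    if (err)
+ *        return err;
+ *    for (i = 0; i < retlen / sizeof(*info); i++)
+ *        pr_info("OTP region at 0x%x, %u bytes, %slocked\n",
+ *                info[i].start, info[i].length,
+ *                info[i].locked ? "" : "un");
+ */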
+
+int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_lock_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return master->_lock_user_prot_reg(master, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
+
+int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_erase_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return master->_erase_user_prot_reg(master, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
+
+/* Chip-supported device locking */
+int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_lock)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
+ return -EINVAL;
+ if (!len)
+ return 0;
+
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+ ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+ len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
+ }
+
+ return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
+}
+EXPORT_SYMBOL_GPL(mtd_lock);
+
+int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_unlock)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
+ return -EINVAL;
+ if (!len)
+ return 0;
+
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+ ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+ len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
+ }
+
+ return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
+}
+EXPORT_SYMBOL_GPL(mtd_unlock);
+
+int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_is_locked)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
+ return -EINVAL;
+ if (!len)
+ return 0;
+
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+ ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+ len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
+ }
+
+ return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
+}
+EXPORT_SYMBOL_GPL(mtd_is_locked);
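+
+/*
+ * Editorial usage sketch (not part of the original code): unlocking a region
+ * before writing while tolerating chips that have no lock support.
+ *
+ *    int err = mtd_unlock(mtd, ofs, len);
+ *
+ *    if (err && err != -EOPNOTSUPP)
+ *        return err;
+ */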
+
+int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (ofs < 0 || ofs >= mtd->size)
+ return -EINVAL;
+ if (!master->_block_isreserved)
+ return 0;
+
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+ ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+
+ return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
+}
+EXPORT_SYMBOL_GPL(mtd_block_isreserved);
+
+int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (ofs < 0 || ofs >= mtd->size)
+ return -EINVAL;
+ if (!master->_block_isbad)
+ return 0;
+
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+ ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+
+ return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
+}
+EXPORT_SYMBOL_GPL(mtd_block_isbad);
+
+int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ if (!master->_block_markbad)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs >= mtd->size)
+ return -EINVAL;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+ ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+
+ ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
+ if (ret)
+ return ret;
+
+ while (mtd->parent) {
+ mtd->ecc_stats.badblocks++;
+ mtd = mtd->parent;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtd_block_markbad);
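+
+/*
+ * Editorial usage sketch (not part of the original code): counting the bad
+ * eraseblocks of a device; negative return values (errors) are ignored here
+ * for brevity.
+ *
+ *    loff_t ofs;
+ *    int nbad = 0;
+ *
+ *    for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize)
+ *        if (mtd_block_isbad(mtd, ofs) > 0)
+ *            nbad++;
+ */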
+
+/*
+ * default_mtd_writev - the default writev method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ unsigned long i;
+ size_t totlen = 0, thislen;
+ int ret = 0;
+
+ for (i = 0; i < count; i++) {
+ if (!vecs[i].iov_len)
+ continue;
+ ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
+ vecs[i].iov_base);
+ totlen += thislen;
+ if (ret || thislen != vecs[i].iov_len)
+ break;
+ to += vecs[i].iov_len;
+ }
+ *retlen = totlen;
+ return ret;
+}
+
+/*
+ * mtd_writev - the vector-based MTD write method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ *retlen = 0;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ if (!master->_writev)
+ return default_mtd_writev(mtd, vecs, count, to, retlen);
+
+ return master->_writev(master, vecs, count,
+ mtd_get_master_ofs(mtd, to), retlen);
+}
+EXPORT_SYMBOL_GPL(mtd_writev);
+
+/**
+ * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
+ * @mtd: mtd device description object pointer
+ * @size: a pointer to the ideal or maximum size of the allocation, points
+ * to the actual allocation size on success.
+ *
+ * This routine attempts to allocate a contiguous kernel buffer up to
+ * the specified size, backing off the size of the request exponentially
+ * until the request succeeds or until the allocation size falls below
+ * the system page size. This attempts to make sure it does not adversely
+ * impact system performance, so when allocating more than one page, we
+ * ask the memory allocator to avoid re-trying, swapping, writing back
+ * or performing I/O.
+ *
+ * Note, this function also makes sure that the allocated buffer is aligned to
+ * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
+ *
+ * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
+ * to handle smaller (i.e. degraded) buffer allocations under low-memory or
+ * fragmented-memory situations, where such reduced allocations from the
+ * requested ideal are allowed.
+ *
+ * Returns a pointer to the allocated buffer on success; otherwise, NULL.
+ */
+void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
+{
+ gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
+ size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
+ void *kbuf;
+
+ *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
+
+ while (*size > min_alloc) {
+ kbuf = kmalloc(*size, flags);
+ if (kbuf)
+ return kbuf;
+
+ *size >>= 1;
+ *size = ALIGN(*size, mtd->writesize);
+ }
+
+ /*
+ * For the last resort allocation allow 'kmalloc()' to do all sorts of
+ * things (write-back, dropping caches, etc) by using GFP_KERNEL.
+ */
+ return kmalloc(*size, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
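+
+/*
+ * Illustrative sketch of the intended calling pattern (starting from the
+ * eraseblock size is an arbitrary choice for the example):
+ *
+ *	size_t size = mtd->erasesize;
+ *	void *buf = mtd_kmalloc_up_to(mtd, &size);
+ *
+ *	if (!buf)
+ *		return -ENOMEM;
+ *	// 'size' now holds the actual, possibly reduced, allocation size
+ */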
+
+#ifdef CONFIG_PROC_FS
+
+/*====================================================================*/
+/* Support for /proc/mtd */
+
+static int mtd_proc_show(struct seq_file *m, void *v)
+{
+ struct mtd_info *mtd;
+
+ seq_puts(m, "dev: size erasesize name\n");
+ mutex_lock(&mtd_table_mutex);
+ mtd_for_each_device(mtd) {
+ seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
+ mtd->index, (unsigned long long)mtd->size,
+ mtd->erasesize, mtd->name);
+ }
+ mutex_unlock(&mtd_table_mutex);
+ return 0;
+}
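+
+/*
+ * Sample /proc/mtd output produced by the format above (the values and
+ * the partition name are illustrative):
+ *
+ *	dev:    size   erasesize  name
+ *	mtd0: 00100000 00020000 "bootloader"
+ */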
+#endif /* CONFIG_PROC_FS */
+
+/*====================================================================*/
+/* Init code */
+
+static struct backing_dev_info * __init mtd_bdi_init(const char *name)
+{
+ struct backing_dev_info *bdi;
+ int ret;
+
+ bdi = bdi_alloc(NUMA_NO_NODE);
+ if (!bdi)
+ return ERR_PTR(-ENOMEM);
+ bdi->ra_pages = 0;
+ bdi->io_pages = 0;
+
+ /*
+	 * We append a '-0' suffix to the name to keep the historical name
+	 * format. Since this is called only once, we get a unique name.
+ */
+ ret = bdi_register(bdi, "%.28s-0", name);
+ if (ret)
+ bdi_put(bdi);
+
+ return ret ? ERR_PTR(ret) : bdi;
+}
+
+static struct proc_dir_entry *proc_mtd;
+
+static int __init init_mtd(void)
+{
+ int ret;
+
+ ret = class_register(&mtd_class);
+ if (ret)
+ goto err_reg;
+
+ mtd_bdi = mtd_bdi_init("mtd");
+ if (IS_ERR(mtd_bdi)) {
+ ret = PTR_ERR(mtd_bdi);
+ goto err_bdi;
+ }
+
+ proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
+
+ ret = init_mtdchar();
+ if (ret)
+ goto out_procfs;
+
+ dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
+ debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
+ &mtd_expert_analysis_mode);
+
+ return 0;
+
+out_procfs:
+ if (proc_mtd)
+ remove_proc_entry("mtd", NULL);
+ bdi_unregister(mtd_bdi);
+ bdi_put(mtd_bdi);
+err_bdi:
+ class_unregister(&mtd_class);
+err_reg:
+ pr_err("Error registering mtd class or bdi: %d\n", ret);
+ return ret;
+}
+
+static void __exit cleanup_mtd(void)
+{
+ debugfs_remove_recursive(dfs_dir_mtd);
+ cleanup_mtdchar();
+ if (proc_mtd)
+ remove_proc_entry("mtd", NULL);
+ class_unregister(&mtd_class);
+ bdi_unregister(mtd_bdi);
+ bdi_put(mtd_bdi);
+ idr_destroy(&mtd_idr);
+}
+
+module_init(init_mtd);
+module_exit(cleanup_mtd);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Core MTD registration and access routines");
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
new file mode 100644
index 0000000000..b014861a06
--- /dev/null
+++ b/drivers/mtd/mtdcore.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * These are exported solely for the purpose of mtd_blkdevs.c and mtdchar.c.
+ * You should not use them for _anything_ else.
+ */
+
+extern struct mutex mtd_table_mutex;
+extern struct backing_dev_info *mtd_bdi;
+
+struct mtd_info *__mtd_next_device(int i);
+int __must_check add_mtd_device(struct mtd_info *mtd);
+int del_mtd_device(struct mtd_info *mtd);
+int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
+int del_mtd_partitions(struct mtd_info *);
+void release_mtd_partition(struct mtd_info *mtd);
+
+struct mtd_partitions;
+
+int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
+ struct mtd_part_parser_data *data);
+
+void mtd_part_parser_cleanup(struct mtd_partitions *parts);
+
+int __init init_mtdchar(void);
+void __exit cleanup_mtdchar(void);
+
+#define mtd_for_each_device(mtd) \
+ for ((mtd) = __mtd_next_device(0); \
+ (mtd) != NULL; \
+ (mtd) = __mtd_next_device(mtd->index + 1))
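+
+/*
+ * Sketch of the expected usage; the caller serializes against device
+ * (un)registration by holding mtd_table_mutex, as mtd_proc_show() in
+ * mtdcore.c does:
+ *
+ *	struct mtd_info *mtd;
+ *
+ *	mutex_lock(&mtd_table_mutex);
+ *	mtd_for_each_device(mtd)
+ *		pr_info("mtd%d: %s\n", mtd->index, mtd->name);
+ *	mutex_unlock(&mtd_table_mutex);
+ */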
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
new file mode 100644
index 0000000000..2f11585b56
--- /dev/null
+++ b/drivers/mtd/mtdoops.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MTD Oops/Panic logger
+ *
+ * Copyright © 2007 Nokia Corporation. All rights reserved.
+ *
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/timekeeping.h>
+#include <linux/mtd/mtd.h>
+#include <linux/kmsg_dump.h>
+
+/* Maximum MTD partition size */
+#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)
+
+static unsigned long record_size = 4096;
+module_param(record_size, ulong, 0400);
+MODULE_PARM_DESC(record_size,
+ "record size for MTD OOPS pages in bytes (default 4096)");
+
+static char mtddev[80];
+module_param_string(mtddev, mtddev, 80, 0400);
+MODULE_PARM_DESC(mtddev,
+ "name or index number of the MTD device to use");
+
+static int dump_oops = 1;
+module_param(dump_oops, int, 0600);
+MODULE_PARM_DESC(dump_oops,
+ "set to 1 to dump oopses, 0 to only dump panics (default 1)");
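+
+/*
+ * Example invocation (a sketch; the partition name "oops" is hypothetical):
+ *
+ *	modprobe mtdoops mtddev=oops record_size=8192 dump_oops=0
+ */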
+
+#define MTDOOPS_KERNMSG_MAGIC_v1 0x5d005d00 /* Original */
+#define MTDOOPS_KERNMSG_MAGIC_v2 0x5d005e00 /* Adds the timestamp */
+
+struct mtdoops_hdr {
+ u32 seq;
+ u32 magic;
+ ktime_t timestamp;
+} __packed;
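+
+/*
+ * On-flash layout of one record (record_size bytes in total): the header
+ * above, immediately followed by the kmsg dump text, with the rest of the
+ * record left as 0xff fill.
+ */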
+
+static struct mtdoops_context {
+ struct kmsg_dumper dump;
+
+ int mtd_index;
+ struct work_struct work_erase;
+ struct work_struct work_write;
+ struct mtd_info *mtd;
+ int oops_pages;
+ int nextpage;
+ int nextcount;
+ unsigned long *oops_page_used;
+
+ unsigned long oops_buf_busy;
+ void *oops_buf;
+} oops_cxt;
+
+static void mark_page_used(struct mtdoops_context *cxt, int page)
+{
+ set_bit(page, cxt->oops_page_used);
+}
+
+static void mark_page_unused(struct mtdoops_context *cxt, int page)
+{
+ clear_bit(page, cxt->oops_page_used);
+}
+
+static int page_is_used(struct mtdoops_context *cxt, int page)
+{
+ return test_bit(page, cxt->oops_page_used);
+}
+
+static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
+ u32 start_page = start_page_offset / record_size;
+ u32 erase_pages = mtd->erasesize / record_size;
+ struct erase_info erase;
+ int ret;
+ int page;
+
+ erase.addr = offset;
+ erase.len = mtd->erasesize;
+
+ ret = mtd_erase(mtd, &erase);
+ if (ret) {
+ pr_warn("erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
+ (unsigned long long)erase.addr,
+ (unsigned long long)erase.len, mtddev);
+ return ret;
+ }
+
+ /* Mark pages as unused */
+ for (page = start_page; page < start_page + erase_pages; page++)
+ mark_page_unused(cxt, page);
+
+ return 0;
+}
+
+static void mtdoops_erase(struct mtdoops_context *cxt)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ int i = 0, j, ret, mod;
+
+ /* We were unregistered */
+ if (!mtd)
+ return;
+
+ mod = (cxt->nextpage * record_size) % mtd->erasesize;
+ if (mod != 0) {
+ cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
+ if (cxt->nextpage >= cxt->oops_pages)
+ cxt->nextpage = 0;
+ }
+
+ while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
+badblock:
+ pr_warn("bad block at %08lx\n",
+ cxt->nextpage * record_size);
+ i++;
+ cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
+ if (cxt->nextpage >= cxt->oops_pages)
+ cxt->nextpage = 0;
+ if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
+ pr_err("all blocks bad!\n");
+ return;
+ }
+ }
+
+ if (ret < 0) {
+ pr_err("mtd_block_isbad failed, aborting\n");
+ return;
+ }
+
+ for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
+ ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
+
+ if (ret >= 0) {
+ pr_debug("ready %d, %d\n",
+ cxt->nextpage, cxt->nextcount);
+ return;
+ }
+
+ if (ret == -EIO) {
+ ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
+ if (ret < 0 && ret != -EOPNOTSUPP) {
+ pr_err("block_markbad failed, aborting\n");
+ return;
+ }
+ }
+ goto badblock;
+}
+
+/* Scheduled work - when we can't proceed without erasing a block */
+static void mtdoops_workfunc_erase(struct work_struct *work)
+{
+ struct mtdoops_context *cxt =
+ container_of(work, struct mtdoops_context, work_erase);
+ mtdoops_erase(cxt);
+}
+
+static void mtdoops_inc_counter(struct mtdoops_context *cxt, int panic)
+{
+ cxt->nextpage++;
+ if (cxt->nextpage >= cxt->oops_pages)
+ cxt->nextpage = 0;
+ cxt->nextcount++;
+ if (cxt->nextcount == 0xffffffff)
+ cxt->nextcount = 0;
+
+ if (page_is_used(cxt, cxt->nextpage)) {
+ pr_debug("not ready %d, %d (erase %s)\n",
+ cxt->nextpage, cxt->nextcount,
+ panic ? "immediately" : "scheduled");
+ if (panic) {
+ /* In case of panic, erase immediately */
+ mtdoops_erase(cxt);
+ } else {
+ /* Otherwise, schedule work to erase it "nicely" */
+ schedule_work(&cxt->work_erase);
+ }
+ } else {
+ pr_debug("ready %d, %d (no erase)\n",
+ cxt->nextpage, cxt->nextcount);
+ }
+}
+
+static void mtdoops_write(struct mtdoops_context *cxt, int panic)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ size_t retlen;
+ struct mtdoops_hdr *hdr;
+ int ret;
+
+ if (test_and_set_bit(0, &cxt->oops_buf_busy))
+ return;
+
+ /* Add mtdoops header to the buffer */
+ hdr = (struct mtdoops_hdr *)cxt->oops_buf;
+ hdr->seq = cxt->nextcount;
+ hdr->magic = MTDOOPS_KERNMSG_MAGIC_v2;
+ hdr->timestamp = ktime_get_real();
+
+ if (panic) {
+ ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
+ if (ret == -EOPNOTSUPP) {
+ pr_err("Cannot write from panic without panic_write\n");
+ goto out;
+ }
+ } else
+ ret = mtd_write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
+
+ if (retlen != record_size || ret < 0)
+ pr_err("write failure at %ld (%td of %ld written), error %d\n",
+ cxt->nextpage * record_size, retlen, record_size, ret);
+ mark_page_used(cxt, cxt->nextpage);
+ memset(cxt->oops_buf, 0xff, record_size);
+
+ mtdoops_inc_counter(cxt, panic);
+out:
+ clear_bit(0, &cxt->oops_buf_busy);
+}
+
+static void mtdoops_workfunc_write(struct work_struct *work)
+{
+ struct mtdoops_context *cxt =
+ container_of(work, struct mtdoops_context, work_write);
+
+ mtdoops_write(cxt, 0);
+}
+
+static void find_next_position(struct mtdoops_context *cxt)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ struct mtdoops_hdr hdr;
+ int ret, page, maxpos = 0;
+ u32 maxcount = 0xffffffff;
+ size_t retlen;
+
+ for (page = 0; page < cxt->oops_pages; page++) {
+ if (mtd_block_isbad(mtd, page * record_size))
+ continue;
+ /* Assume the page is used */
+ mark_page_used(cxt, page);
+ ret = mtd_read(mtd, page * record_size, sizeof(hdr),
+ &retlen, (u_char *)&hdr);
+ if (retlen != sizeof(hdr) ||
+ (ret < 0 && !mtd_is_bitflip(ret))) {
+ pr_err("read failure at %ld (%zu of %zu read), err %d\n",
+ page * record_size, retlen, sizeof(hdr), ret);
+ continue;
+ }
+
+ if (hdr.seq == 0xffffffff && hdr.magic == 0xffffffff)
+ mark_page_unused(cxt, page);
+ if (hdr.seq == 0xffffffff ||
+ (hdr.magic != MTDOOPS_KERNMSG_MAGIC_v1 &&
+ hdr.magic != MTDOOPS_KERNMSG_MAGIC_v2))
+ continue;
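+		/*
+		 * Track the newest record while allowing for sequence number
+		 * wraparound: e.g. a record with seq 0x00000001 counts as
+		 * newer than one with seq 0xf0000000.
+		 */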
+ if (maxcount == 0xffffffff) {
+ maxcount = hdr.seq;
+ maxpos = page;
+ } else if (hdr.seq < 0x40000000 && maxcount > 0xc0000000) {
+ maxcount = hdr.seq;
+ maxpos = page;
+ } else if (hdr.seq > maxcount && hdr.seq < 0xc0000000) {
+ maxcount = hdr.seq;
+ maxpos = page;
+ } else if (hdr.seq > maxcount && hdr.seq > 0xc0000000
+ && maxcount > 0x80000000) {
+ maxcount = hdr.seq;
+ maxpos = page;
+ }
+ }
+ if (maxcount == 0xffffffff) {
+ cxt->nextpage = cxt->oops_pages - 1;
+ cxt->nextcount = 0;
+	} else {
+ cxt->nextpage = maxpos;
+ cxt->nextcount = maxcount;
+ }
+
+ mtdoops_inc_counter(cxt, 0);
+}
+
+static void mtdoops_do_dump(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason)
+{
+ struct mtdoops_context *cxt = container_of(dumper,
+ struct mtdoops_context, dump);
+ struct kmsg_dump_iter iter;
+
+ /* Only dump oopses if dump_oops is set */
+ if (reason == KMSG_DUMP_OOPS && !dump_oops)
+ return;
+
+ kmsg_dump_rewind(&iter);
+
+ if (test_and_set_bit(0, &cxt->oops_buf_busy))
+ return;
+ kmsg_dump_get_buffer(&iter, true,
+ cxt->oops_buf + sizeof(struct mtdoops_hdr),
+ record_size - sizeof(struct mtdoops_hdr), NULL);
+ clear_bit(0, &cxt->oops_buf_busy);
+
+ if (reason != KMSG_DUMP_OOPS) {
+ /* Panics must be written immediately */
+ mtdoops_write(cxt, 1);
+ } else {
+ /* For other cases, schedule work to write it "nicely" */
+ schedule_work(&cxt->work_write);
+ }
+}
+
+static void mtdoops_notify_add(struct mtd_info *mtd)
+{
+ struct mtdoops_context *cxt = &oops_cxt;
+ u64 mtdoops_pages = div_u64(mtd->size, record_size);
+ int err;
+
+ if (!strcmp(mtd->name, mtddev))
+ cxt->mtd_index = mtd->index;
+
+ if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
+ return;
+
+ if (mtd->size < mtd->erasesize * 2) {
+ pr_err("MTD partition %d not big enough for mtdoops\n",
+ mtd->index);
+ return;
+ }
+ if (mtd->erasesize < record_size) {
+ pr_err("eraseblock size of MTD partition %d too small\n",
+ mtd->index);
+ return;
+ }
+ if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
+ pr_err("mtd%d is too large (limit is %d MiB)\n",
+ mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
+ return;
+ }
+
+ /* oops_page_used is a bit field */
+ cxt->oops_page_used =
+ vmalloc(array_size(sizeof(unsigned long),
+ DIV_ROUND_UP(mtdoops_pages,
+ BITS_PER_LONG)));
+ if (!cxt->oops_page_used) {
+ pr_err("could not allocate page array\n");
+ return;
+ }
+
+ cxt->dump.max_reason = KMSG_DUMP_OOPS;
+ cxt->dump.dump = mtdoops_do_dump;
+ err = kmsg_dump_register(&cxt->dump);
+ if (err) {
+ pr_err("registering kmsg dumper failed, error %d\n", err);
+ vfree(cxt->oops_page_used);
+ cxt->oops_page_used = NULL;
+ return;
+ }
+
+ cxt->mtd = mtd;
+ cxt->oops_pages = (int)mtd->size / record_size;
+ find_next_position(cxt);
+ pr_info("Attached to MTD device %d\n", mtd->index);
+}
+
+static void mtdoops_notify_remove(struct mtd_info *mtd)
+{
+ struct mtdoops_context *cxt = &oops_cxt;
+
+ if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
+ return;
+
+ if (kmsg_dump_unregister(&cxt->dump) < 0)
+ pr_warn("could not unregister kmsg_dumper\n");
+
+ cxt->mtd = NULL;
+ flush_work(&cxt->work_erase);
+ flush_work(&cxt->work_write);
+}
+
+static struct mtd_notifier mtdoops_notifier = {
+ .add = mtdoops_notify_add,
+ .remove = mtdoops_notify_remove,
+};
+
+static int __init mtdoops_init(void)
+{
+ struct mtdoops_context *cxt = &oops_cxt;
+ int mtd_index;
+ char *endp;
+
+ if (strlen(mtddev) == 0) {
+ pr_err("mtd device (mtddev=name/number) must be supplied\n");
+ return -EINVAL;
+ }
+ if ((record_size & 4095) != 0) {
+ pr_err("record_size must be a multiple of 4096\n");
+ return -EINVAL;
+ }
+ if (record_size < 4096) {
+		pr_err("record_size must be at least 4096 bytes\n");
+ return -EINVAL;
+ }
+
+ /* Setup the MTD device to use */
+ cxt->mtd_index = -1;
+ mtd_index = simple_strtoul(mtddev, &endp, 0);
+ if (*endp == '\0')
+ cxt->mtd_index = mtd_index;
+
+ cxt->oops_buf = vmalloc(record_size);
+ if (!cxt->oops_buf)
+ return -ENOMEM;
+ memset(cxt->oops_buf, 0xff, record_size);
+ cxt->oops_buf_busy = 0;
+
+ INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
+ INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
+
+ register_mtd_user(&mtdoops_notifier);
+ return 0;
+}
+
+static void __exit mtdoops_exit(void)
+{
+ struct mtdoops_context *cxt = &oops_cxt;
+
+ unregister_mtd_user(&mtdoops_notifier);
+ vfree(cxt->oops_buf);
+ vfree(cxt->oops_page_used);
+}
+
+module_init(mtdoops_init);
+module_exit(mtdoops_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
+MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
new file mode 100644
index 0000000000..23483db8f3
--- /dev/null
+++ b/drivers/mtd/mtdpart.c
@@ -0,0 +1,737 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Simple MTD partitioning layer
+ *
+ * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
+ * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/kmod.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include "mtdcore.h"
+
+/*
+ * MTD methods which simply translate the effective address and pass through
+ * to the _real_ device.
+ */
+
+static inline void free_partition(struct mtd_info *mtd)
+{
+ kfree(mtd->name);
+ kfree(mtd);
+}
+
+void release_mtd_partition(struct mtd_info *mtd)
+{
+ WARN_ON(!list_empty(&mtd->part.node));
+ free_partition(mtd);
+}
+
+static struct mtd_info *allocate_partition(struct mtd_info *parent,
+ const struct mtd_partition *part,
+ int partno, uint64_t cur_offset)
+{
+ struct mtd_info *master = mtd_get_master(parent);
+ int wr_alignment = (parent->flags & MTD_NO_ERASE) ?
+ master->writesize : master->erasesize;
+ u64 parent_size = mtd_is_partition(parent) ?
+ parent->part.size : parent->size;
+ struct mtd_info *child;
+ u32 remainder;
+ char *name;
+ u64 tmp;
+
+ /* allocate the partition structure */
+ child = kzalloc(sizeof(*child), GFP_KERNEL);
+ name = kstrdup(part->name, GFP_KERNEL);
+ if (!name || !child) {
+ printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
+ parent->name);
+ kfree(name);
+ kfree(child);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* set up the MTD object for this partition */
+ child->type = parent->type;
+ child->part.flags = parent->flags & ~part->mask_flags;
+ child->part.flags |= part->add_flags;
+ child->flags = child->part.flags;
+ child->part.size = part->size;
+ child->writesize = parent->writesize;
+ child->writebufsize = parent->writebufsize;
+ child->oobsize = parent->oobsize;
+ child->oobavail = parent->oobavail;
+ child->subpage_sft = parent->subpage_sft;
+
+ child->name = name;
+ child->owner = parent->owner;
+
+ /* NOTE: Historically, we didn't arrange MTDs as a tree out of
+ * concern for showing the same data in multiple partitions.
+ * However, it is very useful to have the master node present,
+ * so the MTD_PARTITIONED_MASTER option allows that. The master
+ * will have device nodes etc only if this is set, so make the
+ * parent conditional on that option. Note, this is a way to
+ * distinguish between the parent and its partitions in sysfs.
+ */
+ child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
+ &parent->dev : parent->dev.parent;
+ child->dev.of_node = part->of_node;
+ child->parent = parent;
+ child->part.offset = part->offset;
+ INIT_LIST_HEAD(&child->partitions);
+
+ if (child->part.offset == MTDPART_OFS_APPEND)
+ child->part.offset = cur_offset;
+ if (child->part.offset == MTDPART_OFS_NXTBLK) {
+ tmp = cur_offset;
+ child->part.offset = cur_offset;
+ remainder = do_div(tmp, wr_alignment);
+ if (remainder) {
+ child->part.offset += wr_alignment - remainder;
+ printk(KERN_NOTICE "Moving partition %d: "
+ "0x%012llx -> 0x%012llx\n", partno,
+ (unsigned long long)cur_offset,
+ child->part.offset);
+ }
+ }
+ if (child->part.offset == MTDPART_OFS_RETAIN) {
+ child->part.offset = cur_offset;
+ if (parent_size - child->part.offset >= child->part.size) {
+ child->part.size = parent_size - child->part.offset -
+ child->part.size;
+ } else {
+ printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
+ part->name, parent_size - child->part.offset,
+ child->part.size);
+ /* register to preserve ordering */
+ goto out_register;
+ }
+ }
+ if (child->part.size == MTDPART_SIZ_FULL)
+ child->part.size = parent_size - child->part.offset;
+
+ printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n",
+ child->part.offset, child->part.offset + child->part.size,
+ child->name);
+
+ /* let's do some sanity checks */
+ if (child->part.offset >= parent_size) {
+ /* let's register it anyway to preserve ordering */
+ child->part.offset = 0;
+ child->part.size = 0;
+
+ /* Initialize ->erasesize to make add_mtd_device() happy. */
+ child->erasesize = parent->erasesize;
+ printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
+ part->name);
+ goto out_register;
+ }
+ if (child->part.offset + child->part.size > parent->size) {
+ child->part.size = parent_size - child->part.offset;
+ printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
+ part->name, parent->name, child->part.size);
+ }
+
+ if (parent->numeraseregions > 1) {
+ /* Deal with variable erase size stuff */
+ int i, max = parent->numeraseregions;
+ u64 end = child->part.offset + child->part.size;
+ struct mtd_erase_region_info *regions = parent->eraseregions;
+
+		/* Find the first erase region which is part of this
+		 * partition. */
+ for (i = 0; i < max && regions[i].offset <= child->part.offset;
+ i++)
+ ;
+ /* The loop searched for the region _behind_ the first one */
+ if (i > 0)
+ i--;
+
+ /* Pick biggest erasesize */
+ for (; i < max && regions[i].offset < end; i++) {
+ if (child->erasesize < regions[i].erasesize)
+ child->erasesize = regions[i].erasesize;
+ }
+ BUG_ON(child->erasesize == 0);
+ } else {
+ /* Single erase size */
+ child->erasesize = master->erasesize;
+ }
+
+ /*
+ * Child erasesize might differ from the parent one if the parent
+ * exposes several regions with different erasesize. Adjust
+ * wr_alignment accordingly.
+ */
+ if (!(child->flags & MTD_NO_ERASE))
+ wr_alignment = child->erasesize;
+
+ tmp = mtd_get_master_ofs(child, 0);
+ remainder = do_div(tmp, wr_alignment);
+ if ((child->flags & MTD_WRITEABLE) && remainder) {
+ /* Doesn't start on a boundary of major erase size */
+ /* FIXME: Let it be writable if it is on a boundary of
+ * _minor_ erase size though */
+ child->flags &= ~MTD_WRITEABLE;
+ printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
+ part->name);
+ }
+
+ tmp = mtd_get_master_ofs(child, 0) + child->part.size;
+ remainder = do_div(tmp, wr_alignment);
+ if ((child->flags & MTD_WRITEABLE) && remainder) {
+ child->flags &= ~MTD_WRITEABLE;
+ printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
+ part->name);
+ }
+
+ child->size = child->part.size;
+ child->ecc_step_size = parent->ecc_step_size;
+ child->ecc_strength = parent->ecc_strength;
+ child->bitflip_threshold = parent->bitflip_threshold;
+
+ if (master->_block_isbad) {
+ uint64_t offs = 0;
+
+ while (offs < child->part.size) {
+ if (mtd_block_isreserved(child, offs))
+ child->ecc_stats.bbtblocks++;
+ else if (mtd_block_isbad(child, offs))
+ child->ecc_stats.badblocks++;
+ offs += child->erasesize;
+ }
+ }
+
+out_register:
+ return child;
+}
+
+static ssize_t offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lld\n", mtd->part.offset);
+}
+static DEVICE_ATTR_RO(offset); /* mtd partition offset */
+
+static const struct attribute *mtd_partition_attrs[] = {
+ &dev_attr_offset.attr,
+ NULL
+};
+
+static int mtd_add_partition_attrs(struct mtd_info *new)
+{
+ int ret = sysfs_create_files(&new->dev.kobj, mtd_partition_attrs);
+ if (ret)
+ printk(KERN_WARNING
+ "mtd: failed to create partition attrs, err=%d\n", ret);
+ return ret;
+}
+
+int mtd_add_partition(struct mtd_info *parent, const char *name,
+ long long offset, long long length)
+{
+ struct mtd_info *master = mtd_get_master(parent);
+ u64 parent_size = mtd_is_partition(parent) ?
+ parent->part.size : parent->size;
+ struct mtd_partition part;
+ struct mtd_info *child;
+ int ret = 0;
+
+ /* the direct offset is expected */
+ if (offset == MTDPART_OFS_APPEND ||
+ offset == MTDPART_OFS_NXTBLK)
+ return -EINVAL;
+
+ if (length == MTDPART_SIZ_FULL)
+ length = parent_size - offset;
+
+ if (length <= 0)
+ return -EINVAL;
+
+ memset(&part, 0, sizeof(part));
+ part.name = name;
+ part.size = length;
+ part.offset = offset;
+
+ child = allocate_partition(parent, &part, -1, offset);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ mutex_lock(&master->master.partitions_lock);
+ list_add_tail(&child->part.node, &parent->partitions);
+ mutex_unlock(&master->master.partitions_lock);
+
+ ret = add_mtd_device(child);
+ if (ret)
+ goto err_remove_part;
+
+ mtd_add_partition_attrs(child);
+
+ return 0;
+
+err_remove_part:
+ mutex_lock(&master->master.partitions_lock);
+ list_del(&child->part.node);
+ mutex_unlock(&master->master.partitions_lock);
+
+ free_partition(child);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtd_add_partition);
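+
+/*
+ * Illustrative sketch: carving a 1 MiB partition at offset 0 out of an
+ * already registered device ("parent" and the partition name are
+ * hypothetical):
+ *
+ *	int err = mtd_add_partition(parent, "example-part", 0, SZ_1M);
+ */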
+
+/**
+ * __mtd_del_partition - delete MTD partition
+ *
+ * @mtd: MTD structure to be deleted
+ *
+ * This function must be called with the partitions mutex locked.
+ */
+static int __mtd_del_partition(struct mtd_info *mtd)
+{
+ struct mtd_info *child, *next;
+ int err;
+
+ list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
+ err = __mtd_del_partition(child);
+ if (err)
+ return err;
+ }
+
+ sysfs_remove_files(&mtd->dev.kobj, mtd_partition_attrs);
+
+ list_del_init(&mtd->part.node);
+ err = del_mtd_device(mtd);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * This function unregisters and destroys all slave MTD objects which are
+ * attached to the given MTD object, recursively.
+ */
+static int __del_mtd_partitions(struct mtd_info *mtd)
+{
+ struct mtd_info *child, *next;
+ int ret, err = 0;
+
+ list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
+ if (mtd_has_partitions(child))
+ __del_mtd_partitions(child);
+
+ pr_info("Deleting %s MTD partition\n", child->name);
+ list_del_init(&child->part.node);
+ ret = del_mtd_device(child);
+ if (ret < 0) {
+ pr_err("Error when deleting partition \"%s\" (%d)\n",
+ child->name, ret);
+ err = ret;
+ continue;
+ }
+ }
+
+ return err;
+}
+
+int del_mtd_partitions(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ pr_info("Deleting MTD partitions on \"%s\":\n", mtd->name);
+
+ mutex_lock(&master->master.partitions_lock);
+ ret = __del_mtd_partitions(mtd);
+ mutex_unlock(&master->master.partitions_lock);
+
+ return ret;
+}
+
+int mtd_del_partition(struct mtd_info *mtd, int partno)
+{
+ struct mtd_info *child, *master = mtd_get_master(mtd);
+ int ret = -EINVAL;
+
+ mutex_lock(&master->master.partitions_lock);
+ list_for_each_entry(child, &mtd->partitions, part.node) {
+ if (child->index == partno) {
+ ret = __mtd_del_partition(child);
+ break;
+ }
+ }
+ mutex_unlock(&master->master.partitions_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtd_del_partition);
+
+/*
+ * This function, given a parent MTD object and a partition table, creates
+ * and registers the child MTD objects which are bound to the parent according
+ * to the partition definitions.
+ *
+ * For historical reasons, this function's caller only registers the parent
+ * if the MTD_PARTITIONED_MASTER config option is set.
+ */
+
+int add_mtd_partitions(struct mtd_info *parent,
+ const struct mtd_partition *parts,
+ int nbparts)
+{
+ struct mtd_info *child, *master = mtd_get_master(parent);
+ uint64_t cur_offset = 0;
+ int i, ret;
+
+ printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n",
+ nbparts, parent->name);
+
+ for (i = 0; i < nbparts; i++) {
+ child = allocate_partition(parent, parts + i, i, cur_offset);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
+ goto err_del_partitions;
+ }
+
+ mutex_lock(&master->master.partitions_lock);
+ list_add_tail(&child->part.node, &parent->partitions);
+ mutex_unlock(&master->master.partitions_lock);
+
+ ret = add_mtd_device(child);
+ if (ret) {
+ mutex_lock(&master->master.partitions_lock);
+ list_del(&child->part.node);
+ mutex_unlock(&master->master.partitions_lock);
+
+ free_partition(child);
+ goto err_del_partitions;
+ }
+
+ mtd_add_partition_attrs(child);
+
+ /* Look for subpartitions */
+ parse_mtd_partitions(child, parts[i].types, NULL);
+
+ cur_offset = child->part.offset + child->part.size;
+ }
+
+ return 0;
+
+err_del_partitions:
+ del_mtd_partitions(master);
+
+ return ret;
+}
+
+static DEFINE_SPINLOCK(part_parser_lock);
+static LIST_HEAD(part_parsers);
+
+static struct mtd_part_parser *mtd_part_parser_get(const char *name)
+{
+ struct mtd_part_parser *p, *ret = NULL;
+
+ spin_lock(&part_parser_lock);
+
+ list_for_each_entry(p, &part_parsers, list)
+ if (!strcmp(p->name, name) && try_module_get(p->owner)) {
+ ret = p;
+ break;
+ }
+
+ spin_unlock(&part_parser_lock);
+
+ return ret;
+}
+
+static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
+{
+ module_put(p->owner);
+}
+
+/*
+ * Many partition parsers just expected the core to kfree() all their data in
+ * one chunk. Do that by default.
+ */
+static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
+ int nr_parts)
+{
+ kfree(pparts);
+}
+
+int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
+{
+ p->owner = owner;
+
+ if (!p->cleanup)
+ p->cleanup = &mtd_part_parser_cleanup_default;
+
+ spin_lock(&part_parser_lock);
+ list_add(&p->list, &part_parsers);
+ spin_unlock(&part_parser_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__register_mtd_parser);
+
+void deregister_mtd_parser(struct mtd_part_parser *p)
+{
+ spin_lock(&part_parser_lock);
+ list_del(&p->list);
+ spin_unlock(&part_parser_lock);
+}
+EXPORT_SYMBOL_GPL(deregister_mtd_parser);
+
+/*
+ * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
+ * are changing this array!
+ */
+static const char * const default_mtd_part_types[] = {
+ "cmdlinepart",
+ "ofpart",
+ NULL
+};
+
+/* Check DT only when looking for subpartitions. */
+static const char * const default_subpartition_types[] = {
+ "ofpart",
+ NULL
+};
+
+static int mtd_part_do_parse(struct mtd_part_parser *parser,
+ struct mtd_info *master,
+ struct mtd_partitions *pparts,
+ struct mtd_part_parser_data *data)
+{
+ int ret;
+
+ ret = (*parser->parse_fn)(master, &pparts->parts, data);
+ pr_debug("%s: parser %s: %i\n", master->name, parser->name, ret);
+ if (ret <= 0)
+ return ret;
+
+ pr_notice("%d %s partitions found on MTD device %s\n", ret,
+ parser->name, master->name);
+
+ pparts->nr_parts = ret;
+ pparts->parser = parser;
+
+ return ret;
+}
+
+/**
+ * mtd_part_get_compatible_parser - find MTD parser by a compatible string
+ *
+ * @compat: compatible string describing partitions in a device tree
+ *
+ * MTD parsers can specify supported partitions by providing a table of
+ * compatibility strings. This function finds a parser that advertises support
+ * for a passed value of "compatible".
+ */
+static struct mtd_part_parser *mtd_part_get_compatible_parser(const char *compat)
+{
+ struct mtd_part_parser *p, *ret = NULL;
+
+ spin_lock(&part_parser_lock);
+
+ list_for_each_entry(p, &part_parsers, list) {
+ const struct of_device_id *matches;
+
+ matches = p->of_match_table;
+ if (!matches)
+ continue;
+
+ for (; matches->compatible[0]; matches++) {
+ if (!strcmp(matches->compatible, compat) &&
+ try_module_get(p->owner)) {
+ ret = p;
+ break;
+ }
+ }
+
+ if (ret)
+ break;
+ }
+
+ spin_unlock(&part_parser_lock);
+
+ return ret;
+}
+
+static int mtd_part_of_parse(struct mtd_info *master,
+ struct mtd_partitions *pparts)
+{
+ struct mtd_part_parser *parser;
+ struct device_node *np;
+ struct device_node *child;
+ struct property *prop;
+ struct device *dev;
+ const char *compat;
+ const char *fixed = "fixed-partitions";
+ int ret, err = 0;
+
+ dev = &master->dev;
+ /* Use parent device (controller) if the top level MTD is not registered */
+ if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) && !mtd_is_partition(master))
+ dev = master->dev.parent;
+
+ np = mtd_get_of_node(master);
+ if (mtd_is_partition(master))
+ of_node_get(np);
+ else
+ np = of_get_child_by_name(np, "partitions");
+
+ /*
+ * Don't create devices that are added to a bus but will never get
+ * probed. That'll cause fw_devlink to block probing of consumers of
+ * this partition until the partition device is probed.
+ */
+ for_each_child_of_node(np, child)
+ if (of_device_is_compatible(child, "nvmem-cells"))
+ of_node_set_flag(child, OF_POPULATED);
+
+ of_property_for_each_string(np, "compatible", prop, compat) {
+ parser = mtd_part_get_compatible_parser(compat);
+ if (!parser)
+ continue;
+ ret = mtd_part_do_parse(parser, master, pparts, NULL);
+ if (ret > 0) {
+ of_platform_populate(np, NULL, NULL, dev);
+ of_node_put(np);
+ return ret;
+ }
+ mtd_part_parser_put(parser);
+ if (ret < 0 && !err)
+ err = ret;
+ }
+ of_platform_populate(np, NULL, NULL, dev);
+ of_node_put(np);
+
+ /*
+	 * For backward compatibility we have to try the "fixed-partitions"
+	 * parser. It supports the old DT format, with partitions specified as
+	 * direct subnodes of a flash device DT node that carries no
+	 * compatible string we could match against.
+ */
+ parser = mtd_part_parser_get(fixed);
+ if (!parser && !request_module("%s", fixed))
+ parser = mtd_part_parser_get(fixed);
+ if (parser) {
+ ret = mtd_part_do_parse(parser, master, pparts, NULL);
+ if (ret > 0)
+ return ret;
+ mtd_part_parser_put(parser);
+ if (ret < 0 && !err)
+ err = ret;
+ }
+
+ return err;
+}
+
+/**
+ * parse_mtd_partitions - parse and register MTD partitions
+ *
+ * @master: the master partition (describes whole MTD device)
+ * @types: names of partition parsers to try or %NULL
+ * @data: MTD partition parser-specific data
+ *
+ * This function tries to find & register partitions on MTD device @master. It
+ * uses MTD partition parsers, specified in @types. However, if @types is %NULL,
+ * then the default list of parsers is used. The default list contains only the
+ * "cmdlinepart" and "ofpart" parsers ATM.
+ * Note: If there is more than one parser in @types, the kernel only takes the
+ * partitions parsed out by the first parser that succeeds.
+ *
+ * This function may return:
+ * o a negative error code in case of failure
+ * o number of found partitions otherwise
+ */
+int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_partitions pparts = { };
+ struct mtd_part_parser *parser;
+ int ret, err = 0;
+
+ if (!types)
+ types = mtd_is_partition(master) ? default_subpartition_types :
+ default_mtd_part_types;
+
+ for ( ; *types; types++) {
+ /*
+ * ofpart is a special type that means OF partitioning info
+		 * should be used. It requires slightly different logic, so it
+		 * is handled in a separate function.
+ */
+ if (!strcmp(*types, "ofpart")) {
+ ret = mtd_part_of_parse(master, &pparts);
+ } else {
+ pr_debug("%s: parsing partitions %s\n", master->name,
+ *types);
+ parser = mtd_part_parser_get(*types);
+ if (!parser && !request_module("%s", *types))
+ parser = mtd_part_parser_get(*types);
+ pr_debug("%s: got parser %s\n", master->name,
+ parser ? parser->name : NULL);
+ if (!parser)
+ continue;
+ ret = mtd_part_do_parse(parser, master, &pparts, data);
+ if (ret <= 0)
+ mtd_part_parser_put(parser);
+ }
+ /* Found partitions! */
+ if (ret > 0) {
+ err = add_mtd_partitions(master, pparts.parts,
+ pparts.nr_parts);
+ mtd_part_parser_cleanup(&pparts);
+ return err ? err : pparts.nr_parts;
+ }
+ /*
+ * Stash the first error we see; only report it if no parser
+ * succeeds
+ */
+ if (ret < 0 && !err)
+ err = ret;
+ }
+ return err;
+}
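+
+/*
+ * Illustrative sketch of a caller-supplied parser list ("probes" is a
+ * hypothetical name); passing NULL for @types selects the default list:
+ *
+ *	static const char * const probes[] = { "cmdlinepart", NULL };
+ *
+ *	ret = parse_mtd_partitions(master, probes, NULL);
+ */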
+
+void mtd_part_parser_cleanup(struct mtd_partitions *parts)
+{
+ const struct mtd_part_parser *parser;
+
+ if (!parts)
+ return;
+
+ parser = parts->parser;
+ if (parser) {
+ if (parser->cleanup)
+ parser->cleanup(parts->parts, parts->nr_parts);
+
+ mtd_part_parser_put(parser);
+ }
+}
+
+/* Returns the size of the entire flash chip */
+uint64_t mtd_get_device_size(const struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
+
+ return master->size;
+}
+EXPORT_SYMBOL_GPL(mtd_get_device_size);
diff --git a/drivers/mtd/mtdpstore.c b/drivers/mtd/mtdpstore.c
new file mode 100644
index 0000000000..7ac8ac9013
--- /dev/null
+++ b/drivers/mtd/mtdpstore.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define dev_fmt(fmt) "mtdoops-pstore: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pstore_blk.h>
+#include <linux/mtd/mtd.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+static struct mtdpstore_context {
+ int index;
+ struct pstore_blk_config info;
+ struct pstore_device_info dev;
+ struct mtd_info *mtd;
+ unsigned long *rmmap; /* removed bit map */
+ unsigned long *usedmap; /* used bit map */
+ /*
+	 * Used for panic write.
+	 * Since block_isbad cannot be called in the panic case, we record
+	 * this state in advance so that panic_write does not fail.
+ */
+ unsigned long *badmap; /* bad block bit map */
+} oops_cxt;
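+
+/*
+ * The flash is tracked at two granularities: kmsg_size "zones" (one
+ * pstore record each, tracked in usedmap and rmmap) and whole
+ * eraseblocks (tracked in badmap).
+ */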
+
+static int mtdpstore_block_isbad(struct mtdpstore_context *cxt, loff_t off)
+{
+ int ret;
+ struct mtd_info *mtd = cxt->mtd;
+ u64 blknum;
+
+ off = ALIGN_DOWN(off, mtd->erasesize);
+ blknum = div_u64(off, mtd->erasesize);
+
+ if (test_bit(blknum, cxt->badmap))
+ return true;
+ ret = mtd_block_isbad(mtd, off);
+ if (ret < 0) {
+ dev_err(&mtd->dev, "mtd_block_isbad failed, aborting\n");
+ return ret;
+ } else if (ret > 0) {
+ set_bit(blknum, cxt->badmap);
+ return true;
+ }
+ return false;
+}
+
+static inline int mtdpstore_panic_block_isbad(struct mtdpstore_context *cxt,
+ loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u64 blknum;
+
+ off = ALIGN_DOWN(off, mtd->erasesize);
+ blknum = div_u64(off, mtd->erasesize);
+ return test_bit(blknum, cxt->badmap);
+}
+
+static inline void mtdpstore_mark_used(struct mtdpstore_context *cxt,
+ loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u64 zonenum = div_u64(off, cxt->info.kmsg_size);
+
+ dev_dbg(&mtd->dev, "mark zone %llu used\n", zonenum);
+ set_bit(zonenum, cxt->usedmap);
+}
+
+static inline void mtdpstore_mark_unused(struct mtdpstore_context *cxt,
+ loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u64 zonenum = div_u64(off, cxt->info.kmsg_size);
+
+ dev_dbg(&mtd->dev, "mark zone %llu unused\n", zonenum);
+ clear_bit(zonenum, cxt->usedmap);
+}
+
+static inline void mtdpstore_block_mark_unused(struct mtdpstore_context *cxt,
+ loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
+ u64 zonenum;
+
+ off = ALIGN_DOWN(off, mtd->erasesize);
+ zonenum = div_u64(off, cxt->info.kmsg_size);
+ while (zonecnt > 0) {
+ dev_dbg(&mtd->dev, "mark zone %llu unused\n", zonenum);
+ clear_bit(zonenum, cxt->usedmap);
+ zonenum++;
+ zonecnt--;
+ }
+}
+
+static inline int mtdpstore_is_used(struct mtdpstore_context *cxt, loff_t off)
+{
+ u64 zonenum = div_u64(off, cxt->info.kmsg_size);
+ u64 blknum = div_u64(off, cxt->mtd->erasesize);
+
+ if (test_bit(blknum, cxt->badmap))
+ return true;
+ return test_bit(zonenum, cxt->usedmap);
+}
+
+static int mtdpstore_block_is_used(struct mtdpstore_context *cxt,
+ loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
+ u64 zonenum;
+
+ off = ALIGN_DOWN(off, mtd->erasesize);
+ zonenum = div_u64(off, cxt->info.kmsg_size);
+ while (zonecnt > 0) {
+ if (test_bit(zonenum, cxt->usedmap))
+ return true;
+ zonenum++;
+ zonecnt--;
+ }
+ return false;
+}
+
+static int mtdpstore_is_empty(struct mtdpstore_context *cxt, char *buf,
+ size_t size)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ size_t sz;
+ int i;
+
+ sz = min_t(uint32_t, size, mtd->writesize / 4);
+ for (i = 0; i < sz; i++) {
+ if (buf[i] != (char)0xFF)
+ return false;
+ }
+ return true;
+}
+
+static void mtdpstore_mark_removed(struct mtdpstore_context *cxt, loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u64 zonenum = div_u64(off, cxt->info.kmsg_size);
+
+ dev_dbg(&mtd->dev, "mark zone %llu removed\n", zonenum);
+ set_bit(zonenum, cxt->rmmap);
+}
+
+static void mtdpstore_block_clear_removed(struct mtdpstore_context *cxt,
+ loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
+ u64 zonenum;
+
+ off = ALIGN_DOWN(off, mtd->erasesize);
+ zonenum = div_u64(off, cxt->info.kmsg_size);
+ while (zonecnt > 0) {
+ clear_bit(zonenum, cxt->rmmap);
+ zonenum++;
+ zonecnt--;
+ }
+}
+
+static int mtdpstore_block_is_removed(struct mtdpstore_context *cxt,
+ loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
+ u64 zonenum;
+
+ off = ALIGN_DOWN(off, mtd->erasesize);
+ zonenum = div_u64(off, cxt->info.kmsg_size);
+ while (zonecnt > 0) {
+ if (test_bit(zonenum, cxt->rmmap))
+ return true;
+ zonenum++;
+ zonecnt--;
+ }
+ return false;
+}
+
+static int mtdpstore_erase_do(struct mtdpstore_context *cxt, loff_t off)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ struct erase_info erase;
+ int ret;
+
+ off = ALIGN_DOWN(off, cxt->mtd->erasesize);
+ dev_dbg(&mtd->dev, "try to erase off 0x%llx\n", off);
+ erase.len = cxt->mtd->erasesize;
+ erase.addr = off;
+ ret = mtd_erase(cxt->mtd, &erase);
+ if (!ret)
+ mtdpstore_block_clear_removed(cxt, off);
+ else
+ dev_err(&mtd->dev, "erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
+ (unsigned long long)erase.addr,
+ (unsigned long long)erase.len, cxt->info.device);
+ return ret;
+}
+
+/*
+ * Called while removing a file.
+ *
+ * To avoid over-erasing, erase a block only when the whole block is unused.
+ * If the block still contains valid log data, erase it lazily from
+ * flush_removed() at unregister time.
+ */
+static ssize_t mtdpstore_erase(size_t size, loff_t off)
+{
+ struct mtdpstore_context *cxt = &oops_cxt;
+
+ if (mtdpstore_block_isbad(cxt, off))
+ return -EIO;
+
+ mtdpstore_mark_unused(cxt, off);
+
+ /* If the block still has valid data, mtdpstore do erase lazily */
+ if (likely(mtdpstore_block_is_used(cxt, off))) {
+ mtdpstore_mark_removed(cxt, off);
+ return 0;
+ }
+
+ /* all zones are unused, erase it */
+ return mtdpstore_erase_do(cxt, off);
+}
+
+/*
+ * What does "security" mean for mtdpstore?
+ * Since no erase is possible in the panic case, we must ensure that at
+ * least one zone is writable beforehand; otherwise, panic write will fail.
+ * If a zone is used, the write operation returns -ENOMSG and pstore/blk
+ * tries the zones one by one until it finds an empty one. So we need not
+ * guarantee that the next zone is empty, only that at least one zone is.
+ */
+static int mtdpstore_security(struct mtdpstore_context *cxt, loff_t off)
+{
+ int ret = 0, i;
+ struct mtd_info *mtd = cxt->mtd;
+ u32 zonenum = (u32)div_u64(off, cxt->info.kmsg_size);
+ u32 zonecnt = (u32)div_u64(cxt->mtd->size, cxt->info.kmsg_size);
+ u32 blkcnt = (u32)div_u64(cxt->mtd->size, cxt->mtd->erasesize);
+ u32 erasesize = cxt->mtd->erasesize;
+
+ for (i = 0; i < zonecnt; i++) {
+ u32 num = (zonenum + i) % zonecnt;
+
+ /* found empty zone */
+ if (!test_bit(num, cxt->usedmap))
+ return 0;
+ }
+
+	/* If there is no empty zone at all, we have no choice but to erase */
+ while (blkcnt--) {
+ div64_u64_rem(off + erasesize, cxt->mtd->size, (u64 *)&off);
+
+ if (mtdpstore_block_isbad(cxt, off))
+ continue;
+
+ ret = mtdpstore_erase_do(cxt, off);
+ if (!ret) {
+ mtdpstore_block_mark_unused(cxt, off);
+ break;
+ }
+ }
+
+ if (ret)
+ dev_err(&mtd->dev, "all blocks bad!\n");
+ dev_dbg(&mtd->dev, "end security\n");
+ return ret;
+}
+
+static ssize_t mtdpstore_write(const char *buf, size_t size, loff_t off)
+{
+ struct mtdpstore_context *cxt = &oops_cxt;
+ struct mtd_info *mtd = cxt->mtd;
+ size_t retlen;
+ int ret;
+
+ if (mtdpstore_block_isbad(cxt, off))
+ return -ENOMSG;
+
+ /* zone is used, please try next one */
+ if (mtdpstore_is_used(cxt, off))
+ return -ENOMSG;
+
+ dev_dbg(&mtd->dev, "try to write off 0x%llx size %zu\n", off, size);
+ ret = mtd_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
+ if (ret < 0 || retlen != size) {
+ dev_err(&mtd->dev, "write failure at %lld (%zu of %zu written), err %d\n",
+ off, retlen, size, ret);
+ return -EIO;
+ }
+ mtdpstore_mark_used(cxt, off);
+
+ mtdpstore_security(cxt, off);
+ return retlen;
+}
+
+static inline bool mtdpstore_is_io_error(int ret)
+{
+ return ret < 0 && !mtd_is_bitflip(ret) && !mtd_is_eccerr(ret);
+}
+
+/*
+ * All zones will be read, since pstore/blk reads the zones one by one
+ * during recovery.
+ */
+static ssize_t mtdpstore_read(char *buf, size_t size, loff_t off)
+{
+ struct mtdpstore_context *cxt = &oops_cxt;
+ struct mtd_info *mtd = cxt->mtd;
+ size_t retlen, done;
+ int ret;
+
+ if (mtdpstore_block_isbad(cxt, off))
+ return -ENOMSG;
+
+ dev_dbg(&mtd->dev, "try to read off 0x%llx size %zu\n", off, size);
+ for (done = 0, retlen = 0; done < size; done += retlen) {
+ retlen = 0;
+
+ ret = mtd_read(cxt->mtd, off + done, size - done, &retlen,
+ (u_char *)buf + done);
+ if (mtdpstore_is_io_error(ret)) {
+ dev_err(&mtd->dev, "read failure at %lld (%zu of %zu read), err %d\n",
+ off + done, retlen, size - done, ret);
+ /* the zone may be broken, try next one */
+ return -ENOMSG;
+ }
+
+ /*
+		 * ECC error. The impact on the log data is usually small, so
+		 * it may still be readable and intelligible. mtdpstore simply
+		 * hands over whatever it gets and lets the user judge whether
+		 * the data is valid.
+ */
+ if (mtd_is_eccerr(ret)) {
+ dev_err(&mtd->dev, "ecc error at %lld (%zu of %zu read), err %d\n",
+ off + done, retlen, size - done, ret);
+ /* driver may not set retlen when ecc error */
+ retlen = retlen == 0 ? size - done : retlen;
+ }
+ }
+
+ if (mtdpstore_is_empty(cxt, buf, size))
+ mtdpstore_mark_unused(cxt, off);
+ else
+ mtdpstore_mark_used(cxt, off);
+
+ mtdpstore_security(cxt, off);
+ return retlen;
+}
+
+static ssize_t mtdpstore_panic_write(const char *buf, size_t size, loff_t off)
+{
+ struct mtdpstore_context *cxt = &oops_cxt;
+ struct mtd_info *mtd = cxt->mtd;
+ size_t retlen;
+ int ret;
+
+ if (mtdpstore_panic_block_isbad(cxt, off))
+ return -ENOMSG;
+
+ /* zone is used, please try next one */
+ if (mtdpstore_is_used(cxt, off))
+ return -ENOMSG;
+
+ ret = mtd_panic_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
+ if (ret < 0 || size != retlen) {
+		dev_err(&mtd->dev, "panic write failure at %lld (%zu of %zu written), err %d\n",
+ off, retlen, size, ret);
+ return -EIO;
+ }
+ mtdpstore_mark_used(cxt, off);
+
+ return retlen;
+}
+
+static void mtdpstore_notify_add(struct mtd_info *mtd)
+{
+ int ret;
+ struct mtdpstore_context *cxt = &oops_cxt;
+ struct pstore_blk_config *info = &cxt->info;
+ unsigned long longcnt;
+
+ if (!strcmp(mtd->name, info->device))
+ cxt->index = mtd->index;
+
+ if (mtd->index != cxt->index || cxt->index < 0)
+ return;
+
+ dev_dbg(&mtd->dev, "found matching MTD device %s\n", mtd->name);
+
+ if (mtd->size < info->kmsg_size * 2) {
+ dev_err(&mtd->dev, "MTD partition %d not big enough\n",
+ mtd->index);
+ return;
+ }
+ /*
+	 * kmsg_size must be aligned to 4096 bytes, a limit imposed by
+	 * pstore/blk. The default kmsg_size is 64 KiB. kmsg_size must not
+	 * be larger than erasesize, since mtdpstore's design relies on
+	 * that assumption.
+ */
+ if (mtd->erasesize < info->kmsg_size) {
+ dev_err(&mtd->dev, "eraseblock size of MTD partition %d too small\n",
+ mtd->index);
+ return;
+ }
+ if (unlikely(info->kmsg_size % mtd->writesize)) {
+ dev_err(&mtd->dev, "record size %lu KB must align to write size %d KB\n",
+ info->kmsg_size / 1024,
+ mtd->writesize / 1024);
+ return;
+ }
+
+ longcnt = BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size));
+ cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+ cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+
+ longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize));
+ cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+
+ /* just support dmesg right now */
+ cxt->dev.flags = PSTORE_FLAGS_DMESG;
+ cxt->dev.zone.read = mtdpstore_read;
+ cxt->dev.zone.write = mtdpstore_write;
+ cxt->dev.zone.erase = mtdpstore_erase;
+ cxt->dev.zone.panic_write = mtdpstore_panic_write;
+ cxt->dev.zone.total_size = mtd->size;
+
+ ret = register_pstore_device(&cxt->dev);
+ if (ret) {
+ dev_err(&mtd->dev, "mtd%d register to psblk failed\n",
+ mtd->index);
+ return;
+ }
+ cxt->mtd = mtd;
+ dev_info(&mtd->dev, "Attached to MTD device %d\n", mtd->index);
+}
+
+static int mtdpstore_flush_removed_do(struct mtdpstore_context *cxt,
+ loff_t off, size_t size)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ u_char *buf;
+ int ret;
+ size_t retlen;
+ struct erase_info erase;
+
+ buf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* 1st. read to cache */
+ ret = mtd_read(mtd, off, mtd->erasesize, &retlen, buf);
+ if (mtdpstore_is_io_error(ret))
+ goto free;
+
+ /* 2nd. erase block */
+ erase.len = mtd->erasesize;
+ erase.addr = off;
+ ret = mtd_erase(mtd, &erase);
+ if (ret)
+ goto free;
+
+ /* 3rd. write back */
+ while (size) {
+ unsigned int zonesize = cxt->info.kmsg_size;
+
+ /* there is valid data on block, write back */
+ if (mtdpstore_is_used(cxt, off)) {
+ ret = mtd_write(mtd, off, zonesize, &retlen, buf);
+ if (ret)
+ dev_err(&mtd->dev, "write failure at %lld (%zu of %u written), err %d\n",
+ off, retlen, zonesize, ret);
+ }
+
+ off += zonesize;
+ size -= min_t(unsigned int, zonesize, size);
+ }
+
+free:
+ kfree(buf);
+ return ret;
+}
+
+/*
+ * What does mtdpstore_flush_removed() do?
+ * When the user removes a log file from the pstore filesystem, mtdpstore
+ * must make sure the log is actually gone from flash. If the whole block is
+ * no longer used, we simply erase it. If the block still contains valid log
+ * data, all mtdpstore can do is erase the block and write the valid data back.
+ */
+static int mtdpstore_flush_removed(struct mtdpstore_context *cxt)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ int ret;
+ loff_t off;
+ u32 blkcnt = (u32)div_u64(mtd->size, mtd->erasesize);
+
+ for (off = 0; blkcnt > 0; blkcnt--, off += mtd->erasesize) {
+ ret = mtdpstore_block_isbad(cxt, off);
+ if (ret)
+ continue;
+
+ ret = mtdpstore_block_is_removed(cxt, off);
+ if (!ret)
+ continue;
+
+ ret = mtdpstore_flush_removed_do(cxt, off, mtd->erasesize);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static void mtdpstore_notify_remove(struct mtd_info *mtd)
+{
+ struct mtdpstore_context *cxt = &oops_cxt;
+
+ if (mtd->index != cxt->index || cxt->index < 0)
+ return;
+
+ mtdpstore_flush_removed(cxt);
+
+ unregister_pstore_device(&cxt->dev);
+ kfree(cxt->badmap);
+ kfree(cxt->usedmap);
+ kfree(cxt->rmmap);
+ cxt->mtd = NULL;
+ cxt->index = -1;
+}
+
+static struct mtd_notifier mtdpstore_notifier = {
+ .add = mtdpstore_notify_add,
+ .remove = mtdpstore_notify_remove,
+};
+
+static int __init mtdpstore_init(void)
+{
+ int ret;
+ struct mtdpstore_context *cxt = &oops_cxt;
+ struct pstore_blk_config *info = &cxt->info;
+
+ ret = pstore_blk_get_config(info);
+ if (unlikely(ret))
+ return ret;
+
+ if (strlen(info->device) == 0) {
+ pr_err("mtd device must be supplied (device name is empty)\n");
+ return -EINVAL;
+ }
+ if (!info->kmsg_size) {
+ pr_err("no backend enabled (kmsg_size is 0)\n");
+ return -EINVAL;
+ }
+
+ /* Setup the MTD device to use */
+ ret = kstrtoint((char *)info->device, 0, &cxt->index);
+ if (ret)
+ cxt->index = -1;
+
+ register_mtd_user(&mtdpstore_notifier);
+ return 0;
+}
+module_init(mtdpstore_init);
+
+static void __exit mtdpstore_exit(void)
+{
+ unregister_mtd_user(&mtdpstore_notifier);
+}
+module_exit(mtdpstore_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("WeiXiong Liao <liaoweixiong@allwinnertech.com>");
+MODULE_DESCRIPTION("MTD backend for pstore/blk");
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
new file mode 100644
index 0000000000..b7e3763c47
--- /dev/null
+++ b/drivers/mtd/mtdsuper.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* MTD-based superblock management
+ *
+ * Copyright © 2001-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * Written by: David Howells <dhowells@redhat.com>
+ * David Woodhouse <dwmw2@infradead.org>
+ */
+
+#include <linux/mtd/super.h>
+#include <linux/namei.h>
+#include <linux/export.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/major.h>
+#include <linux/backing-dev.h>
+#include <linux/blkdev.h>
+#include <linux/fs_context.h>
+#include "mtdcore.h"
+
+/*
+ * get a superblock on an MTD-backed filesystem
+ */
+static int mtd_get_sb(struct fs_context *fc,
+ struct mtd_info *mtd,
+ int (*fill_super)(struct super_block *,
+ struct fs_context *))
+{
+ struct super_block *sb;
+ int ret;
+
+ sb = sget_dev(fc, MKDEV(MTD_BLOCK_MAJOR, mtd->index));
+ if (IS_ERR(sb))
+ return PTR_ERR(sb);
+
+ if (sb->s_root) {
+ /* new mountpoint for an already mounted superblock */
+ pr_debug("MTDSB: Device %d (\"%s\") is already mounted\n",
+ mtd->index, mtd->name);
+ put_mtd_device(mtd);
+ } else {
+ /* fresh new superblock */
+ pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
+ mtd->index, mtd->name);
+
+ /*
+ * Would usually have been set with @sb_lock held but in
+ * contrast to sb->s_bdev that's checked with only
+ * @sb_lock held, nothing checks sb->s_mtd without also
+ * holding sb->s_umount and we're holding sb->s_umount
+ * here.
+ */
+ sb->s_mtd = mtd;
+ sb->s_bdi = bdi_get(mtd_bdi);
+
+ ret = fill_super(sb, fc);
+ if (ret < 0)
+ goto error_sb;
+
+ sb->s_flags |= SB_ACTIVE;
+ }
+
+ BUG_ON(fc->root);
+ fc->root = dget(sb->s_root);
+ return 0;
+
+error_sb:
+ deactivate_locked_super(sb);
+ return ret;
+}
+
+/*
+ * get a superblock on an MTD-backed filesystem by MTD device number
+ */
+static int mtd_get_sb_by_nr(struct fs_context *fc, int mtdnr,
+ int (*fill_super)(struct super_block *,
+ struct fs_context *))
+{
+ struct mtd_info *mtd;
+
+ mtd = get_mtd_device(NULL, mtdnr);
+ if (IS_ERR(mtd)) {
+ errorf(fc, "MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
+ return PTR_ERR(mtd);
+ }
+
+ return mtd_get_sb(fc, mtd, fill_super);
+}
+
+/**
+ * get_tree_mtd - Get a superblock based on a single MTD device
+ * @fc: The filesystem context holding the parameters
+ * @fill_super: Helper to initialise a new superblock
+ */
+int get_tree_mtd(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc))
+{
+#ifdef CONFIG_BLOCK
+ dev_t dev;
+ int ret;
+#endif
+ int mtdnr;
+
+ if (!fc->source)
+ return invalf(fc, "No source specified");
+
+ pr_debug("MTDSB: dev_name \"%s\"\n", fc->source);
+
+ /* the preferred way of mounting in future; especially when
+ * CONFIG_BLOCK=n - we specify the underlying MTD device by number or
+ * by name, so that we don't require block device support to be present
+ * in the kernel.
+ */
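+	/*
+	 * Examples (sketch; "rootfs" is a hypothetical partition name):
+	 *
+	 *	mount -t jffs2 mtd:rootfs /mnt
+	 *	mount -t jffs2 mtd0 /mnt
+	 */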
+ if (fc->source[0] == 'm' &&
+ fc->source[1] == 't' &&
+ fc->source[2] == 'd') {
+ if (fc->source[3] == ':') {
+ struct mtd_info *mtd;
+
+ /* mount by MTD device name */
+ pr_debug("MTDSB: mtd:%%s, name \"%s\"\n",
+ fc->source + 4);
+
+ mtd = get_mtd_device_nm(fc->source + 4);
+ if (!IS_ERR(mtd))
+ return mtd_get_sb(fc, mtd, fill_super);
+
+ errorf(fc, "MTD: MTD device with name \"%s\" not found",
+ fc->source + 4);
+
+ } else if (isdigit(fc->source[3])) {
+			/* mount by MTD device number */
+ char *endptr;
+
+ mtdnr = simple_strtoul(fc->source + 3, &endptr, 0);
+ if (!*endptr) {
+ /* It was a valid number */
+ pr_debug("MTDSB: mtd%%d, mtdnr %d\n", mtdnr);
+ return mtd_get_sb_by_nr(fc, mtdnr, fill_super);
+ }
+ }
+ }
+
+#ifdef CONFIG_BLOCK
+ /* try the old way - the hack where we allowed users to mount
+ * /dev/mtdblock$(n) but didn't actually _use_ the blockdev
+ */
+ ret = lookup_bdev(fc->source, &dev);
+ if (ret) {
+ errorf(fc, "MTD: Couldn't look up '%s': %d", fc->source, ret);
+ return ret;
+ }
+ pr_debug("MTDSB: lookup_bdev() returned 0\n");
+
+ if (MAJOR(dev) == MTD_BLOCK_MAJOR)
+ return mtd_get_sb_by_nr(fc, MINOR(dev), fill_super);
+
+#endif /* CONFIG_BLOCK */
+
+ if (!(fc->sb_flags & SB_SILENT))
+ errorf(fc, "MTD: Attempt to mount non-MTD device \"%s\"",
+ fc->source);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(get_tree_mtd);
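+
+/*
+ * Sketch of how a filesystem wires this up in its fs_context_operations
+ * ("examplefs" is hypothetical; JFFS2 follows the same pattern):
+ *
+ *	static int examplefs_get_tree(struct fs_context *fc)
+ *	{
+ *		return get_tree_mtd(fc, examplefs_fill_super);
+ *	}
+ */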
+
+/*
+ * destroy an MTD-based superblock
+ */
+void kill_mtd_super(struct super_block *sb)
+{
+ generic_shutdown_super(sb);
+ put_mtd_device(sb->s_mtd);
+ sb->s_mtd = NULL;
+}
+EXPORT_SYMBOL_GPL(kill_mtd_super);
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
new file mode 100644
index 0000000000..680366616d
--- /dev/null
+++ b/drivers/mtd/mtdswap.c
@@ -0,0 +1,1491 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Swap block device support for MTDs
+ * Turns an MTD device into a swap device with block wear leveling
+ *
+ * Copyright © 2007,2011 Nokia Corporation. All rights reserved.
+ *
+ * Authors: Jarkko Lavinen <jarkko.lavinen@nokia.com>
+ *
+ * Based on Richard Purdie's earlier implementation in 2007. Background
+ * support and lock-less operation written by Adrian Hunter.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/blkdev.h>
+#include <linux/swap.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/device.h>
+#include <linux/math64.h>
+
+#define MTDSWAP_PREFIX "mtdswap"
+
+/*
+ * The number of free eraseblocks when GC should stop
+ */
+#define CLEAN_BLOCK_THRESHOLD 20
+
+/*
+ * Number of free eraseblocks below which GC can also collect low frag
+ * blocks.
+ */
+#define LOW_FRAG_GC_THRESHOLD 5
+
+/*
+ * Wear level cost amortization. We want to do wear leveling in the background
+ * without disturbing gc too much. This is done by defining a max GC frequency.
+ * Frequency value 6 means 1/6 of the GC passes will pick an erase block based
+ * on the biggest wear difference rather than the biggest dirtiness.
+ *
+ * The lower frequency freq2 should be chosen so that the maximum erase
+ * difference still decreases even if a malicious application is deliberately
+ * trying to make erase differences large.
+ */
+#define MAX_ERASE_DIFF 4000
+#define COLLECT_NONDIRTY_BASE MAX_ERASE_DIFF
+#define COLLECT_NONDIRTY_FREQ1 6
+#define COLLECT_NONDIRTY_FREQ2 4
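+
+/*
+ * With the defaults above, wear levelling only kicks in once the erase-count
+ * spread exceeds 4000, and roughly every 4th to 6th GC pass then picks by
+ * wear (see mtdswap_wlfreq()).
+ */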
+
+#define PAGE_UNDEF UINT_MAX
+#define BLOCK_UNDEF UINT_MAX
+#define BLOCK_ERROR (UINT_MAX - 1)
+#define BLOCK_MAX (UINT_MAX - 2)
+
+#define EBLOCK_BAD (1 << 0)
+#define EBLOCK_NOMAGIC (1 << 1)
+#define EBLOCK_BITFLIP (1 << 2)
+#define EBLOCK_FAILED (1 << 3)
+#define EBLOCK_READERR (1 << 4)
+#define EBLOCK_IDX_SHIFT 5
+
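+/*
+ * The low five bits of swap_eb->flags hold the state flags above;
+ * mtdswap_scan_eblks() temporarily stores the destination tree index
+ * in the bits from EBLOCK_IDX_SHIFT upwards.
+ */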
+struct swap_eb {
+ struct rb_node rb;
+ struct rb_root *root;
+
+ unsigned int flags;
+ unsigned int active_count;
+ unsigned int erase_count;
+ unsigned int pad; /* speeds up pointer decrement */
+};
+
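+/*
+ * The trees are keyed by erase_count (see __mtdswap_rb_add()), so the
+ * leftmost and rightmost nodes give the minimum and maximum counts.
+ */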
+#define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
+ rb)->erase_count)
+#define MTDSWAP_ECNT_MAX(rbroot) (rb_entry(rb_last(rbroot), struct swap_eb, \
+ rb)->erase_count)
+
+struct mtdswap_tree {
+ struct rb_root root;
+ unsigned int count;
+};
+
+enum {
+ MTDSWAP_CLEAN,
+ MTDSWAP_USED,
+ MTDSWAP_LOWFRAG,
+ MTDSWAP_HIFRAG,
+ MTDSWAP_DIRTY,
+ MTDSWAP_BITFLIP,
+ MTDSWAP_FAILING,
+ MTDSWAP_TREE_CNT,
+};
+
+struct mtdswap_dev {
+ struct mtd_blktrans_dev *mbd_dev;
+ struct mtd_info *mtd;
+ struct device *dev;
+
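+ /*
+ * page_data[] maps a logical swap page to the flash block that
+ * currently holds it; revmap[] maps a flash block back to its
+ * swap page (PAGE_UNDEF when the block holds no data).
+ */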
+ unsigned int *page_data;
+ unsigned int *revmap;
+
+ unsigned int eblks;
+ unsigned int spare_eblks;
+ unsigned int pages_per_eblk;
+ unsigned int max_erase_count;
+ struct swap_eb *eb_data;
+
+ struct mtdswap_tree trees[MTDSWAP_TREE_CNT];
+
+ unsigned long long sect_read_count;
+ unsigned long long sect_write_count;
+ unsigned long long mtd_write_count;
+ unsigned long long mtd_read_count;
+ unsigned long long discard_count;
+ unsigned long long discard_page_count;
+
+ unsigned int curr_write_pos;
+ struct swap_eb *curr_write;
+
+ char *page_buf;
+ char *oob_buf;
+};
+
+struct mtdswap_oobdata {
+ __le16 magic;
+ __le32 count;
+} __packed;
+
+#define MTDSWAP_MAGIC_CLEAN 0x2095
+#define MTDSWAP_MAGIC_DIRTY (MTDSWAP_MAGIC_CLEAN + 1)
+#define MTDSWAP_TYPE_CLEAN 0
+#define MTDSWAP_TYPE_DIRTY 1
+#define MTDSWAP_OOBSIZE sizeof(struct mtdswap_oobdata)
+
+#define MTDSWAP_ERASE_RETRIES 3 /* Before marking erase block bad */
+#define MTDSWAP_IO_RETRIES 3
+
+enum {
+ MTDSWAP_SCANNED_CLEAN,
+ MTDSWAP_SCANNED_DIRTY,
+ MTDSWAP_SCANNED_BITFLIP,
+ MTDSWAP_SCANNED_BAD,
+};
+
+/*
+ * In the worst case mtdswap_writesect() has allocated the last clean
+ * page from the current block and is then pre-empted by the GC
+ * thread. The thread can consume a full erase block when moving a
+ * block.
+ */
+#define MIN_SPARE_EBLOCKS 2
+#define MIN_ERASE_BLOCKS (MIN_SPARE_EBLOCKS + 1)
+
+#define TREE_ROOT(d, name) (&d->trees[MTDSWAP_ ## name].root)
+#define TREE_EMPTY(d, name) (TREE_ROOT(d, name)->rb_node == NULL)
+#define TREE_NONEMPTY(d, name) (!TREE_EMPTY(d, name))
+#define TREE_COUNT(d, name) (d->trees[MTDSWAP_ ## name].count)
+
+#define MTDSWAP_MBD_TO_MTDSWAP(dev) ((struct mtdswap_dev *)dev->priv)
+
+static char partitions[128] = "";
+module_param_string(partitions, partitions, sizeof(partitions), 0444);
+MODULE_PARM_DESC(partitions, "MTD partition numbers to use as swap "
+ "partitions, e.g. partitions=\"1,3,5\"");
+
+static unsigned int spare_eblocks = 10;
+module_param(spare_eblocks, uint, 0444);
+MODULE_PARM_DESC(spare_eblocks, "Percentage of spare erase blocks for "
+ "garbage collection (default 10%)");
+
+static bool header; /* false */
+module_param(header, bool, 0444);
+MODULE_PARM_DESC(header,
+ "Include builtin swap header (default 0, without header)");
+
+static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background);
+
+static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ return (loff_t)(eb - d->eb_data) * d->mtd->erasesize;
+}
+
+static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ unsigned int oldidx;
+ struct mtdswap_tree *tp;
+
+ if (eb->root) {
+ tp = container_of(eb->root, struct mtdswap_tree, root);
+ oldidx = tp - &d->trees[0];
+
+ d->trees[oldidx].count--;
+ rb_erase(&eb->rb, eb->root);
+ }
+}
+
+static void __mtdswap_rb_add(struct rb_root *root, struct swap_eb *eb)
+{
+ struct rb_node **p, *parent = NULL;
+ struct swap_eb *cur;
+
+ p = &root->rb_node;
+ while (*p) {
+ parent = *p;
+ cur = rb_entry(parent, struct swap_eb, rb);
+ if (eb->erase_count > cur->erase_count)
+ p = &(*p)->rb_right;
+ else
+ p = &(*p)->rb_left;
+ }
+
+ rb_link_node(&eb->rb, parent, p);
+ rb_insert_color(&eb->rb, root);
+}
+
+static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx)
+{
+ struct rb_root *root;
+
+ if (eb->root == &d->trees[idx].root)
+ return;
+
+ mtdswap_eb_detach(d, eb);
+ root = &d->trees[idx].root;
+ __mtdswap_rb_add(root, eb);
+ eb->root = root;
+ d->trees[idx].count++;
+}
+
+static struct rb_node *mtdswap_rb_index(struct rb_root *root, unsigned int idx)
+{
+ struct rb_node *p;
+ unsigned int i;
+
+ p = rb_first(root);
+ i = 0;
+ while (i < idx && p) {
+ p = rb_next(p);
+ i++;
+ }
+
+ return p;
+}
+
+static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ int ret;
+ loff_t offset;
+
+ d->spare_eblks--;
+ eb->flags |= EBLOCK_BAD;
+ mtdswap_eb_detach(d, eb);
+ eb->root = NULL;
+
+ /* badblocks not supported */
+ if (!mtd_can_have_bb(d->mtd))
+ return 1;
+
+ offset = mtdswap_eb_offset(d, eb);
+ dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
+ ret = mtd_block_markbad(d->mtd, offset);
+
+ if (ret) {
+ dev_warn(d->dev, "Mark block bad failed for block at %08llx "
+ "error %d\n", offset, ret);
+ return ret;
+ }
+
+ return 1;
+
+}
+
+static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ unsigned int marked = eb->flags & EBLOCK_FAILED;
+ struct swap_eb *curr_write = d->curr_write;
+
+ eb->flags |= EBLOCK_FAILED;
+ if (curr_write == eb) {
+ d->curr_write = NULL;
+
+ if (!marked && d->curr_write_pos != 0) {
+ mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
+ return 0;
+ }
+ }
+
+ return mtdswap_handle_badblock(d, eb);
+}
+
+static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int ret = mtd_read_oob(d->mtd, from, ops);
+
+ if (mtd_is_bitflip(ret))
+ return ret;
+
+ if (ret) {
+ dev_warn(d->dev, "Read OOB failed %d for block at %08llx\n",
+ ret, from);
+ return ret;
+ }
+
+ if (ops->oobretlen < ops->ooblen) {
+ dev_warn(d->dev, "Read OOB return short read (%zd bytes not "
+ "%zd) for block at %08llx\n",
+ ops->oobretlen, ops->ooblen, from);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ struct mtdswap_oobdata *data, *data2;
+ int ret;
+ loff_t offset;
+ struct mtd_oob_ops ops = { };
+
+ offset = mtdswap_eb_offset(d, eb);
+
+ /* Check first if the block is bad. */
+ if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
+ return MTDSWAP_SCANNED_BAD;
+
+ ops.ooblen = 2 * d->mtd->oobavail;
+ ops.oobbuf = d->oob_buf;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_AUTO_OOB;
+
+ ret = mtdswap_read_oob(d, offset, &ops);
+
+ if (ret && !mtd_is_bitflip(ret))
+ return ret;
+
+ data = (struct mtdswap_oobdata *)d->oob_buf;
+ data2 = (struct mtdswap_oobdata *)
+ (d->oob_buf + d->mtd->oobavail);
+
+ if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
+ eb->erase_count = le32_to_cpu(data->count);
+ if (mtd_is_bitflip(ret))
+ ret = MTDSWAP_SCANNED_BITFLIP;
+ else {
+ if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY)
+ ret = MTDSWAP_SCANNED_DIRTY;
+ else
+ ret = MTDSWAP_SCANNED_CLEAN;
+ }
+ } else {
+ eb->flags |= EBLOCK_NOMAGIC;
+ ret = MTDSWAP_SCANNED_DIRTY;
+ }
+
+ return ret;
+}
+
+static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
+ u16 marker)
+{
+ struct mtdswap_oobdata n;
+ int ret;
+ loff_t offset;
+ struct mtd_oob_ops ops = { };
+
+ ops.ooboffs = 0;
+ ops.oobbuf = (uint8_t *)&n;
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.datbuf = NULL;
+
+ if (marker == MTDSWAP_TYPE_CLEAN) {
+ n.magic = cpu_to_le16(MTDSWAP_MAGIC_CLEAN);
+ n.count = cpu_to_le32(eb->erase_count);
+ ops.ooblen = MTDSWAP_OOBSIZE;
+ offset = mtdswap_eb_offset(d, eb);
+ } else {
+ n.magic = cpu_to_le16(MTDSWAP_MAGIC_DIRTY);
+ ops.ooblen = sizeof(n.magic);
+ offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
+ }
+
+ ret = mtd_write_oob(d->mtd, offset, &ops);
+
+ if (ret) {
+ dev_warn(d->dev, "Write OOB failed for block at %08llx "
+ "error %d\n", offset, ret);
+ if (ret == -EIO || mtd_is_eccerr(ret))
+ mtdswap_handle_write_error(d, eb);
+ return ret;
+ }
+
+ if (ops.oobretlen != ops.ooblen) {
+ dev_warn(d->dev, "Short OOB write for block at %08llx: "
+ "%zd not %zd\n",
+ offset, ops.oobretlen, ops.ooblen);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Are there any erase blocks without MAGIC_CLEAN header, presumably
+ * because power was cut off after erase but before header write? We
+ * need to guesstimate the erase count.
+ */
+static void mtdswap_check_counts(struct mtdswap_dev *d)
+{
+ struct rb_root hist_root = RB_ROOT;
+ struct rb_node *medrb;
+ struct swap_eb *eb;
+ unsigned int i, cnt, median;
+
+ cnt = 0;
+ for (i = 0; i < d->eblks; i++) {
+ eb = d->eb_data + i;
+
+ if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
+ continue;
+
+ __mtdswap_rb_add(&hist_root, eb);
+ cnt++;
+ }
+
+ if (cnt == 0)
+ return;
+
+ medrb = mtdswap_rb_index(&hist_root, cnt / 2);
+ median = rb_entry(medrb, struct swap_eb, rb)->erase_count;
+
+ d->max_erase_count = MTDSWAP_ECNT_MAX(&hist_root);
+
+ for (i = 0; i < d->eblks; i++) {
+ eb = d->eb_data + i;
+
+ if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_READERR))
+ eb->erase_count = median;
+
+ if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
+ continue;
+
+ rb_erase(&eb->rb, &hist_root);
+ }
+}
+
+static void mtdswap_scan_eblks(struct mtdswap_dev *d)
+{
+ int status;
+ unsigned int i, idx;
+ struct swap_eb *eb;
+
+ for (i = 0; i < d->eblks; i++) {
+ eb = d->eb_data + i;
+
+ status = mtdswap_read_markers(d, eb);
+ if (status < 0)
+ eb->flags |= EBLOCK_READERR;
+ else if (status == MTDSWAP_SCANNED_BAD) {
+ eb->flags |= EBLOCK_BAD;
+ continue;
+ }
+
+ switch (status) {
+ case MTDSWAP_SCANNED_CLEAN:
+ idx = MTDSWAP_CLEAN;
+ break;
+ case MTDSWAP_SCANNED_DIRTY:
+ case MTDSWAP_SCANNED_BITFLIP:
+ idx = MTDSWAP_DIRTY;
+ break;
+ default:
+ idx = MTDSWAP_FAILING;
+ }
+
+ eb->flags |= (idx << EBLOCK_IDX_SHIFT);
+ }
+
+ mtdswap_check_counts(d);
+
+ for (i = 0; i < d->eblks; i++) {
+ eb = d->eb_data + i;
+
+ if (eb->flags & EBLOCK_BAD)
+ continue;
+
+ idx = eb->flags >> EBLOCK_IDX_SHIFT;
+ mtdswap_rb_add(d, eb, idx);
+ }
+}
+
+/*
+ * Place the eraseblock into the tree corresponding to the number of
+ * active pages it contains.
+ */
+static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ unsigned int weight = eb->active_count;
+ unsigned int maxweight = d->pages_per_eblk;
+
+ if (eb == d->curr_write)
+ return;
+
+ if (eb->flags & EBLOCK_BITFLIP)
+ mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
+ else if (eb->flags & (EBLOCK_READERR | EBLOCK_FAILED))
+ mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
+ else if (weight == maxweight)
+ mtdswap_rb_add(d, eb, MTDSWAP_USED);
+ else if (weight == 0)
+ mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
+ else if (weight > (maxweight/2))
+ mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG);
+ else
+ mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG);
+}
+
+static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ struct mtd_info *mtd = d->mtd;
+ struct erase_info erase;
+ unsigned int retries = 0;
+ int ret;
+
+ eb->erase_count++;
+ if (eb->erase_count > d->max_erase_count)
+ d->max_erase_count = eb->erase_count;
+
+retry:
+ memset(&erase, 0, sizeof(struct erase_info));
+ erase.addr = mtdswap_eb_offset(d, eb);
+ erase.len = mtd->erasesize;
+
+ ret = mtd_erase(mtd, &erase);
+ if (ret) {
+ if (retries++ < MTDSWAP_ERASE_RETRIES) {
+ dev_warn(d->dev,
+ "erase of erase block %#llx on %s failed",
+ erase.addr, mtd->name);
+ yield();
+ goto retry;
+ }
+
+ dev_err(d->dev, "Cannot erase erase block %#llx on %s\n",
+ erase.addr, mtd->name);
+
+ mtdswap_handle_badblock(d, eb);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
+ unsigned int *block)
+{
+ int ret;
+ struct swap_eb *old_eb = d->curr_write;
+ struct rb_root *clean_root;
+ struct swap_eb *eb;
+
+ if (old_eb == NULL || d->curr_write_pos >= d->pages_per_eblk) {
+ do {
+ if (TREE_EMPTY(d, CLEAN))
+ return -ENOSPC;
+
+ clean_root = TREE_ROOT(d, CLEAN);
+ eb = rb_entry(rb_first(clean_root), struct swap_eb, rb);
+ rb_erase(&eb->rb, clean_root);
+ eb->root = NULL;
+ TREE_COUNT(d, CLEAN)--;
+
+ ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
+ } while (ret == -EIO || mtd_is_eccerr(ret));
+
+ if (ret)
+ return ret;
+
+ d->curr_write_pos = 0;
+ d->curr_write = eb;
+ if (old_eb)
+ mtdswap_store_eb(d, old_eb);
+ }
+
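+ /* Global page-sized block number: eraseblock index times pages per
+ * eraseblock, plus the write position inside the current block. */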
+ *block = (d->curr_write - d->eb_data) * d->pages_per_eblk +
+ d->curr_write_pos;
+
+ d->curr_write->active_count++;
+ d->revmap[*block] = page;
+ d->curr_write_pos++;
+
+ return 0;
+}
+
+static unsigned int mtdswap_free_page_cnt(struct mtdswap_dev *d)
+{
+ return TREE_COUNT(d, CLEAN) * d->pages_per_eblk +
+ d->pages_per_eblk - d->curr_write_pos;
+}
+
+static unsigned int mtdswap_enough_free_pages(struct mtdswap_dev *d)
+{
+ return mtdswap_free_page_cnt(d) > d->pages_per_eblk;
+}
+
+static int mtdswap_write_block(struct mtdswap_dev *d, char *buf,
+ unsigned int page, unsigned int *bp, int gc_context)
+{
+ struct mtd_info *mtd = d->mtd;
+ struct swap_eb *eb;
+ size_t retlen;
+ loff_t writepos;
+ int ret;
+
+retry:
+ if (!gc_context)
+ while (!mtdswap_enough_free_pages(d))
+ if (mtdswap_gc(d, 0) > 0)
+ return -ENOSPC;
+
+ ret = mtdswap_map_free_block(d, page, bp);
+ eb = d->eb_data + (*bp / d->pages_per_eblk);
+
+ if (ret == -EIO || mtd_is_eccerr(ret)) {
+ d->curr_write = NULL;
+ eb->active_count--;
+ d->revmap[*bp] = PAGE_UNDEF;
+ goto retry;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ writepos = (loff_t)*bp << PAGE_SHIFT;
+ ret = mtd_write(mtd, writepos, PAGE_SIZE, &retlen, buf);
+ if (ret == -EIO || mtd_is_eccerr(ret)) {
+ d->curr_write_pos--;
+ eb->active_count--;
+ d->revmap[*bp] = PAGE_UNDEF;
+ mtdswap_handle_write_error(d, eb);
+ goto retry;
+ }
+
+ if (ret < 0) {
+ dev_err(d->dev, "Write to MTD device failed: %d (%zd written)",
+ ret, retlen);
+ goto err;
+ }
+
+ if (retlen != PAGE_SIZE) {
+ dev_err(d->dev, "Short write to MTD device: %zd written",
+ retlen);
+ ret = -EIO;
+ goto err;
+ }
+
+ return ret;
+
+err:
+ d->curr_write_pos--;
+ eb->active_count--;
+ d->revmap[*bp] = PAGE_UNDEF;
+
+ return ret;
+}
+
+static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
+ unsigned int *newblock)
+{
+ struct mtd_info *mtd = d->mtd;
+ struct swap_eb *eb, *oldeb;
+ int ret;
+ size_t retlen;
+ unsigned int page, retries;
+ loff_t readpos;
+
+ page = d->revmap[oldblock];
+ readpos = (loff_t) oldblock << PAGE_SHIFT;
+ retries = 0;
+
+retry:
+ ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
+
+ if (ret < 0 && !mtd_is_bitflip(ret)) {
+ oldeb = d->eb_data + oldblock / d->pages_per_eblk;
+ oldeb->flags |= EBLOCK_READERR;
+
+ dev_err(d->dev, "Read Error: %d (block %u)\n", ret,
+ oldblock);
+ retries++;
+ if (retries < MTDSWAP_IO_RETRIES)
+ goto retry;
+
+ goto read_error;
+ }
+
+ if (retlen != PAGE_SIZE) {
+ dev_err(d->dev, "Short read: %zd (block %u)\n", retlen,
+ oldblock);
+ ret = -EIO;
+ goto read_error;
+ }
+
+ ret = mtdswap_write_block(d, d->page_buf, page, newblock, 1);
+ if (ret < 0) {
+ d->page_data[page] = BLOCK_ERROR;
+ dev_err(d->dev, "Write error: %d\n", ret);
+ return ret;
+ }
+
+ d->page_data[page] = *newblock;
+ d->revmap[oldblock] = PAGE_UNDEF;
+ eb = d->eb_data + oldblock / d->pages_per_eblk;
+ eb->active_count--;
+
+ return 0;
+
+read_error:
+ d->page_data[page] = BLOCK_ERROR;
+ d->revmap[oldblock] = PAGE_UNDEF;
+ return ret;
+}
+
+static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb)
+{
+ unsigned int i, block, eblk_base, newblock;
+ int ret, errcode;
+
+ errcode = 0;
+ eblk_base = (eb - d->eb_data) * d->pages_per_eblk;
+
+ for (i = 0; i < d->pages_per_eblk; i++) {
+ if (d->spare_eblks < MIN_SPARE_EBLOCKS)
+ return -ENOSPC;
+
+ block = eblk_base + i;
+ if (d->revmap[block] == PAGE_UNDEF)
+ continue;
+
+ ret = mtdswap_move_block(d, block, &newblock);
+ if (ret < 0 && !errcode)
+ errcode = ret;
+ }
+
+ return errcode;
+}
+
+static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d)
+{
+ int idx, stopat;
+
+ if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_THRESHOLD)
+ stopat = MTDSWAP_LOWFRAG;
+ else
+ stopat = MTDSWAP_HIFRAG;
+
+ for (idx = MTDSWAP_BITFLIP; idx >= stopat; idx--)
+ if (d->trees[idx].root.rb_node != NULL)
+ return idx;
+
+ return -1;
+}
+
+static int mtdswap_wlfreq(unsigned int maxdiff)
+{
+ unsigned int h, x, y, dist, base;
+
+ /*
+ * Calculate linear ramp down from f1 to f2 when maxdiff goes from
+ * MAX_ERASE_DIFF to MAX_ERASE_DIFF + COLLECT_NONDIRTY_BASE. Similar
+ * to a triangle with height f1 - f2 and width COLLECT_NONDIRTY_BASE.
+ */
+
+ dist = maxdiff - MAX_ERASE_DIFF;
+ if (dist > COLLECT_NONDIRTY_BASE)
+ dist = COLLECT_NONDIRTY_BASE;
+
+ /*
+ * Modelling the slope as a right-angled triangle with base
+ * COLLECT_NONDIRTY_BASE and height freq1 - freq2. The ratio y/x is
+ * equal to the ratio h/base.
+ */
+ h = COLLECT_NONDIRTY_FREQ1 - COLLECT_NONDIRTY_FREQ2;
+ base = COLLECT_NONDIRTY_BASE;
+
+ x = dist - base;
+ y = (x * h + base / 2) / base;
+
+ return COLLECT_NONDIRTY_FREQ2 + y;
+}
+
+static int mtdswap_choose_wl_tree(struct mtdswap_dev *d)
+{
+ static unsigned int pick_cnt;
+ unsigned int i, idx = -1, wear, max;
+ struct rb_root *root;
+
+ max = 0;
+ for (i = 0; i <= MTDSWAP_DIRTY; i++) {
+ root = &d->trees[i].root;
+ if (root->rb_node == NULL)
+ continue;
+
+ wear = d->max_erase_count - MTDSWAP_ECNT_MIN(root);
+ if (wear > max) {
+ max = wear;
+ idx = i;
+ }
+ }
+
+ if (max > MAX_ERASE_DIFF && pick_cnt >= mtdswap_wlfreq(max) - 1) {
+ pick_cnt = 0;
+ return idx;
+ }
+
+ pick_cnt++;
+ return -1;
+}
+
+static int mtdswap_choose_gc_tree(struct mtdswap_dev *d,
+ unsigned int background)
+{
+ int idx;
+
+ if (TREE_NONEMPTY(d, FAILING) &&
+ (background || (TREE_EMPTY(d, CLEAN) && TREE_EMPTY(d, DIRTY))))
+ return MTDSWAP_FAILING;
+
+ idx = mtdswap_choose_wl_tree(d);
+ if (idx >= MTDSWAP_CLEAN)
+ return idx;
+
+ return __mtdswap_choose_gc_tree(d);
+}
+
+static struct swap_eb *mtdswap_pick_gc_eblk(struct mtdswap_dev *d,
+ unsigned int background)
+{
+ struct rb_root *rp = NULL;
+ struct swap_eb *eb = NULL;
+ int idx;
+
+ if (background && TREE_COUNT(d, CLEAN) > CLEAN_BLOCK_THRESHOLD &&
+ TREE_EMPTY(d, DIRTY) && TREE_EMPTY(d, FAILING))
+ return NULL;
+
+ idx = mtdswap_choose_gc_tree(d, background);
+ if (idx < 0)
+ return NULL;
+
+ rp = &d->trees[idx].root;
+ eb = rb_entry(rb_first(rp), struct swap_eb, rb);
+
+ rb_erase(&eb->rb, rp);
+ eb->root = NULL;
+ d->trees[idx].count--;
+ return eb;
+}
+
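+/*
+ * Alternating 0x55/0xAA fill patterns so that successive pages and the
+ * two passes of mtdswap_eblk_passes() toggle every bit cell both ways.
+ */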
+static unsigned int mtdswap_test_patt(unsigned int i)
+{
+ return i % 2 ? 0x55555555 : 0xAAAAAAAA;
+}
+
+static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
+ struct swap_eb *eb)
+{
+ struct mtd_info *mtd = d->mtd;
+ unsigned int test, i, j, patt, mtd_pages;
+ loff_t base, pos;
+ unsigned int *p1 = (unsigned int *)d->page_buf;
+ unsigned char *p2 = (unsigned char *)d->oob_buf;
+ struct mtd_oob_ops ops = { };
+ int ret;
+
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = mtd->writesize;
+ ops.ooblen = mtd->oobavail;
+ ops.ooboffs = 0;
+ ops.datbuf = d->page_buf;
+ ops.oobbuf = d->oob_buf;
+ base = mtdswap_eb_offset(d, eb);
+ mtd_pages = d->pages_per_eblk * PAGE_SIZE / mtd->writesize;
+
+ for (test = 0; test < 2; test++) {
+ pos = base;
+ for (i = 0; i < mtd_pages; i++) {
+ patt = mtdswap_test_patt(test + i);
+ memset(d->page_buf, patt, mtd->writesize);
+ memset(d->oob_buf, patt, mtd->oobavail);
+ ret = mtd_write_oob(mtd, pos, &ops);
+ if (ret)
+ goto error;
+
+ pos += mtd->writesize;
+ }
+
+ pos = base;
+ for (i = 0; i < mtd_pages; i++) {
+ ret = mtd_read_oob(mtd, pos, &ops);
+ if (ret)
+ goto error;
+
+ patt = mtdswap_test_patt(test + i);
+ for (j = 0; j < mtd->writesize/sizeof(int); j++)
+ if (p1[j] != patt)
+ goto error;
+
+ for (j = 0; j < mtd->oobavail; j++)
+ if (p2[j] != (unsigned char)patt)
+ goto error;
+
+ pos += mtd->writesize;
+ }
+
+ ret = mtdswap_erase_block(d, eb);
+ if (ret)
+ goto error;
+ }
+
+ eb->flags &= ~EBLOCK_READERR;
+ return 1;
+
+error:
+ mtdswap_handle_badblock(d, eb);
+ return 0;
+}
+
+static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)
+{
+ struct swap_eb *eb;
+ int ret;
+
+ if (d->spare_eblks < MIN_SPARE_EBLOCKS)
+ return 1;
+
+ eb = mtdswap_pick_gc_eblk(d, background);
+ if (!eb)
+ return 1;
+
+ ret = mtdswap_gc_eblock(d, eb);
+ if (ret == -ENOSPC)
+ return 1;
+
+ if (eb->flags & EBLOCK_FAILED) {
+ mtdswap_handle_badblock(d, eb);
+ return 0;
+ }
+
+ eb->flags &= ~EBLOCK_BITFLIP;
+ ret = mtdswap_erase_block(d, eb);
+ if ((eb->flags & EBLOCK_READERR) &&
+ (ret || !mtdswap_eblk_passes(d, eb)))
+ return 0;
+
+ if (ret == 0)
+ ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN);
+
+ if (ret == 0)
+ mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
+ else if (ret != -EIO && !mtd_is_eccerr(ret))
+ mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
+
+ return 0;
+}
+
+static void mtdswap_background(struct mtd_blktrans_dev *dev)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+ int ret;
+
+ while (1) {
+ ret = mtdswap_gc(d, 1);
+ if (ret || mtd_blktrans_cease_background(dev))
+ return;
+ }
+}
+
+static void mtdswap_cleanup(struct mtdswap_dev *d)
+{
+ vfree(d->eb_data);
+ vfree(d->revmap);
+ vfree(d->page_data);
+ kfree(d->oob_buf);
+ kfree(d->page_buf);
+}
+
+static int mtdswap_flush(struct mtd_blktrans_dev *dev)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+
+ mtd_sync(d->mtd);
+ return 0;
+}
+
+static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size)
+{
+ loff_t offset;
+ unsigned int badcnt;
+
+ badcnt = 0;
+
+ if (mtd_can_have_bb(mtd))
+ for (offset = 0; offset < size; offset += mtd->erasesize)
+ if (mtd_block_isbad(mtd, offset))
+ badcnt++;
+
+ return badcnt;
+}
+
+static int mtdswap_writesect(struct mtd_blktrans_dev *dev,
+ unsigned long page, char *buf)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+ unsigned int newblock, mapped;
+ struct swap_eb *eb;
+ int ret;
+
+ d->sect_write_count++;
+
+ if (d->spare_eblks < MIN_SPARE_EBLOCKS)
+ return -ENOSPC;
+
+ if (header) {
+ /* Ignore writes to the header page */
+ if (unlikely(page == 0))
+ return 0;
+
+ page--;
+ }
+
+ mapped = d->page_data[page];
+ if (mapped <= BLOCK_MAX) {
+ eb = d->eb_data + (mapped / d->pages_per_eblk);
+ eb->active_count--;
+ mtdswap_store_eb(d, eb);
+ d->page_data[page] = BLOCK_UNDEF;
+ d->revmap[mapped] = PAGE_UNDEF;
+ }
+
+ ret = mtdswap_write_block(d, buf, page, &newblock, 0);
+ d->mtd_write_count++;
+
+ if (ret < 0)
+ return ret;
+
+ d->page_data[page] = newblock;
+
+ return 0;
+}
+
+/* Provide a dummy swap header for the kernel */
+static int mtdswap_auto_header(struct mtdswap_dev *d, char *buf)
+{
+ union swap_header *hd = (union swap_header *)(buf);
+
+ memset(buf, 0, PAGE_SIZE - 10);
+
+ hd->info.version = 1;
+ hd->info.last_page = d->mbd_dev->size - 1;
+ hd->info.nr_badpages = 0;
+
+ memcpy(buf + PAGE_SIZE - 10, "SWAPSPACE2", 10);
+
+ return 0;
+}
+
+static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
+ unsigned long page, char *buf)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+ struct mtd_info *mtd = d->mtd;
+ unsigned int realblock, retries;
+ loff_t readpos;
+ struct swap_eb *eb;
+ size_t retlen;
+ int ret;
+
+ d->sect_read_count++;
+
+ if (header) {
+ if (unlikely(page == 0))
+ return mtdswap_auto_header(d, buf);
+
+ page--;
+ }
+
+ realblock = d->page_data[page];
+ if (realblock > BLOCK_MAX) {
+ memset(buf, 0x0, PAGE_SIZE);
+ if (realblock == BLOCK_UNDEF)
+ return 0;
+ else
+ return -EIO;
+ }
+
+ eb = d->eb_data + (realblock / d->pages_per_eblk);
+ BUG_ON(d->revmap[realblock] == PAGE_UNDEF);
+
+ readpos = (loff_t)realblock << PAGE_SHIFT;
+ retries = 0;
+
+retry:
+ ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, buf);
+
+ d->mtd_read_count++;
+ if (mtd_is_bitflip(ret)) {
+ eb->flags |= EBLOCK_BITFLIP;
+ mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
+ ret = 0;
+ }
+
+ if (ret < 0) {
+ dev_err(d->dev, "Read error %d\n", ret);
+ eb->flags |= EBLOCK_READERR;
+ mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
+ retries++;
+ if (retries < MTDSWAP_IO_RETRIES)
+ goto retry;
+
+ return ret;
+ }
+
+ if (retlen != PAGE_SIZE) {
+ dev_err(d->dev, "Short read %zd\n", retlen);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtdswap_discard(struct mtd_blktrans_dev *dev, unsigned long first,
+ unsigned nr_pages)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+ unsigned long page;
+ struct swap_eb *eb;
+ unsigned int mapped;
+
+ d->discard_count++;
+
+ for (page = first; page < first + nr_pages; page++) {
+ mapped = d->page_data[page];
+ if (mapped <= BLOCK_MAX) {
+ eb = d->eb_data + (mapped / d->pages_per_eblk);
+ eb->active_count--;
+ mtdswap_store_eb(d, eb);
+ d->page_data[page] = BLOCK_UNDEF;
+ d->revmap[mapped] = PAGE_UNDEF;
+ d->discard_page_count++;
+ } else if (mapped == BLOCK_ERROR) {
+ d->page_data[page] = BLOCK_UNDEF;
+ d->discard_page_count++;
+ }
+ }
+
+ return 0;
+}
+
+static int mtdswap_show(struct seq_file *s, void *data)
+{
+ struct mtdswap_dev *d = (struct mtdswap_dev *) s->private;
+ unsigned long sum;
+ unsigned int count[MTDSWAP_TREE_CNT];
+ unsigned int min[MTDSWAP_TREE_CNT];
+ unsigned int max[MTDSWAP_TREE_CNT];
+ unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
+ uint64_t use_size;
+ static const char * const name[] = {
+ "clean", "used", "low", "high", "dirty", "bitflip", "failing"
+ };
+
+ mutex_lock(&d->mbd_dev->lock);
+
+ for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
+ struct rb_root *root = &d->trees[i].root;
+
+ if (root->rb_node) {
+ count[i] = d->trees[i].count;
+ min[i] = MTDSWAP_ECNT_MIN(root);
+ max[i] = MTDSWAP_ECNT_MAX(root);
+ } else
+ count[i] = 0;
+ }
+
+ if (d->curr_write) {
+ cw = 1;
+ cwp = d->curr_write_pos;
+ cwecount = d->curr_write->erase_count;
+ }
+
+ sum = 0;
+ for (i = 0; i < d->eblks; i++)
+ sum += d->eb_data[i].erase_count;
+
+ use_size = (uint64_t)d->eblks * d->mtd->erasesize;
+ bb_cnt = mtdswap_badblocks(d->mtd, use_size);
+
+ mapped = 0;
+ pages = d->mbd_dev->size;
+ for (i = 0; i < pages; i++)
+ if (d->page_data[i] != BLOCK_UNDEF)
+ mapped++;
+
+ mutex_unlock(&d->mbd_dev->lock);
+
+ for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
+ if (!count[i])
+ continue;
+
+ if (min[i] != max[i])
+ seq_printf(s, "%s:\t%5d erase blocks, erased min %d, "
+ "max %d times\n",
+ name[i], count[i], min[i], max[i]);
+ else
+ seq_printf(s, "%s:\t%5d erase blocks, all erased %d "
+ "times\n", name[i], count[i], min[i]);
+ }
+
+ if (bb_cnt)
+ seq_printf(s, "bad:\t%5u erase blocks\n", bb_cnt);
+
+ if (cw)
+ seq_printf(s, "current erase block: %u pages used, %u free, "
+ "erased %u times\n",
+ cwp, d->pages_per_eblk - cwp, cwecount);
+
+ seq_printf(s, "total erasures: %lu\n", sum);
+
+ seq_puts(s, "\n");
+
+ seq_printf(s, "mtdswap_readsect count: %llu\n", d->sect_read_count);
+ seq_printf(s, "mtdswap_writesect count: %llu\n", d->sect_write_count);
+ seq_printf(s, "mtdswap_discard count: %llu\n", d->discard_count);
+ seq_printf(s, "mtd read count: %llu\n", d->mtd_read_count);
+ seq_printf(s, "mtd write count: %llu\n", d->mtd_write_count);
+ seq_printf(s, "discarded pages count: %llu\n", d->discard_page_count);
+
+ seq_puts(s, "\n");
+ seq_printf(s, "total pages: %u\n", pages);
+ seq_printf(s, "pages mapped: %u\n", mapped);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(mtdswap);
+
+static int mtdswap_add_debugfs(struct mtdswap_dev *d)
+{
+ struct dentry *root = d->mtd->dbg.dfs_dir;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ if (IS_ERR_OR_NULL(root))
+ return -1;
+
+ debugfs_create_file("mtdswap_stats", S_IRUSR, root, d, &mtdswap_fops);
+
+ return 0;
+}
+
+static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
+ unsigned int spare_cnt)
+{
+ struct mtd_info *mtd = d->mbd_dev->mtd;
+ unsigned int i, eblk_bytes, pages, blocks;
+ int ret = -ENOMEM;
+
+ d->mtd = mtd;
+ d->eblks = eblocks;
+ d->spare_eblks = spare_cnt;
+ d->pages_per_eblk = mtd->erasesize >> PAGE_SHIFT;
+
+ pages = d->mbd_dev->size;
+ blocks = eblocks * d->pages_per_eblk;
+
+ for (i = 0; i < MTDSWAP_TREE_CNT; i++)
+ d->trees[i].root = RB_ROOT;
+
+ d->page_data = vmalloc(array_size(pages, sizeof(int)));
+ if (!d->page_data)
+ goto page_data_fail;
+
+ d->revmap = vmalloc(array_size(blocks, sizeof(int)));
+ if (!d->revmap)
+ goto revmap_fail;
+
+ eblk_bytes = sizeof(struct swap_eb)*d->eblks;
+ d->eb_data = vzalloc(eblk_bytes);
+ if (!d->eb_data)
+ goto eb_data_fail;
+
+ for (i = 0; i < pages; i++)
+ d->page_data[i] = BLOCK_UNDEF;
+
+ for (i = 0; i < blocks; i++)
+ d->revmap[i] = PAGE_UNDEF;
+
+ d->page_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!d->page_buf)
+ goto page_buf_fail;
+
+ d->oob_buf = kmalloc_array(2, mtd->oobavail, GFP_KERNEL);
+ if (!d->oob_buf)
+ goto oob_buf_fail;
+
+ mtdswap_scan_eblks(d);
+
+ return 0;
+
+oob_buf_fail:
+ kfree(d->page_buf);
+page_buf_fail:
+ vfree(d->eb_data);
+eb_data_fail:
+ vfree(d->revmap);
+revmap_fail:
+ vfree(d->page_data);
+page_data_fail:
+ printk(KERN_ERR "%s: init failed (%d)\n", MTDSWAP_PREFIX, ret);
+ return ret;
+}
+
+static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct mtdswap_dev *d;
+ struct mtd_blktrans_dev *mbd_dev;
+ char *parts;
+ char *this_opt;
+ unsigned long part;
+ unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
+ uint64_t swap_size, use_size, size_limit;
+ int ret;
+
+ parts = &partitions[0];
+ if (!*parts)
+ return;
+
+ while ((this_opt = strsep(&parts, ",")) != NULL) {
+ if (kstrtoul(this_opt, 0, &part) < 0)
+ return;
+
+ if (mtd->index == part)
+ break;
+ }
+
+ if (mtd->index != part)
+ return;
+
+ if (mtd->erasesize < PAGE_SIZE || mtd->erasesize % PAGE_SIZE) {
+ printk(KERN_ERR "%s: Erase size %u not multiple of PAGE_SIZE "
+ "%lu\n", MTDSWAP_PREFIX, mtd->erasesize, PAGE_SIZE);
+ return;
+ }
+
+ if (PAGE_SIZE % mtd->writesize || mtd->writesize > PAGE_SIZE) {
+ printk(KERN_ERR "%s: PAGE_SIZE %lu not multiple of write size"
+ " %u\n", MTDSWAP_PREFIX, PAGE_SIZE, mtd->writesize);
+ return;
+ }
+
+ if (!mtd->oobsize || mtd->oobavail < MTDSWAP_OOBSIZE) {
+ printk(KERN_ERR "%s: Not enough free bytes in OOB, "
+ "%d available, %zu needed.\n",
+ MTDSWAP_PREFIX, mtd->oobavail, MTDSWAP_OOBSIZE);
+ return;
+ }
+
+ if (spare_eblocks > 100)
+ spare_eblocks = 100;
+
+ use_size = mtd->size;
+ size_limit = (uint64_t) BLOCK_MAX * PAGE_SIZE;
+
+ if (mtd->size > size_limit) {
+ printk(KERN_WARNING "%s: Device too large. Limiting size to "
+ "%llu bytes\n", MTDSWAP_PREFIX, size_limit);
+ use_size = size_limit;
+ }
+
+ eblocks = mtd_div_by_eb(use_size, mtd);
+ use_size = (uint64_t)eblocks * mtd->erasesize;
+ bad_blocks = mtdswap_badblocks(mtd, use_size);
+ eavailable = eblocks - bad_blocks;
+
+ if (eavailable < MIN_ERASE_BLOCKS) {
+ printk(KERN_ERR "%s: Not enough erase blocks. %u available, "
+ "%d needed\n", MTDSWAP_PREFIX, eavailable,
+ MIN_ERASE_BLOCKS);
+ return;
+ }
+
+ spare_cnt = div_u64((uint64_t)eavailable * spare_eblocks, 100);
+
+ if (spare_cnt < MIN_SPARE_EBLOCKS)
+ spare_cnt = MIN_SPARE_EBLOCKS;
+
+ if (spare_cnt > eavailable - 1)
+ spare_cnt = eavailable - 1;
+
+ swap_size = (uint64_t)(eavailable - spare_cnt) * mtd->erasesize +
+ (header ? PAGE_SIZE : 0);
+
+ printk(KERN_INFO "%s: Enabling MTD swap on device %lu, size %llu KB, "
+ "%u spare, %u bad blocks\n",
+ MTDSWAP_PREFIX, part, swap_size / 1024, spare_cnt, bad_blocks);
+
+ d = kzalloc(sizeof(struct mtdswap_dev), GFP_KERNEL);
+ if (!d)
+ return;
+
+ mbd_dev = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
+ if (!mbd_dev) {
+ kfree(d);
+ return;
+ }
+
+ d->mbd_dev = mbd_dev;
+ mbd_dev->priv = d;
+
+ mbd_dev->mtd = mtd;
+ mbd_dev->devnum = mtd->index;
+ mbd_dev->size = swap_size >> PAGE_SHIFT;
+ mbd_dev->tr = tr;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ mbd_dev->readonly = 1;
+
+ if (mtdswap_init(d, eblocks, spare_cnt) < 0)
+ goto init_failed;
+
+ if (add_mtd_blktrans_dev(mbd_dev) < 0)
+ goto cleanup;
+
+ d->dev = disk_to_dev(mbd_dev->disk);
+
+ ret = mtdswap_add_debugfs(d);
+ if (ret < 0)
+ goto debugfs_failed;
+
+ return;
+
+debugfs_failed:
+ del_mtd_blktrans_dev(mbd_dev);
+
+cleanup:
+ mtdswap_cleanup(d);
+
+init_failed:
+ kfree(mbd_dev);
+ kfree(d);
+}
+
+static void mtdswap_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
+
+ del_mtd_blktrans_dev(dev);
+ mtdswap_cleanup(d);
+ kfree(d);
+}
+
+static struct mtd_blktrans_ops mtdswap_ops = {
+ .name = "mtdswap",
+ .major = 0,
+ .part_bits = 0,
+ .blksize = PAGE_SIZE,
+ .flush = mtdswap_flush,
+ .readsect = mtdswap_readsect,
+ .writesect = mtdswap_writesect,
+ .discard = mtdswap_discard,
+ .background = mtdswap_background,
+ .add_mtd = mtdswap_add_mtd,
+ .remove_dev = mtdswap_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+module_mtd_blktrans(mtdswap_ops);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
+MODULE_DESCRIPTION("Block device access to an MTD suitable for using as "
+ "swap space");
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
new file mode 100644
index 0000000000..5b0c2c95f1
--- /dev/null
+++ b/drivers/mtd/nand/Kconfig
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menu "NAND"
+
+config MTD_NAND_CORE
+ tristate
+
+source "drivers/mtd/nand/onenand/Kconfig"
+source "drivers/mtd/nand/raw/Kconfig"
+source "drivers/mtd/nand/spi/Kconfig"
+
+menu "ECC engine support"
+
+config MTD_NAND_ECC
+ bool
+ select MTD_NAND_CORE
+
+config MTD_NAND_ECC_SW_HAMMING
+ bool "Software Hamming ECC engine"
+ default y if MTD_RAW_NAND
+ select MTD_NAND_ECC
+ help
+ This enables support for software Hamming error
+ correction. This correction can correct up to 1 bit error
+ per chunk and detect up to 2 bit errors. While it used to be
+ widely used with old parts, newer NAND chips usually require
+ stronger correction, in which case BCH or RS will be
+ preferred.
+
+config MTD_NAND_ECC_SW_HAMMING_SMC
+ bool "NAND ECC Smart Media byte order"
+ depends on MTD_NAND_ECC_SW_HAMMING
+ default n
+ help
+ Software ECC according to the Smart Media Specification.
+ The original Linux implementation had byte 0 and 1 swapped.
+
+config MTD_NAND_ECC_SW_BCH
+ bool "Software BCH ECC engine"
+ select BCH
+ select MTD_NAND_ECC
+ default n
+ help
+ This enables support for software BCH error correction. Binary BCH
+ codes are more powerful and CPU-intensive than traditional Hamming
+ ECC codes. They are used with NAND devices requiring more than 1 bit
+ of error correction.
+
+config MTD_NAND_ECC_MXIC
+ bool "Macronix external hardware ECC engine"
+ depends on HAS_IOMEM
+ select MTD_NAND_ECC
+ help
+ This enables support for the hardware ECC engine from Macronix.
+
+config MTD_NAND_ECC_MEDIATEK
+ tristate "Mediatek hardware ECC engine"
+ depends on HAS_IOMEM
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select MTD_NAND_ECC
+ help
+ This enables support for the hardware ECC engine from Mediatek.
+
+endmenu
+
+endmenu
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
new file mode 100644
index 0000000000..19e1291ac4
--- /dev/null
+++ b/drivers/mtd/nand/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+nandcore-objs := core.o bbt.o
+obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
+obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
+
+obj-y += onenand/
+obj-y += raw/
+obj-y += spi/
+
+nandcore-$(CONFIG_MTD_NAND_ECC) += ecc.o
+nandcore-$(CONFIG_MTD_NAND_ECC_SW_HAMMING) += ecc-sw-hamming.o
+nandcore-$(CONFIG_MTD_NAND_ECC_SW_BCH) += ecc-sw-bch.o
+nandcore-$(CONFIG_MTD_NAND_ECC_MXIC) += ecc-mxic.o
diff --git a/drivers/mtd/nand/bbt.c b/drivers/mtd/nand/bbt.c
new file mode 100644
index 0000000000..db4f93a903
--- /dev/null
+++ b/drivers/mtd/nand/bbt.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 Free Electrons
+ *
+ * Authors:
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Peter Pan <peterpandong@micron.com>
+ */
+
+#define pr_fmt(fmt) "nand-bbt: " fmt
+
+#include <linux/mtd/nand.h>
+#include <linux/slab.h>
+
+/**
+ * nanddev_bbt_init() - Initialize the BBT (Bad Block Table)
+ * @nand: NAND device
+ *
+ * Initialize the in-memory BBT.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_bbt_init(struct nand_device *nand)
+{
+ unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
+ unsigned int nblocks = nanddev_neraseblocks(nand);
+
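+ /* One bits_per_block-wide field per eraseblock; fls() yields just
+ * enough bits to encode any nand_bbt_block_status value. */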
+ nand->bbt.cache = bitmap_zalloc(nblocks * bits_per_block, GFP_KERNEL);
+ if (!nand->bbt.cache)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nanddev_bbt_init);
+
+/**
+ * nanddev_bbt_cleanup() - Cleanup the BBT (Bad Block Table)
+ * @nand: NAND device
+ *
+ * Undoes what has been done in nanddev_bbt_init()
+ */
+void nanddev_bbt_cleanup(struct nand_device *nand)
+{
+ bitmap_free(nand->bbt.cache);
+}
+EXPORT_SYMBOL_GPL(nanddev_bbt_cleanup);
+
+/**
+ * nanddev_bbt_update() - Update a BBT
+ * @nand: nand device
+ *
+ * Update the BBT. Currently a NOP function since on-flash bbt is not yet
+ * supported.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_bbt_update(struct nand_device *nand)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nanddev_bbt_update);
+
+/**
+ * nanddev_bbt_get_block_status() - Return the status of an eraseblock
+ * @nand: nand device
+ * @entry: the BBT entry
+ *
+ * Return: a positive nand_bbt_block_status value, or -%ERANGE if @entry
+ * is bigger than the BBT size.
+ */
+int nanddev_bbt_get_block_status(const struct nand_device *nand,
+ unsigned int entry)
+{
+ unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
+ unsigned long *pos = nand->bbt.cache +
+ ((entry * bits_per_block) / BITS_PER_LONG);
+ unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
+ unsigned long status;
+
+ if (entry >= nanddev_neraseblocks(nand))
+ return -ERANGE;
+
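+ /*
+ * An entry may straddle two longs when bits_per_block does not
+ * divide BITS_PER_LONG; pick up the high bits from pos[1].
+ */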
+ status = pos[0] >> offs;
+ if (bits_per_block + offs > BITS_PER_LONG)
+ status |= pos[1] << (BITS_PER_LONG - offs);
+
+ return status & GENMASK(bits_per_block - 1, 0);
+}
+EXPORT_SYMBOL_GPL(nanddev_bbt_get_block_status);
+
+/**
+ * nanddev_bbt_set_block_status() - Update the status of an eraseblock in the
+ * in-memory BBT
+ * @nand: nand device
+ * @entry: the BBT entry to update
+ * @status: the new status
+ *
+ * Update an entry of the in-memory BBT. If you want to push the updated BBT
+ * to the NAND you should call nanddev_bbt_update().
+ *
+ * Return: 0 in case of success or -%ERANGE if @entry is bigger than the BBT
+ * size.
+ */
+int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
+ enum nand_bbt_block_status status)
+{
+ unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
+ unsigned long *pos = nand->bbt.cache +
+ ((entry * bits_per_block) / BITS_PER_LONG);
+ unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
+ unsigned long val = status & GENMASK(bits_per_block - 1, 0);
+
+ if (entry >= nanddev_neraseblocks(nand))
+ return -ERANGE;
+
+ pos[0] &= ~GENMASK(offs + bits_per_block - 1, offs);
+ pos[0] |= val << offs;
+
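+ /* Spill the bits that did not fit into pos[0] into the next long. */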
+ if (bits_per_block + offs > BITS_PER_LONG) {
+ unsigned int rbits = bits_per_block + offs - BITS_PER_LONG;
+
+ pos[1] &= ~GENMASK(rbits - 1, 0);
+ pos[1] |= val >> (bits_per_block - rbits);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nanddev_bbt_set_block_status);
diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c
new file mode 100644
index 0000000000..7737b1a4a1
--- /dev/null
+++ b/drivers/mtd/nand/core.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 Free Electrons
+ *
+ * Authors:
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Peter Pan <peterpandong@micron.com>
+ */
+
+#define pr_fmt(fmt) "nand: " fmt
+
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+
+/**
+ * nanddev_isbad() - Check if a block is bad
+ * @nand: NAND device
+ * @pos: position pointing to the block we want to check
+ *
+ * Return: true if the block is bad, false otherwise.
+ */
+bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ if (mtd_check_expert_analysis_mode())
+ return false;
+
+ if (nanddev_bbt_is_initialized(nand)) {
+ unsigned int entry;
+ int status;
+
+ entry = nanddev_bbt_pos_to_entry(nand, pos);
+ status = nanddev_bbt_get_block_status(nand, entry);
+ /* Lazy block status retrieval */
+ if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
+ if (nand->ops->isbad(nand, pos))
+ status = NAND_BBT_BLOCK_FACTORY_BAD;
+ else
+ status = NAND_BBT_BLOCK_GOOD;
+
+ nanddev_bbt_set_block_status(nand, entry, status);
+ }
+
+ if (status == NAND_BBT_BLOCK_WORN ||
+ status == NAND_BBT_BLOCK_FACTORY_BAD)
+ return true;
+
+ return false;
+ }
+
+ return nand->ops->isbad(nand, pos);
+}
+EXPORT_SYMBOL_GPL(nanddev_isbad);
+
+/**
+ * nanddev_markbad() - Mark a block as bad
+ * @nand: NAND device
+ * @pos: position of the block to mark bad
+ *
+ * Mark a block bad. This function updates the BBT if available and
+ * calls the low-level markbad hook (nand->ops->markbad()).
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ unsigned int entry;
+ int ret = 0;
+
+ if (nanddev_isbad(nand, pos))
+ return 0;
+
+ ret = nand->ops->markbad(nand, pos);
+ if (ret)
+ pr_warn("failed to write BBM to block @%llx (err = %d)\n",
+ nanddev_pos_to_offs(nand, pos), ret);
+
+ if (!nanddev_bbt_is_initialized(nand))
+ goto out;
+
+ entry = nanddev_bbt_pos_to_entry(nand, pos);
+ ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
+ if (ret)
+ goto out;
+
+ ret = nanddev_bbt_update(nand);
+
+out:
+ if (!ret)
+ mtd->ecc_stats.badblocks++;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nanddev_markbad);
+
+/**
+ * nanddev_isreserved() - Check whether an eraseblock is reserved or not
+ * @nand: NAND device
+ * @pos: NAND position to test
+ *
+ * Checks whether the eraseblock pointed to by @pos is reserved or not.
+ *
+ * Return: true if the eraseblock is reserved, false otherwise.
+ */
+bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
+{
+ unsigned int entry;
+ int status;
+
+ if (!nanddev_bbt_is_initialized(nand))
+ return false;
+
+ /* Return info from the table */
+ entry = nanddev_bbt_pos_to_entry(nand, pos);
+ status = nanddev_bbt_get_block_status(nand, entry);
+ return status == NAND_BBT_BLOCK_RESERVED;
+}
+EXPORT_SYMBOL_GPL(nanddev_isreserved);
+
+/**
+ * nanddev_erase() - Erase a NAND portion
+ * @nand: NAND device
+ * @pos: position of the block to erase
+ *
+ * Erases the block if it's not bad.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
+{
+ if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
+ pr_warn("attempt to erase a bad/reserved block @%llx\n",
+ nanddev_pos_to_offs(nand, pos));
+ return -EIO;
+ }
+
+ return nand->ops->erase(nand, pos);
+}
+
+/**
+ * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
+ * @mtd: MTD device
+ * @einfo: erase request
+ *
+ * This is a simple mtd->_erase() implementation iterating over all blocks
+ * concerned by @einfo and calling nand->ops->erase() on each of them.
+ *
+ * Note that mtd->_erase should not be directly assigned to this helper,
+ * because there's no locking here. NAND specialized layers should instead
+ * implement their own wrapper around nanddev_mtd_erase() taking the
+ * appropriate lock before calling nanddev_mtd_erase().
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_pos pos, last;
+ int ret;
+
+ nanddev_offs_to_pos(nand, einfo->addr, &pos);
+ nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
+ while (nanddev_pos_cmp(&pos, &last) <= 0) {
+ ret = nanddev_erase(nand, &pos);
+ if (ret) {
+ einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
+
+ return ret;
+ }
+
+ nanddev_pos_next_eraseblock(nand, &pos);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
+
+/**
+ * nanddev_mtd_max_bad_blocks() - Get the maximum number of bad eraseblock on
+ * a specific region of the NAND device
+ * @mtd: MTD device
+ * @offs: offset of the NAND region
+ * @len: length of the NAND region
+ *
+ * Default implementation for mtd->_max_bad_blocks(). Only works if
+ * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
+ *
+ * Return: a positive number encoding the maximum number of bad eraseblocks
+ * on a portion of memory, a negative error code otherwise.
+ */
+int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_pos pos, end;
+ unsigned int max_bb = 0;
+
+ if (!nand->memorg.max_bad_eraseblocks_per_lun)
+ return -ENOTSUPP;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+ nanddev_offs_to_pos(nand, offs + len, &end);
+
+ for (nanddev_offs_to_pos(nand, offs, &pos);
+ nanddev_pos_cmp(&pos, &end) < 0;
+ nanddev_pos_next_lun(nand, &pos))
+ max_bb += nand->memorg.max_bad_eraseblocks_per_lun;
+
+ return max_bb;
+}
+EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);
+
+/**
+ * nanddev_get_ecc_engine() - Find and get a suitable ECC engine
+ * @nand: NAND device
+ */
+static int nanddev_get_ecc_engine(struct nand_device *nand)
+{
+ int engine_type;
+
+ /* Read the user's desired ECC engine/configuration */
+ of_get_nand_ecc_user_config(nand);
+
+ engine_type = nand->ecc.user_conf.engine_type;
+ if (engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
+ engine_type = nand->ecc.defaults.engine_type;
+
+ switch (engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ return 0;
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ nand->ecc.engine = nand_ecc_get_sw_engine(nand);
+ break;
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand);
+ break;
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ nand->ecc.engine = nand_ecc_get_on_host_hw_engine(nand);
+ if (PTR_ERR(nand->ecc.engine) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ break;
+ default:
+ pr_err("Missing ECC engine type\n");
+ }
+
+ if (!nand->ecc.engine)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * nanddev_put_ecc_engine() - Detach and put the in-use ECC engine
+ * @nand: NAND device
+ */
+static int nanddev_put_ecc_engine(struct nand_device *nand)
+{
+ switch (nand->ecc.ctx.conf.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ nand_ecc_put_on_host_hw_engine(nand);
+ break;
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * nanddev_find_ecc_configuration() - Find a suitable ECC configuration
+ * @nand: NAND device
+ */
+static int nanddev_find_ecc_configuration(struct nand_device *nand)
+{
+ int ret;
+
+ if (!nand->ecc.engine)
+ return -ENOTSUPP;
+
+ ret = nand_ecc_init_ctx(nand);
+ if (ret)
+ return ret;
+
+ if (!nand_ecc_is_strong_enough(nand))
+ pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
+ nand->mtd.name);
+
+ return 0;
+}
+
+/**
+ * nanddev_ecc_engine_init() - Initialize an ECC engine for the chip
+ * @nand: NAND device
+ */
+int nanddev_ecc_engine_init(struct nand_device *nand)
+{
+ int ret;
+
+ /* Look for the ECC engine to use */
+ ret = nanddev_get_ecc_engine(nand);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ pr_err("No ECC engine found\n");
+
+ return ret;
+ }
+
+ /* No ECC engine requested */
+ if (!nand->ecc.engine)
+ return 0;
+
+ /* Configure the engine: balance user input and chip requirements */
+ ret = nanddev_find_ecc_configuration(nand);
+ if (ret) {
+ pr_err("No suitable ECC configuration\n");
+ nanddev_put_ecc_engine(nand);
+
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nanddev_ecc_engine_init);
+
+/**
+ * nanddev_ecc_engine_cleanup() - Cleanup ECC engine initializations
+ * @nand: NAND device
+ */
+void nanddev_ecc_engine_cleanup(struct nand_device *nand)
+{
+ if (nand->ecc.engine)
+ nand_ecc_cleanup_ctx(nand);
+
+ nanddev_put_ecc_engine(nand);
+}
+EXPORT_SYMBOL_GPL(nanddev_ecc_engine_cleanup);
+
+/**
+ * nanddev_init() - Initialize a NAND device
+ * @nand: NAND device
+ * @ops: NAND device operations
+ * @owner: NAND device owner
+ *
+ * Initializes a NAND device object. Consistency checks are done on @ops and
+ * @nand->memorg. Also takes care of initializing the BBT.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
+ struct module *owner)
+{
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct nand_memory_organization *memorg = nanddev_get_memorg(nand);
+
+ if (!nand || !ops)
+ return -EINVAL;
+
+ if (!ops->erase || !ops->markbad || !ops->isbad)
+ return -EINVAL;
+
+ if (!memorg->bits_per_cell || !memorg->pagesize ||
+ !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
+ !memorg->planes_per_lun || !memorg->luns_per_target ||
+ !memorg->ntargets)
+ return -EINVAL;
+
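+ /*
+ * fls() rounds each level's capacity up to a power of two so that
+ * eraseblock and LUN indexes can be derived with plain shifts.
+ */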
+ nand->rowconv.eraseblock_addr_shift =
+ fls(memorg->pages_per_eraseblock - 1);
+ nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
+ nand->rowconv.eraseblock_addr_shift;
+
+ nand->ops = ops;
+
+ mtd->type = memorg->bits_per_cell == 1 ?
+ MTD_NANDFLASH : MTD_MLCNANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
+ mtd->writesize = memorg->pagesize;
+ mtd->writebufsize = memorg->pagesize;
+ mtd->oobsize = memorg->oobsize;
+ mtd->size = nanddev_size(nand);
+ mtd->owner = owner;
+
+ return nanddev_bbt_init(nand);
+}
+EXPORT_SYMBOL_GPL(nanddev_init);
+
+/**
+ * nanddev_cleanup() - Release resources allocated in nanddev_init()
+ * @nand: NAND device
+ *
+ * Basically undoes what has been done in nanddev_init().
+ */
+void nanddev_cleanup(struct nand_device *nand)
+{
+ if (nanddev_bbt_is_initialized(nand))
+ nanddev_bbt_cleanup(nand);
+}
+EXPORT_SYMBOL_GPL(nanddev_cleanup);
+
+MODULE_DESCRIPTION("Generic NAND framework");
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/ecc-mtk.c b/drivers/mtd/nand/ecc-mtk.c
new file mode 100644
index 0000000000..c75bb8b80c
--- /dev/null
+++ b/drivers/mtd/nand/ecc-mtk.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * MTK ECC controller driver.
+ * Copyright (C) 2016 MediaTek Inc.
+ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/mutex.h>
+#include <linux/mtd/nand-ecc-mtk.h>
+
+#define ECC_IDLE_MASK BIT(0)
+#define ECC_IRQ_EN BIT(0)
+#define ECC_PG_IRQ_SEL BIT(1)
+#define ECC_OP_ENABLE (1)
+#define ECC_OP_DISABLE (0)
+
+#define ECC_ENCCON (0x00)
+#define ECC_ENCCNFG (0x04)
+#define ECC_MS_SHIFT (16)
+#define ECC_ENCDIADDR (0x08)
+#define ECC_ENCIDLE (0x0C)
+#define ECC_DECCON (0x100)
+#define ECC_DECCNFG (0x104)
+#define DEC_EMPTY_EN BIT(31)
+#define DEC_CNFG_CORRECT (0x3 << 12)
+#define ECC_DECIDLE (0x10C)
+#define ECC_DECENUM0 (0x114)
+
+#define ECC_TIMEOUT (500000)
+
+#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
+#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
+
+#define ECC_ERRMASK_MT7622 GENMASK(4, 0)
+#define ECC_ERRMASK_MT2701 GENMASK(5, 0)
+#define ECC_ERRMASK_MT2712 GENMASK(6, 0)
+
+struct mtk_ecc_caps {
+ u32 err_mask;
+ u32 err_shift;
+ const u8 *ecc_strength;
+ const u32 *ecc_regs;
+ u8 num_ecc_strength;
+ u8 ecc_mode_shift;
+ u32 parity_bits;
+ int pg_irq_sel;
+};
+
+struct mtk_ecc {
+ struct device *dev;
+ const struct mtk_ecc_caps *caps;
+ void __iomem *regs;
+ struct clk *clk;
+
+ struct completion done;
+ struct mutex lock;
+ u32 sectors;
+
+ u8 *eccdata;
+};
+
+/* ecc strength that each IP supports */
+static const u8 ecc_strength_mt2701[] = {
+ 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
+ 40, 44, 48, 52, 56, 60
+};
+
+static const u8 ecc_strength_mt2712[] = {
+ 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
+ 40, 44, 48, 52, 56, 60, 68, 72, 80
+};
+
+static const u8 ecc_strength_mt7622[] = {
+ 4, 6, 8, 10, 12
+};
+
+static const u8 ecc_strength_mt7986[] = {
+ 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24
+};
+
+enum mtk_ecc_regs {
+ ECC_ENCPAR00,
+ ECC_ENCIRQ_EN,
+ ECC_ENCIRQ_STA,
+ ECC_DECDONE,
+ ECC_DECIRQ_EN,
+ ECC_DECIRQ_STA,
+};
+
+static int mt2701_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x10,
+ [ECC_ENCIRQ_EN] = 0x80,
+ [ECC_ENCIRQ_STA] = 0x84,
+ [ECC_DECDONE] = 0x124,
+ [ECC_DECIRQ_EN] = 0x200,
+ [ECC_DECIRQ_STA] = 0x204,
+};
+
+static int mt2712_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x300,
+ [ECC_ENCIRQ_EN] = 0x80,
+ [ECC_ENCIRQ_STA] = 0x84,
+ [ECC_DECDONE] = 0x124,
+ [ECC_DECIRQ_EN] = 0x200,
+ [ECC_DECIRQ_STA] = 0x204,
+};
+
+static int mt7622_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x10,
+ [ECC_ENCIRQ_EN] = 0x30,
+ [ECC_ENCIRQ_STA] = 0x34,
+ [ECC_DECDONE] = 0x11c,
+ [ECC_DECIRQ_EN] = 0x140,
+ [ECC_DECIRQ_STA] = 0x144,
+};
+
+static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
+ enum mtk_ecc_operation op)
+{
+ struct device *dev = ecc->dev;
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val,
+ val & ECC_IDLE_MASK,
+ 10, ECC_TIMEOUT);
+ if (ret)
+ dev_warn(dev, "%s NOT idle\n",
+ op == ECC_ENCODE ? "encoder" : "decoder");
+}
+
+static irqreturn_t mtk_ecc_irq(int irq, void *id)
+{
+ struct mtk_ecc *ecc = id;
+ u32 dec, enc;
+
+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA])
+ & ECC_IRQ_EN;
+ if (dec) {
+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
+ if (dec & ecc->sectors) {
+ /*
+ * Clear decode IRQ status once again to ensure that
+ * there will be no extra IRQ.
+ */
+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]);
+ ecc->sectors = 0;
+ complete(&ecc->done);
+ } else {
+ return IRQ_HANDLED;
+ }
+ } else {
+ enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA])
+ & ECC_IRQ_EN;
+ if (enc)
+ complete(&ecc->done);
+ else
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
+{
+ u32 ecc_bit, dec_sz, enc_sz;
+ u32 reg, i;
+
+ for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
+ if (ecc->caps->ecc_strength[i] == config->strength)
+ break;
+ }
+
+ if (i == ecc->caps->num_ecc_strength) {
+ dev_err(ecc->dev, "invalid ecc strength %d\n",
+ config->strength);
+ return -EINVAL;
+ }
+
+ ecc_bit = i;
+
+ if (config->op == ECC_ENCODE) {
+ /* configure ECC encoder (in bits) */
+ enc_sz = config->len << 3;
+
+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
+ reg |= (enc_sz << ECC_MS_SHIFT);
+ writel(reg, ecc->regs + ECC_ENCCNFG);
+
+ if (config->mode != ECC_NFI_MODE)
+ writel(lower_32_bits(config->addr),
+ ecc->regs + ECC_ENCDIADDR);
+
+ } else {
+ /* configure ECC decoder (in bits) */
+ dec_sz = (config->len << 3) +
+ config->strength * ecc->caps->parity_bits;
+
+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
+ reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
+ reg |= DEC_EMPTY_EN;
+ writel(reg, ecc->regs + ECC_DECCNFG);
+
+ if (config->sectors)
+ ecc->sectors = 1 << (config->sectors - 1);
+ }
+
+ return 0;
+}
+
+void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
+ int sectors)
+{
+ u32 offset, i, err;
+ u32 bitflips = 0;
+
+ stats->corrected = 0;
+ stats->failed = 0;
+
+ for (i = 0; i < sectors; i++) {
+ offset = (i >> 2) << 2;
+ err = readl(ecc->regs + ECC_DECENUM0 + offset);
+ err = err >> ((i % 4) * ecc->caps->err_shift);
+ err &= ecc->caps->err_mask;
+ if (err == ecc->caps->err_mask) {
+ /* uncorrectable errors */
+ stats->failed++;
+ continue;
+ }
+
+ stats->corrected += err;
+ bitflips = max_t(u32, bitflips, err);
+ }
+
+ stats->bitflips = bitflips;
+}
+EXPORT_SYMBOL(mtk_ecc_get_stats);
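+
+/*
+ * Usage sketch (not part of this driver): a NAND controller driver that
+ * owns a struct mtk_ecc would typically pull the statistics right after a
+ * decode completes; the surrounding names (nfc, sectors) are hypothetical:
+ *
+ *     struct mtk_ecc_stats stats;
+ *
+ *     mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
+ *     mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
+ *     mtd->ecc_stats.corrected += stats.corrected;
+ *     mtd->ecc_stats.failed += stats.failed;
+ */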
+
+void mtk_ecc_release(struct mtk_ecc *ecc)
+{
+ clk_disable_unprepare(ecc->clk);
+ put_device(ecc->dev);
+}
+EXPORT_SYMBOL(mtk_ecc_release);
+
+static void mtk_ecc_hw_init(struct mtk_ecc *ecc)
+{
+ mtk_ecc_wait_idle(ecc, ECC_ENCODE);
+ writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON);
+
+ mtk_ecc_wait_idle(ecc, ECC_DECODE);
+ writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON);
+}
+
+static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct mtk_ecc *ecc;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ ecc = platform_get_drvdata(pdev);
+ if (!ecc) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ clk_prepare_enable(ecc->clk);
+ mtk_ecc_hw_init(ecc);
+
+ return ecc;
+}
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
+{
+ struct mtk_ecc *ecc = NULL;
+ struct device_node *np;
+
+ np = of_parse_phandle(of_node, "nand-ecc-engine", 0);
+ /* for backward compatibility */
+ if (!np)
+ np = of_parse_phandle(of_node, "ecc-engine", 0);
+ if (np) {
+ ecc = mtk_ecc_get(np);
+ of_node_put(np);
+ }
+
+ return ecc;
+}
+EXPORT_SYMBOL(of_mtk_ecc_get);
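+
+/*
+ * A sketch of the intended consumer (names hypothetical): a NAND
+ * controller driver resolves its ECC engine from the DT phandle during
+ * probe and defers until the ECC device has bound:
+ *
+ *     nfc->ecc = of_mtk_ecc_get(pdev->dev.of_node);
+ *     if (IS_ERR_OR_NULL(nfc->ecc))
+ *             return nfc->ecc ? PTR_ERR(nfc->ecc) : -ENODEV;
+ *
+ * The matching mtk_ecc_release() drops the clock and the device reference
+ * on the remove / error path.
+ */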
+
+int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
+{
+ enum mtk_ecc_operation op = config->op;
+ u16 reg_val;
+ int ret;
+
+ ret = mutex_lock_interruptible(&ecc->lock);
+ if (ret) {
+ dev_err(ecc->dev, "interrupted when attempting to lock\n");
+ return ret;
+ }
+
+ mtk_ecc_wait_idle(ecc, op);
+
+ ret = mtk_ecc_config(ecc, config);
+ if (ret) {
+ mutex_unlock(&ecc->lock);
+ return ret;
+ }
+
+ if (config->mode != ECC_NFI_MODE || op != ECC_ENCODE) {
+ init_completion(&ecc->done);
+ reg_val = ECC_IRQ_EN;
+ /*
+		 * For ECC_NFI_MODE, if ecc->caps->pg_irq_sel is 1, the chip
+		 * can only generate one ECC IRQ per page read / write. If it
+		 * is 0, one ECC IRQ is generated for each ECC step.
+ */
+ if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE)
+ reg_val |= ECC_PG_IRQ_SEL;
+ if (op == ECC_ENCODE)
+ writew(reg_val, ecc->regs +
+ ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
+ else
+ writew(reg_val, ecc->regs +
+ ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
+ }
+
+ writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
+
+ return 0;
+}
+EXPORT_SYMBOL(mtk_ecc_enable);
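+
+/*
+ * Note the locking scheme: a successful mtk_ecc_enable() takes ecc->lock
+ * and must always be paired with mtk_ecc_disable(), which releases it. A
+ * minimal NFI-mode decode sequence might look like this (caller-side
+ * names hypothetical):
+ *
+ *     config.op = ECC_DECODE;
+ *     ret = mtk_ecc_enable(ecc, &config);
+ *     if (ret)
+ *             return ret;
+ *     ... trigger the page transfer on the NFI side ...
+ *     ret = mtk_ecc_wait_done(ecc, ECC_DECODE);
+ *     mtk_ecc_disable(ecc);
+ */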
+
+void mtk_ecc_disable(struct mtk_ecc *ecc)
+{
+ enum mtk_ecc_operation op = ECC_ENCODE;
+
+ /* find out the running operation */
+ if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE)
+ op = ECC_DECODE;
+
+ /* disable it */
+ mtk_ecc_wait_idle(ecc, op);
+ if (op == ECC_DECODE) {
+ /*
+ * Clear decode IRQ status in case there is a timeout to wait
+ * decode IRQ.
+ */
+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
+ } else {
+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
+ }
+
+ writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
+
+ mutex_unlock(&ecc->lock);
+}
+EXPORT_SYMBOL(mtk_ecc_disable);
+
+int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
+ if (!ret) {
+		dev_err(ecc->dev, "%s timeout - interrupt did not arrive\n",
+ (op == ECC_ENCODE) ? "encoder" : "decoder");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mtk_ecc_wait_done);
+
+int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
+ u8 *data, u32 bytes)
+{
+ dma_addr_t addr;
+ u32 len;
+ int ret;
+
+ addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ecc->dev, addr);
+ if (ret) {
+ dev_err(ecc->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ config->op = ECC_ENCODE;
+ config->addr = addr;
+ ret = mtk_ecc_enable(ecc, config);
+ if (ret) {
+ dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
+ return ret;
+ }
+
+ ret = mtk_ecc_wait_done(ecc, ECC_ENCODE);
+ if (ret)
+ goto timeout;
+
+ mtk_ecc_wait_idle(ecc, ECC_ENCODE);
+
+ /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
+ len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
+
+ /* write the parity bytes generated by the ECC back to temp buffer */
+ __ioread32_copy(ecc->eccdata,
+ ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00],
+ round_up(len, 4));
+
+ /* copy into possibly unaligned OOB region with actual length */
+ memcpy(data + bytes, ecc->eccdata, len);
+timeout:
+
+ dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
+ mtk_ecc_disable(ecc);
+
+ return ret;
+}
+EXPORT_SYMBOL(mtk_ecc_encode);
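+
+/*
+ * A minimal DMA-mode encoding sketch, assuming the caller owns a DMA-able
+ * buffer with room for the parity bytes after the data (field values
+ * hypothetical):
+ *
+ *     config.mode = ECC_DMA_MODE;
+ *     config.strength = chip->ecc.strength;
+ *     config.len = chip->ecc.size;
+ *     ret = mtk_ecc_encode(ecc, &config, buf, chip->ecc.size);
+ *
+ * On success the parity bytes have been appended at buf + bytes, as done
+ * by the memcpy() above.
+ */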
+
+void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p)
+{
+ const u8 *ecc_strength = ecc->caps->ecc_strength;
+ int i;
+
+ for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
+ if (*p <= ecc_strength[i]) {
+ if (!i)
+ *p = ecc_strength[i];
+ else if (*p != ecc_strength[i])
+ *p = ecc_strength[i - 1];
+ return;
+ }
+ }
+
+ *p = ecc_strength[ecc->caps->num_ecc_strength - 1];
+}
+EXPORT_SYMBOL(mtk_ecc_adjust_strength);
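+
+/*
+ * Example of the rounding behaviour: on an mt2701 (strengths 4..60), a
+ * requested strength of 30 is rounded down to the supported 28, while a
+ * request below the minimum (e.g. 2) is rounded up to 4:
+ *
+ *     u32 s = 30;
+ *
+ *     mtk_ecc_adjust_strength(ecc, &s);   s is now 28
+ */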
+
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc)
+{
+ return ecc->caps->parity_bits;
+}
+EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
+
+static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
+ .err_mask = ECC_ERRMASK_MT2701,
+ .err_shift = 8,
+ .ecc_strength = ecc_strength_mt2701,
+ .ecc_regs = mt2701_ecc_regs,
+ .num_ecc_strength = 20,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
+ .pg_irq_sel = 0,
+};
+
+static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
+ .err_mask = ECC_ERRMASK_MT2712,
+ .err_shift = 8,
+ .ecc_strength = ecc_strength_mt2712,
+ .ecc_regs = mt2712_ecc_regs,
+ .num_ecc_strength = 23,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
+ .pg_irq_sel = 1,
+};
+
+static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
+ .err_mask = ECC_ERRMASK_MT7622,
+ .err_shift = 5,
+ .ecc_strength = ecc_strength_mt7622,
+ .ecc_regs = mt7622_ecc_regs,
+ .num_ecc_strength = 5,
+ .ecc_mode_shift = 4,
+ .parity_bits = 13,
+ .pg_irq_sel = 0,
+};
+
+static const struct mtk_ecc_caps mtk_ecc_caps_mt7986 = {
+ .err_mask = ECC_ERRMASK_MT7622,
+ .err_shift = 8,
+ .ecc_strength = ecc_strength_mt7986,
+ .ecc_regs = mt2712_ecc_regs,
+ .num_ecc_strength = 11,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
+ .pg_irq_sel = 1,
+};
+
+static const struct of_device_id mtk_ecc_dt_match[] = {
+ {
+ .compatible = "mediatek,mt2701-ecc",
+ .data = &mtk_ecc_caps_mt2701,
+ }, {
+ .compatible = "mediatek,mt2712-ecc",
+ .data = &mtk_ecc_caps_mt2712,
+ }, {
+ .compatible = "mediatek,mt7622-ecc",
+ .data = &mtk_ecc_caps_mt7622,
+ }, {
+ .compatible = "mediatek,mt7986-ecc",
+ .data = &mtk_ecc_caps_mt7986,
+ },
+ {},
+};
+
+static int mtk_ecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_ecc *ecc;
+ u32 max_eccdata_size;
+ int irq, ret;
+
+ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
+ if (!ecc)
+ return -ENOMEM;
+
+ ecc->caps = of_device_get_match_data(dev);
+
+ max_eccdata_size = ecc->caps->num_ecc_strength - 1;
+ max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
+ max_eccdata_size = (max_eccdata_size * ecc->caps->parity_bits + 7) >> 3;
+ max_eccdata_size = round_up(max_eccdata_size, 4);
+ ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL);
+ if (!ecc->eccdata)
+ return -ENOMEM;
+
+ ecc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ecc->regs))
+ return PTR_ERR(ecc->regs);
+
+ ecc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ecc->clk)) {
+ dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
+ return PTR_ERR(ecc->clk);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set DMA mask\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return -EINVAL;
+ }
+
+ ecc->dev = dev;
+ mutex_init(&ecc->lock);
+ platform_set_drvdata(pdev, ecc);
+ dev_info(dev, "probed\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_ecc_suspend(struct device *dev)
+{
+ struct mtk_ecc *ecc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(ecc->clk);
+
+ return 0;
+}
+
+static int mtk_ecc_resume(struct device *dev)
+{
+ struct mtk_ecc *ecc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(ecc->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
+#endif
+
+MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);
+
+static struct platform_driver mtk_ecc_driver = {
+ .probe = mtk_ecc_probe,
+ .driver = {
+ .name = "mtk-ecc",
+ .of_match_table = mtk_ecc_dt_match,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &mtk_ecc_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(mtk_ecc_driver);
+
+MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
+MODULE_DESCRIPTION("MTK Nand ECC Driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c
new file mode 100644
index 0000000000..47e10945b8
--- /dev/null
+++ b/drivers/mtd/nand/ecc-mxic.c
@@ -0,0 +1,878 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Macronix external hardware ECC engine for NAND devices, also
+ * called DPE for Data Processing Engine.
+ *
+ * Copyright © 2019 Macronix
+ * Author: Miquel Raynal <miquel.raynal@bootlin.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand-ecc-mxic.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* DPE Configuration */
+#define DP_CONFIG 0x00
+#define ECC_EN BIT(0)
+#define ECC_TYP(idx) (((idx) << 3) & GENMASK(6, 3))
+/* DPE Interrupt Status */
+#define INTRPT_STS 0x04
+#define TRANS_CMPLT BIT(0)
+#define SDMA_MAIN BIT(1)
+#define SDMA_SPARE BIT(2)
+#define ECC_ERR BIT(3)
+#define TO_SPARE BIT(4)
+#define TO_MAIN BIT(5)
+/* DPE Interrupt Status Enable */
+#define INTRPT_STS_EN 0x08
+/* DPE Interrupt Signal Enable */
+#define INTRPT_SIG_EN 0x0C
+/* Host Controller Configuration */
+#define HC_CONFIG 0x10
+#define DEV2MEM 0 /* TRANS_TYP_DMA in the spec */
+#define MEM2MEM BIT(4) /* TRANS_TYP_IO in the spec */
+#define MAPPING BIT(5) /* TRANS_TYP_MAPPING in the spec */
+#define ECC_PACKED 0 /* LAYOUT_TYP_INTEGRATED in the spec */
+#define ECC_INTERLEAVED BIT(2) /* LAYOUT_TYP_DISTRIBUTED in the spec */
+#define BURST_TYP_FIXED 0
+#define BURST_TYP_INCREASING BIT(0)
+/* Host Controller Slave Address */
+#define HC_SLV_ADDR 0x14
+/* ECC Chunk Size */
+#define CHUNK_SIZE 0x20
+/* Main Data Size */
+#define MAIN_SIZE 0x24
+/* Spare Data Size */
+#define SPARE_SIZE 0x28
+#define META_SZ(reg) ((reg) & GENMASK(7, 0))
+#define PARITY_SZ(reg) (((reg) & GENMASK(15, 8)) >> 8)
+#define RSV_SZ(reg) (((reg) & GENMASK(23, 16)) >> 16)
+#define SPARE_SZ(reg) ((reg) >> 24)
+/* ECC Chunk Count */
+#define CHUNK_CNT 0x30
+/* SDMA Control */
+#define SDMA_CTRL 0x40
+#define WRITE_NAND 0
+#define READ_NAND BIT(1)
+#define CONT_NAND BIT(29)
+#define CONT_SYSM BIT(30) /* Continue System Memory? */
+#define SDMA_STRT BIT(31)
+/* SDMA Address of Main Data */
+#define SDMA_MAIN_ADDR 0x44
+/* SDMA Address of Spare Data */
+#define SDMA_SPARE_ADDR 0x48
+/* DPE Version Number */
+#define DP_VER 0xD0
+#define DP_VER_OFFSET 16
+
+/* Status bytes between each chunk of spare data */
+#define STAT_BYTES 4
+#define NO_ERR 0x00
+#define MAX_CORR_ERR 0x28
+#define UNCORR_ERR 0xFE
+#define ERASED_CHUNK 0xFF
+
+struct mxic_ecc_engine {
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+ struct completion complete;
+ struct nand_ecc_engine external_engine;
+ struct nand_ecc_engine pipelined_engine;
+ struct mutex lock;
+};
+
+struct mxic_ecc_ctx {
+ /* ECC machinery */
+ unsigned int data_step_sz;
+ unsigned int oob_step_sz;
+ unsigned int parity_sz;
+ unsigned int meta_sz;
+ u8 *status;
+ int steps;
+
+ /* DMA boilerplate */
+ struct nand_ecc_req_tweak_ctx req_ctx;
+ u8 *oobwithstat;
+ struct scatterlist sg[2];
+ struct nand_page_io_req *req;
+ unsigned int pageoffs;
+};
+
+static struct mxic_ecc_engine *ext_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
+{
+ return container_of(eng, struct mxic_ecc_engine, external_engine);
+}
+
+static struct mxic_ecc_engine *pip_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
+{
+ return container_of(eng, struct mxic_ecc_engine, pipelined_engine);
+}
+
+static struct mxic_ecc_engine *nand_to_mxic(struct nand_device *nand)
+{
+ struct nand_ecc_engine *eng = nand->ecc.engine;
+
+ if (eng->integration == NAND_ECC_ENGINE_INTEGRATION_EXTERNAL)
+ return ext_ecc_eng_to_mxic(eng);
+ else
+ return pip_ecc_eng_to_mxic(eng);
+}
+
+static int mxic_ecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+
+ if (section < 0 || section >= ctx->steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * ctx->oob_step_sz) + ctx->meta_sz;
+ oobregion->length = ctx->parity_sz;
+
+ return 0;
+}
+
+static int mxic_ecc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+
+ if (section < 0 || section >= ctx->steps)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 2;
+ oobregion->length = ctx->meta_sz - 2;
+ } else {
+ oobregion->offset = section * ctx->oob_step_sz;
+ oobregion->length = ctx->meta_sz;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mxic_ecc_ooblayout_ops = {
+ .ecc = mxic_ecc_ooblayout_ecc,
+ .free = mxic_ecc_ooblayout_free,
+};
+
+static void mxic_ecc_disable_engine(struct mxic_ecc_engine *mxic)
+{
+ u32 reg;
+
+ reg = readl(mxic->regs + DP_CONFIG);
+ reg &= ~ECC_EN;
+ writel(reg, mxic->regs + DP_CONFIG);
+}
+
+static void mxic_ecc_enable_engine(struct mxic_ecc_engine *mxic)
+{
+ u32 reg;
+
+ reg = readl(mxic->regs + DP_CONFIG);
+ reg |= ECC_EN;
+ writel(reg, mxic->regs + DP_CONFIG);
+}
+
+static void mxic_ecc_disable_int(struct mxic_ecc_engine *mxic)
+{
+ writel(0, mxic->regs + INTRPT_SIG_EN);
+}
+
+static void mxic_ecc_enable_int(struct mxic_ecc_engine *mxic)
+{
+ writel(TRANS_CMPLT, mxic->regs + INTRPT_SIG_EN);
+}
+
+static irqreturn_t mxic_ecc_isr(int irq, void *dev_id)
+{
+ struct mxic_ecc_engine *mxic = dev_id;
+ u32 sts;
+
+ sts = readl(mxic->regs + INTRPT_STS);
+ if (!sts)
+ return IRQ_NONE;
+
+ if (sts & TRANS_CMPLT)
+ complete(&mxic->complete);
+
+ writel(sts, mxic->regs + INTRPT_STS);
+
+ return IRQ_HANDLED;
+}
+
+static int mxic_ecc_init_ctx(struct nand_device *nand, struct device *dev)
+{
+ struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
+ struct nand_ecc_props *reqs = &nand->ecc.requirements;
+ struct nand_ecc_props *user = &nand->ecc.user_conf;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int step_size = 0, strength = 0, desired_correction = 0, steps, idx;
+ static const int possible_strength[] = {4, 8, 40, 48};
+ static const int spare_size[] = {32, 32, 96, 96};
+ struct mxic_ecc_ctx *ctx;
+ u32 spare_reg;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ nand->ecc.ctx.priv = ctx;
+
+ /* Only large page NAND chips may use BCH */
+ if (mtd->oobsize < 64) {
+ pr_err("BCH cannot be used with small page NAND chips\n");
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, &mxic_ecc_ooblayout_ops);
+
+ /* Enable all status bits */
+ writel(TRANS_CMPLT | SDMA_MAIN | SDMA_SPARE | ECC_ERR |
+ TO_SPARE | TO_MAIN, mxic->regs + INTRPT_STS_EN);
+
+ /* Configure the correction depending on the NAND device topology */
+ if (user->step_size && user->strength) {
+ step_size = user->step_size;
+ strength = user->strength;
+ } else if (reqs->step_size && reqs->strength) {
+ step_size = reqs->step_size;
+ strength = reqs->strength;
+ }
+
+ if (step_size && strength) {
+ steps = mtd->writesize / step_size;
+ desired_correction = steps * strength;
+ }
+
+ /* Step size is fixed to 1kiB, strength may vary (4 possible values) */
+ conf->step_size = SZ_1K;
+ steps = mtd->writesize / conf->step_size;
+
+ ctx->status = devm_kzalloc(dev, steps * sizeof(u8), GFP_KERNEL);
+ if (!ctx->status)
+ return -ENOMEM;
+
+ if (desired_correction) {
+ strength = desired_correction / steps;
+
+ for (idx = 0; idx < ARRAY_SIZE(possible_strength); idx++)
+ if (possible_strength[idx] >= strength)
+ break;
+
+ idx = min_t(unsigned int, idx,
+ ARRAY_SIZE(possible_strength) - 1);
+ } else {
+ /* Missing data, maximize the correction */
+ idx = ARRAY_SIZE(possible_strength) - 1;
+ }
+
+ /* Tune the selected strength until it fits in the OOB area */
+ for (; idx >= 0; idx--) {
+ if (spare_size[idx] * steps <= mtd->oobsize)
+ break;
+ }
+
+ /* This engine cannot be used with this NAND device */
+ if (idx < 0)
+ return -EINVAL;
+
+ /* Configure the engine for the desired strength */
+ writel(ECC_TYP(idx), mxic->regs + DP_CONFIG);
+ conf->strength = possible_strength[idx];
+ spare_reg = readl(mxic->regs + SPARE_SIZE);
+
+ ctx->steps = steps;
+ ctx->data_step_sz = mtd->writesize / steps;
+ ctx->oob_step_sz = mtd->oobsize / steps;
+ ctx->parity_sz = PARITY_SZ(spare_reg);
+ ctx->meta_sz = META_SZ(spare_reg);
+
+ /* Ensure buffers will contain enough bytes to store the STAT_BYTES */
+ ctx->req_ctx.oob_buffer_size = nanddev_per_page_oobsize(nand) +
+ (ctx->steps * STAT_BYTES);
+ ret = nand_ecc_init_req_tweaking(&ctx->req_ctx, nand);
+ if (ret)
+ return ret;
+
+ ctx->oobwithstat = kmalloc(mtd->oobsize + (ctx->steps * STAT_BYTES),
+ GFP_KERNEL);
+ if (!ctx->oobwithstat) {
+ ret = -ENOMEM;
+ goto cleanup_req_tweak;
+ }
+
+ sg_init_table(ctx->sg, 2);
+
+ /* Configuration dump and sanity checks */
+ dev_err(dev, "DPE version number: %d\n",
+ readl(mxic->regs + DP_VER) >> DP_VER_OFFSET);
+ dev_err(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE));
+ dev_err(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE));
+ dev_err(dev, "Spare size: %d\n", SPARE_SZ(spare_reg));
+ dev_err(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg));
+ dev_err(dev, "Parity size: %d\n", ctx->parity_sz);
+ dev_err(dev, "Meta size: %d\n", ctx->meta_sz);
+
+ if ((ctx->meta_sz + ctx->parity_sz + RSV_SZ(spare_reg)) !=
+ SPARE_SZ(spare_reg)) {
+ dev_err(dev, "Wrong OOB configuration: %d + %d + %ld != %d\n",
+ ctx->meta_sz, ctx->parity_sz, RSV_SZ(spare_reg),
+ SPARE_SZ(spare_reg));
+ ret = -EINVAL;
+ goto free_oobwithstat;
+ }
+
+ if (ctx->oob_step_sz != SPARE_SZ(spare_reg)) {
+ dev_err(dev, "Wrong OOB configuration: %d != %d\n",
+ ctx->oob_step_sz, SPARE_SZ(spare_reg));
+ ret = -EINVAL;
+ goto free_oobwithstat;
+ }
+
+ return 0;
+
+free_oobwithstat:
+ kfree(ctx->oobwithstat);
+cleanup_req_tweak:
+ nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
+
+ return ret;
+}
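+
+/*
+ * Worked example of the selection above (hypothetical device): with a
+ * 2 KiB page, 64 B OOB and a 4-bit/512 B requirement, desired_correction
+ * is 4 * 4 = 16 over two 1 KiB steps, i.e. 8 bits per step; the smallest
+ * fitting entry is possible_strength[1] = 8, whose 32 B spare area per
+ * step (2 * 32 = 64 B) fits the OOB exactly.
+ */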
+
+static int mxic_ecc_init_ctx_external(struct nand_device *nand)
+{
+ struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
+ struct device *dev = nand->ecc.engine->dev;
+ int ret;
+
+ dev_info(dev, "Macronix ECC engine in external mode\n");
+
+ ret = mxic_ecc_init_ctx(nand, dev);
+ if (ret)
+ return ret;
+
+ /* Trigger each step manually */
+ writel(1, mxic->regs + CHUNK_CNT);
+ writel(BURST_TYP_INCREASING | ECC_PACKED | MEM2MEM,
+ mxic->regs + HC_CONFIG);
+
+ return 0;
+}
+
+static int mxic_ecc_init_ctx_pipelined(struct nand_device *nand)
+{
+ struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
+ struct mxic_ecc_ctx *ctx;
+ struct device *dev;
+ int ret;
+
+ dev = nand_ecc_get_engine_dev(nand->ecc.engine->dev);
+ if (!dev)
+ return -EINVAL;
+
+ dev_info(dev, "Macronix ECC engine in pipelined/mapping mode\n");
+
+ ret = mxic_ecc_init_ctx(nand, dev);
+ if (ret)
+ return ret;
+
+ ctx = nand_to_ecc_ctx(nand);
+
+ /* All steps should be handled in one go directly by the internal DMA */
+ writel(ctx->steps, mxic->regs + CHUNK_CNT);
+
+ /*
+ * Interleaved ECC scheme cannot be used otherwise factory bad block
+ * markers would be lost. A packed layout is mandatory.
+ */
+ writel(BURST_TYP_INCREASING | ECC_PACKED | MAPPING,
+ mxic->regs + HC_CONFIG);
+
+ return 0;
+}
+
+static void mxic_ecc_cleanup_ctx(struct nand_device *nand)
+{
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+
+ if (ctx) {
+ nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
+ kfree(ctx->oobwithstat);
+ }
+}
+
+static int mxic_ecc_data_xfer_wait_for_completion(struct mxic_ecc_engine *mxic)
+{
+ u32 val;
+ int ret;
+
+ if (mxic->irq) {
+ reinit_completion(&mxic->complete);
+ mxic_ecc_enable_int(mxic);
+ ret = wait_for_completion_timeout(&mxic->complete,
+ msecs_to_jiffies(1000));
+ ret = ret ? 0 : -ETIMEDOUT;
+ mxic_ecc_disable_int(mxic);
+ } else {
+ ret = readl_poll_timeout(mxic->regs + INTRPT_STS, val,
+ val & TRANS_CMPLT, 10, USEC_PER_SEC);
+ writel(val, mxic->regs + INTRPT_STS);
+ }
+
+ if (ret) {
+ dev_err(mxic->dev, "Timeout on data xfer completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int mxic_ecc_process_data(struct mxic_ecc_engine *mxic,
+ unsigned int direction)
+{
+ unsigned int dir = (direction == NAND_PAGE_READ) ?
+ READ_NAND : WRITE_NAND;
+ int ret;
+
+ mxic_ecc_enable_engine(mxic);
+
+ /* Trigger processing */
+ writel(SDMA_STRT | dir, mxic->regs + SDMA_CTRL);
+
+ /* Wait for completion */
+ ret = mxic_ecc_data_xfer_wait_for_completion(mxic);
+
+ mxic_ecc_disable_engine(mxic);
+
+ return ret;
+}
+
+int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
+ unsigned int direction, dma_addr_t dirmap)
+{
+ struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);
+
+ if (dirmap)
+ writel(dirmap, mxic->regs + HC_SLV_ADDR);
+
+ return mxic_ecc_process_data(mxic, direction);
+}
+EXPORT_SYMBOL_GPL(mxic_ecc_process_data_pipelined);
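+
+/*
+ * A sketch of the expected caller: the host controller driver that wraps
+ * the pipelined engine triggers a corrected page access through its
+ * direct-mapping window (names hypothetical):
+ *
+ *     ret = mxic_ecc_process_data_pipelined(nand->ecc.engine,
+ *                                           NAND_PAGE_READ, dirmap_phys);
+ */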
+
+static void mxic_ecc_extract_status_bytes(struct mxic_ecc_ctx *ctx)
+{
+ u8 *buf = ctx->oobwithstat;
+ int next_stat_pos;
+ int step;
+
+ /* Extract the ECC status */
+ for (step = 0; step < ctx->steps; step++) {
+ next_stat_pos = ctx->oob_step_sz +
+ ((STAT_BYTES + ctx->oob_step_sz) * step);
+
+ ctx->status[step] = buf[next_stat_pos];
+ }
+}
+
+static void mxic_ecc_reconstruct_oobbuf(struct mxic_ecc_ctx *ctx,
+ u8 *dst, const u8 *src)
+{
+ int step;
+
+ /* Reconstruct the OOB buffer linearly (without the ECC status bytes) */
+ for (step = 0; step < ctx->steps; step++)
+ memcpy(dst + (step * ctx->oob_step_sz),
+ src + (step * (ctx->oob_step_sz + STAT_BYTES)),
+ ctx->oob_step_sz);
+}
+
+static void mxic_ecc_add_room_in_oobbuf(struct mxic_ecc_ctx *ctx,
+ u8 *dst, const u8 *src)
+{
+ int step;
+
+ /* Add some space in the OOB buffer for the status bytes */
+ for (step = 0; step < ctx->steps; step++)
+ memcpy(dst + (step * (ctx->oob_step_sz + STAT_BYTES)),
+ src + (step * ctx->oob_step_sz),
+ ctx->oob_step_sz);
+}
+
+static int mxic_ecc_count_biterrs(struct mxic_ecc_engine *mxic,
+ struct nand_device *nand)
+{
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct device *dev = mxic->dev;
+ unsigned int max_bf = 0;
+ bool failure = false;
+ int step;
+
+ for (step = 0; step < ctx->steps; step++) {
+ u8 stat = ctx->status[step];
+
+ if (stat == NO_ERR) {
+ dev_dbg(dev, "ECC step %d: no error\n", step);
+ } else if (stat == ERASED_CHUNK) {
+ dev_dbg(dev, "ECC step %d: erased\n", step);
+ } else if (stat == UNCORR_ERR || stat > MAX_CORR_ERR) {
+ dev_dbg(dev, "ECC step %d: uncorrectable\n", step);
+ mtd->ecc_stats.failed++;
+ failure = true;
+ } else {
+ dev_dbg(dev, "ECC step %d: %d bits corrected\n",
+ step, stat);
+ max_bf = max_t(unsigned int, max_bf, stat);
+ mtd->ecc_stats.corrected += stat;
+ }
+ }
+
+ return failure ? -EBADMSG : max_bf;
+}
+
+/* External ECC engine helpers */
+static int mxic_ecc_prepare_io_req_external(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int offset, nents, step, ret;
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ nand_ecc_tweak_req(&ctx->req_ctx, req);
+ ctx->req = req;
+
+ if (req->type == NAND_PAGE_READ)
+ return 0;
+
+ mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat,
+ ctx->req->oobbuf.out);
+
+ sg_set_buf(&ctx->sg[0], req->databuf.out, req->datalen);
+ sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
+ req->ooblen + (ctx->steps * STAT_BYTES));
+
+ nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
+ if (!nents)
+ return -EINVAL;
+
+ mutex_lock(&mxic->lock);
+
+ for (step = 0; step < ctx->steps; step++) {
+ writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
+ mxic->regs + SDMA_MAIN_ADDR);
+ writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
+ mxic->regs + SDMA_SPARE_ADDR);
+ ret = mxic_ecc_process_data(mxic, ctx->req->type);
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&mxic->lock);
+
+ dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
+
+ if (ret)
+ return ret;
+
+ /* Retrieve the calculated ECC bytes */
+ for (step = 0; step < ctx->steps; step++) {
+ offset = ctx->meta_sz + (step * ctx->oob_step_sz);
+ mtd_ooblayout_get_eccbytes(mtd,
+ (u8 *)ctx->req->oobbuf.out + offset,
+ ctx->oobwithstat + (step * STAT_BYTES),
+ step * ctx->parity_sz,
+ ctx->parity_sz);
+ }
+
+ return 0;
+}
+
+static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+ int nents, step, ret;
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ if (req->type == NAND_PAGE_WRITE) {
+ nand_ecc_restore_req(&ctx->req_ctx, req);
+ return 0;
+ }
+
+ /* Copy the OOB buffer and add room for the ECC engine status bytes */
+ mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);
+
+ sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
+ sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
+ req->ooblen + (ctx->steps * STAT_BYTES));
+ nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
+ if (!nents)
+ return -EINVAL;
+
+ mutex_lock(&mxic->lock);
+
+ for (step = 0; step < ctx->steps; step++) {
+ writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
+ mxic->regs + SDMA_MAIN_ADDR);
+ writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
+ mxic->regs + SDMA_SPARE_ADDR);
+ ret = mxic_ecc_process_data(mxic, ctx->req->type);
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&mxic->lock);
+
+ dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
+
+ if (ret) {
+ nand_ecc_restore_req(&ctx->req_ctx, req);
+ return ret;
+ }
+
+ /* Extract the status bytes and reconstruct the buffer */
+ mxic_ecc_extract_status_bytes(ctx);
+ mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in, ctx->oobwithstat);
+
+ nand_ecc_restore_req(&ctx->req_ctx, req);
+
+ return mxic_ecc_count_biterrs(mxic, nand);
+}
+
+/* Pipelined ECC engine helpers */
+static int mxic_ecc_prepare_io_req_pipelined(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+ int nents;
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ nand_ecc_tweak_req(&ctx->req_ctx, req);
+ ctx->req = req;
+
+ /* Copy the OOB buffer and add room for the ECC engine status bytes */
+ mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);
+
+ sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
+ sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
+ req->ooblen + (ctx->steps * STAT_BYTES));
+
+ nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
+ if (!nents)
+ return -EINVAL;
+
+ mutex_lock(&mxic->lock);
+
+ writel(sg_dma_address(&ctx->sg[0]), mxic->regs + SDMA_MAIN_ADDR);
+ writel(sg_dma_address(&ctx->sg[1]), mxic->regs + SDMA_SPARE_ADDR);
+
+ return 0;
+}
+
+static int mxic_ecc_finish_io_req_pipelined(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+ int ret = 0;
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ mutex_unlock(&mxic->lock);
+
+ dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
+
+ if (req->type == NAND_PAGE_READ) {
+ mxic_ecc_extract_status_bytes(ctx);
+ mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in,
+ ctx->oobwithstat);
+ ret = mxic_ecc_count_biterrs(mxic, nand);
+ }
+
+ nand_ecc_restore_req(&ctx->req_ctx, req);
+
+ return ret;
+}
+
+static struct nand_ecc_engine_ops mxic_ecc_engine_external_ops = {
+ .init_ctx = mxic_ecc_init_ctx_external,
+ .cleanup_ctx = mxic_ecc_cleanup_ctx,
+ .prepare_io_req = mxic_ecc_prepare_io_req_external,
+ .finish_io_req = mxic_ecc_finish_io_req_external,
+};
+
+static struct nand_ecc_engine_ops mxic_ecc_engine_pipelined_ops = {
+ .init_ctx = mxic_ecc_init_ctx_pipelined,
+ .cleanup_ctx = mxic_ecc_cleanup_ctx,
+ .prepare_io_req = mxic_ecc_prepare_io_req_pipelined,
+ .finish_io_req = mxic_ecc_finish_io_req_pipelined,
+};
+
+struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
+{
+ return &mxic_ecc_engine_pipelined_ops;
+}
+EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_ops);
+
+static struct platform_device *
+mxic_ecc_get_pdev(struct platform_device *spi_pdev)
+{
+ struct platform_device *eng_pdev;
+ struct device_node *np;
+
+ /* Retrieve the nand-ecc-engine phandle */
+ np = of_parse_phandle(spi_pdev->dev.of_node, "nand-ecc-engine", 0);
+ if (!np)
+ return NULL;
+
+ /* Jump to the engine's device node */
+ eng_pdev = of_find_device_by_node(np);
+ of_node_put(np);
+
+ return eng_pdev;
+}
+
+void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng)
+{
+ struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);
+
+ platform_device_put(to_platform_device(mxic->dev));
+}
+EXPORT_SYMBOL_GPL(mxic_ecc_put_pipelined_engine);
+
+struct nand_ecc_engine *
+mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev)
+{
+ struct platform_device *eng_pdev;
+ struct mxic_ecc_engine *mxic;
+
+ eng_pdev = mxic_ecc_get_pdev(spi_pdev);
+ if (!eng_pdev)
+ return ERR_PTR(-ENODEV);
+
+ mxic = platform_get_drvdata(eng_pdev);
+ if (!mxic) {
+ platform_device_put(eng_pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return &mxic->pipelined_engine;
+}
+EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_engine);
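+
+/*
+ * Taken together with mxic_ecc_get_pipelined_ops(), a wrapping SPI host
+ * controller driver might hook the engine up in its probe path roughly
+ * like this (a sketch with error handling trimmed; names hypothetical):
+ *
+ *     eng = mxic_ecc_get_pipelined_engine(pdev);
+ *     if (IS_ERR(eng))
+ *             return PTR_ERR(eng);
+ *     eng->ops = mxic_ecc_get_pipelined_ops();
+ *     ...
+ *     mxic_ecc_put_pipelined_engine(eng);   on the remove path
+ */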
+
+/*
+ * Only the external ECC engine is exported here; the pipelined one is SoC
+ * specific, so it is registered directly by the drivers that wrap it.
+ */
+static int mxic_ecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mxic_ecc_engine *mxic;
+ int ret;
+
+ mxic = devm_kzalloc(&pdev->dev, sizeof(*mxic), GFP_KERNEL);
+ if (!mxic)
+ return -ENOMEM;
+
+ mxic->dev = &pdev->dev;
+
+ /*
+ * Both memory regions for the ECC engine itself and the AXI slave
+ * address are mandatory.
+ */
+ mxic->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mxic->regs)) {
+ dev_err(&pdev->dev, "Missing memory region\n");
+ return PTR_ERR(mxic->regs);
+ }
+
+ mxic_ecc_disable_engine(mxic);
+ mxic_ecc_disable_int(mxic);
+
+ /* IRQ is optional yet much more efficient */
+ mxic->irq = platform_get_irq_byname_optional(pdev, "ecc-engine");
+ if (mxic->irq > 0) {
+ ret = devm_request_irq(&pdev->dev, mxic->irq, mxic_ecc_isr, 0,
+ "mxic-ecc", mxic);
+ if (ret)
+ return ret;
+ } else {
+ dev_info(dev, "Invalid or missing IRQ, fallback to polling\n");
+ mxic->irq = 0;
+ }
+
+ mutex_init(&mxic->lock);
+
+ /*
+ * In external mode, the device is the ECC engine. In pipelined mode,
+ * the device is the host controller. The device is used to match the
+ * right ECC engine based on the DT properties.
+ */
+ mxic->external_engine.dev = &pdev->dev;
+ mxic->external_engine.integration = NAND_ECC_ENGINE_INTEGRATION_EXTERNAL;
+ mxic->external_engine.ops = &mxic_ecc_engine_external_ops;
+
+ nand_ecc_register_on_host_hw_engine(&mxic->external_engine);
+
+ platform_set_drvdata(pdev, mxic);
+
+ return 0;
+}
+
+static void mxic_ecc_remove(struct platform_device *pdev)
+{
+ struct mxic_ecc_engine *mxic = platform_get_drvdata(pdev);
+
+ nand_ecc_unregister_on_host_hw_engine(&mxic->external_engine);
+}
+
+static const struct of_device_id mxic_ecc_of_ids[] = {
+ {
+ .compatible = "mxicy,nand-ecc-engine-rev3",
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mxic_ecc_of_ids);
+
+static struct platform_driver mxic_ecc_driver = {
+ .driver = {
+ .name = "mxic-nand-ecc-engine",
+ .of_match_table = mxic_ecc_of_ids,
+ },
+ .probe = mxic_ecc_probe,
+ .remove_new = mxic_ecc_remove,
+};
+module_platform_driver(mxic_ecc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Macronix NAND hardware ECC controller");
diff --git a/drivers/mtd/nand/ecc-sw-bch.c b/drivers/mtd/nand/ecc-sw-bch.c
new file mode 100644
index 0000000000..405552d014
--- /dev/null
+++ b/drivers/mtd/nand/ecc-sw-bch.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file provides ECC correction for more than 1 bit per block of data,
+ * using binary BCH codes. It relies on the generic BCH library lib/bch.c.
+ *
+ * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand-ecc-sw-bch.h>
+
+/**
+ * nand_ecc_sw_bch_calculate - Calculate the ECC corresponding to a data block
+ * @nand: NAND device
+ * @buf: Input buffer with raw data
+ * @code: Output buffer with ECC
+ */
+int nand_ecc_sw_bch_calculate(struct nand_device *nand,
+ const unsigned char *buf, unsigned char *code)
+{
+ struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
+ unsigned int i;
+
+ memset(code, 0, engine_conf->code_size);
+ bch_encode(engine_conf->bch, buf, nand->ecc.ctx.conf.step_size, code);
+
+ /* apply mask so that an erased page is a valid codeword */
+ for (i = 0; i < engine_conf->code_size; i++)
+ code[i] ^= engine_conf->eccmask[i];
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_ecc_sw_bch_calculate);
+
+/**
+ * nand_ecc_sw_bch_correct - Detect, correct and report bit error(s)
+ * @nand: NAND device
+ * @buf: Raw data read from the chip
+ * @read_ecc: ECC bytes from the chip
+ * @calc_ecc: ECC calculated from the raw data
+ *
+ * Detect and correct bit errors for a data block.
+ */
+int nand_ecc_sw_bch_correct(struct nand_device *nand, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
+ unsigned int step_size = nand->ecc.ctx.conf.step_size;
+ unsigned int *errloc = engine_conf->errloc;
+ int i, count;
+
+ count = bch_decode(engine_conf->bch, NULL, step_size, read_ecc,
+ calc_ecc, NULL, errloc);
+ if (count > 0) {
+ for (i = 0; i < count; i++) {
+ if (errloc[i] < (step_size * 8))
+ /* The error is in the data area: correct it */
+ buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
+
+ /* Otherwise the error is in the ECC area: nothing to do */
+ pr_debug("%s: corrected bitflip %u\n", __func__,
+ errloc[i]);
+ }
+ } else if (count < 0) {
+ pr_err("ECC unrecoverable error\n");
+ count = -EBADMSG;
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(nand_ecc_sw_bch_correct);
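+
+/*
+ * A minimal round trip, assuming the BCH context was set up through
+ * nand_ecc_sw_bch_init_ctx() (buffer names and the 7-byte code size are
+ * hypothetical):
+ *
+ *     u8 stored[7], calc[7];
+ *     int n;
+ *
+ *     nand_ecc_sw_bch_calculate(nand, data, stored);   before the write
+ *     ...
+ *     nand_ecc_sw_bch_calculate(nand, data, calc);     after the read
+ *     n = nand_ecc_sw_bch_correct(nand, data, stored, calc);
+ *
+ * A negative n means -EBADMSG (uncorrectable); otherwise n bitflips were
+ * repaired in place.
+ */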
+
+/**
+ * nand_ecc_sw_bch_cleanup - Cleanup software BCH ECC resources
+ * @nand: NAND device
+ */
+static void nand_ecc_sw_bch_cleanup(struct nand_device *nand)
+{
+ struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
+
+ bch_free(engine_conf->bch);
+ kfree(engine_conf->errloc);
+ kfree(engine_conf->eccmask);
+}
+
+/**
+ * nand_ecc_sw_bch_init - Initialize software BCH ECC engine
+ * @nand: NAND device
+ *
+ * Returns: 0 on success, a negative error code otherwise
+ *
+ * Initialize NAND BCH error correction. @nand.ecc parameters 'step_size' and
+ * 'bytes' are used to compute the following BCH parameters:
+ * m, the Galois field order
+ * t, the error correction capability
+ * 'bytes' should be equal to the number of bytes required to store m * t
+ * bits, where m is such that 2^m - 1 > step_size * 8.
+ *
+ * Example: to configure 4 bit correction per 512 bytes, you should pass
+ * step_size = 512 (thus, m = 13 is the smallest integer such that 2^m - 1 > 512 * 8)
+ * bytes = 7 (7 bytes are required to store m * t = 13 * 4 = 52 bits)
+ */
+static int nand_ecc_sw_bch_init(struct nand_device *nand)
+{
+ struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
+ unsigned int eccsize = nand->ecc.ctx.conf.step_size;
+ unsigned int eccbytes = engine_conf->code_size;
+ unsigned int m, t, i;
+ unsigned char *erased_page;
+ int ret;
+
+ m = fls(1 + (8 * eccsize));
+ t = (eccbytes * 8) / m;
+
+ engine_conf->bch = bch_init(m, t, 0, false);
+ if (!engine_conf->bch)
+ return -EINVAL;
+
+ engine_conf->eccmask = kzalloc(eccbytes, GFP_KERNEL);
+ engine_conf->errloc = kmalloc_array(t, sizeof(*engine_conf->errloc),
+ GFP_KERNEL);
+ if (!engine_conf->eccmask || !engine_conf->errloc) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* Compute and store the inverted ECC of an erased step */
+ erased_page = kmalloc(eccsize, GFP_KERNEL);
+ if (!erased_page) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ memset(erased_page, 0xff, eccsize);
+ bch_encode(engine_conf->bch, erased_page, eccsize,
+ engine_conf->eccmask);
+ kfree(erased_page);
+
+ for (i = 0; i < eccbytes; i++)
+ engine_conf->eccmask[i] ^= 0xff;
+
+ /* Verify that the number of code bytes has the expected value */
+ if (engine_conf->bch->ecc_bytes != eccbytes) {
+ pr_err("Invalid number of ECC bytes: %u, expected: %u\n",
+ eccbytes, engine_conf->bch->ecc_bytes);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Sanity checks */
+ if (8 * (eccsize + eccbytes) >= (1 << m)) {
+ pr_err("ECC step size is too large (%u)\n", eccsize);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ nand_ecc_sw_bch_cleanup(nand);
+
+ return ret;
+}
+
+int nand_ecc_sw_bch_init_ctx(struct nand_device *nand)
+{
+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct nand_ecc_sw_bch_conf *engine_conf;
+ unsigned int code_size = 0, nsteps;
+ int ret;
+
+ /* Only large page NAND chips may use BCH */
+ if (mtd->oobsize < 64) {
+ pr_err("BCH cannot be used with small page NAND chips\n");
+ return -EINVAL;
+ }
+
+ if (!mtd->ooblayout)
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+
+ conf->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ conf->algo = NAND_ECC_ALGO_BCH;
+ conf->step_size = nand->ecc.user_conf.step_size;
+ conf->strength = nand->ecc.user_conf.strength;
+
+ /*
+ * Board driver should supply ECC size and ECC strength
+ * values to select how many bits are correctable.
+ * Otherwise, default to 512 bytes for large page devices and 256 for
+ * small page devices.
+ */
+ if (!conf->step_size) {
+ if (mtd->oobsize >= 64)
+ conf->step_size = 512;
+ else
+ conf->step_size = 256;
+
+ conf->strength = 4;
+ }
+
+ nsteps = mtd->writesize / conf->step_size;
+
+ /* Maximize */
+ if (nand->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
+ conf->step_size = 1024;
+ nsteps = mtd->writesize / conf->step_size;
+ /* Reserve 2 bytes for the BBM */
+ code_size = (mtd->oobsize - 2) / nsteps;
+ conf->strength = code_size * 8 / fls(8 * conf->step_size);
+ }
+
+ if (!code_size)
+ code_size = DIV_ROUND_UP(conf->strength *
+ fls(8 * conf->step_size), 8);
+
+ if (!conf->strength)
+ conf->strength = (code_size * 8) / fls(8 * conf->step_size);
+
+ if (!code_size && !conf->strength) {
+ pr_err("Missing ECC parameters\n");
+ return -EINVAL;
+ }
+
+ engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
+ if (!engine_conf)
+ return -ENOMEM;
+
+ ret = nand_ecc_init_req_tweaking(&engine_conf->req_ctx, nand);
+ if (ret)
+ goto free_engine_conf;
+
+ engine_conf->code_size = code_size;
+ engine_conf->calc_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
+ engine_conf->code_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
+ if (!engine_conf->calc_buf || !engine_conf->code_buf) {
+ ret = -ENOMEM;
+ goto free_bufs;
+ }
+
+ nand->ecc.ctx.priv = engine_conf;
+ nand->ecc.ctx.nsteps = nsteps;
+ nand->ecc.ctx.total = nsteps * code_size;
+
+ ret = nand_ecc_sw_bch_init(nand);
+ if (ret)
+ goto free_bufs;
+
+ /* Verify the layout validity */
+ if (mtd_ooblayout_count_eccbytes(mtd) !=
+ nand->ecc.ctx.nsteps * engine_conf->code_size) {
+ pr_err("Invalid ECC layout\n");
+ ret = -EINVAL;
+ goto cleanup_bch_ctx;
+ }
+
+ return 0;
+
+cleanup_bch_ctx:
+ nand_ecc_sw_bch_cleanup(nand);
+free_bufs:
+ nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx);
+ kfree(engine_conf->calc_buf);
+ kfree(engine_conf->code_buf);
+free_engine_conf:
+ kfree(engine_conf);
+
+ return ret;
+}
+EXPORT_SYMBOL(nand_ecc_sw_bch_init_ctx);
+
+void nand_ecc_sw_bch_cleanup_ctx(struct nand_device *nand)
+{
+ struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
+
+ if (engine_conf) {
+ nand_ecc_sw_bch_cleanup(nand);
+ nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx);
+ kfree(engine_conf->calc_buf);
+ kfree(engine_conf->code_buf);
+ kfree(engine_conf);
+ }
+}
+EXPORT_SYMBOL(nand_ecc_sw_bch_cleanup_ctx);
+
+static int nand_ecc_sw_bch_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int eccsize = nand->ecc.ctx.conf.step_size;
+ int eccbytes = engine_conf->code_size;
+ int eccsteps = nand->ecc.ctx.nsteps;
+ int total = nand->ecc.ctx.total;
+ u8 *ecccalc = engine_conf->calc_buf;
+ const u8 *data;
+ int i;
+
+ /* Nothing to do for a raw operation */
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ /* This engine does not provide BBM/free OOB bytes protection */
+ if (!req->datalen)
+ return 0;
+
+ nand_ecc_tweak_req(&engine_conf->req_ctx, req);
+
+ /* No more preparation for page read */
+ if (req->type == NAND_PAGE_READ)
+ return 0;
+
+ /* Preparation for page write: derive the ECC bytes and place them */
+ for (i = 0, data = req->databuf.out;
+ eccsteps;
+ eccsteps--, i += eccbytes, data += eccsize)
+ nand_ecc_sw_bch_calculate(nand, data, &ecccalc[i]);
+
+ return mtd_ooblayout_set_eccbytes(mtd, ecccalc, (void *)req->oobbuf.out,
+ 0, total);
+}
+
+static int nand_ecc_sw_bch_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int eccsize = nand->ecc.ctx.conf.step_size;
+ int total = nand->ecc.ctx.total;
+ int eccbytes = engine_conf->code_size;
+ int eccsteps = nand->ecc.ctx.nsteps;
+ u8 *ecccalc = engine_conf->calc_buf;
+ u8 *ecccode = engine_conf->code_buf;
+ unsigned int max_bitflips = 0;
+ u8 *data = req->databuf.in;
+ int i, ret;
+
+ /* Nothing to do for a raw operation */
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ /* This engine does not provide BBM/free OOB bytes protection */
+ if (!req->datalen)
+ return 0;
+
+ /* No more preparation for page write */
+ if (req->type == NAND_PAGE_WRITE) {
+ nand_ecc_restore_req(&engine_conf->req_ctx, req);
+ return 0;
+ }
+
+	/* Finish a page read: retrieve the (raw) ECC bytes */
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecccode, req->oobbuf.in, 0,
+ total);
+ if (ret)
+ return ret;
+
+ /* Calculate the ECC bytes */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, data += eccsize)
+ nand_ecc_sw_bch_calculate(nand, data, &ecccalc[i]);
+
+ /* Finish a page read: compare and correct */
+ for (eccsteps = nand->ecc.ctx.nsteps, i = 0, data = req->databuf.in;
+ eccsteps;
+ eccsteps--, i += eccbytes, data += eccsize) {
+ int stat = nand_ecc_sw_bch_correct(nand, data,
+ &ecccode[i],
+ &ecccalc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ nand_ecc_restore_req(&engine_conf->req_ctx, req);
+
+ return max_bitflips;
+}
+
+static struct nand_ecc_engine_ops nand_ecc_sw_bch_engine_ops = {
+ .init_ctx = nand_ecc_sw_bch_init_ctx,
+ .cleanup_ctx = nand_ecc_sw_bch_cleanup_ctx,
+ .prepare_io_req = nand_ecc_sw_bch_prepare_io_req,
+ .finish_io_req = nand_ecc_sw_bch_finish_io_req,
+};
+
+static struct nand_ecc_engine nand_ecc_sw_bch_engine = {
+ .ops = &nand_ecc_sw_bch_engine_ops,
+};
+
+struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void)
+{
+ return &nand_ecc_sw_bch_engine;
+}
+EXPORT_SYMBOL(nand_ecc_sw_bch_get_engine);
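+
+/*
+ * A sketch of how this engine is expected to be wired up: the NAND core
+ * assigns it to a device requesting soft BCH correction and then runs the
+ * usual ops sequence (the selection logic itself lives in the core):
+ *
+ *     nand->ecc.engine = nand_ecc_sw_bch_get_engine();
+ *     ret = nand->ecc.engine->ops->init_ctx(nand);
+ */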
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
+MODULE_DESCRIPTION("NAND software BCH ECC support");
diff --git a/drivers/mtd/nand/ecc-sw-hamming.c b/drivers/mtd/nand/ecc-sw-hamming.c
new file mode 100644
index 0000000000..254db2e7f8
--- /dev/null
+++ b/drivers/mtd/nand/ecc-sw-hamming.c
@@ -0,0 +1,660 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file contains an ECC algorithm that detects and corrects 1 bit
+ * errors in a 256 byte block of data.
+ *
+ * Copyright © 2008 Koninklijke Philips Electronics NV.
+ * Author: Frans Meulenbroeks
+ *
+ * Completely replaces the previous ECC implementation which was written by:
+ * Steven J. Hill (sjhill@realitydiluted.com)
+ * Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Information on how this algorithm works and how it was developed
+ * can be found in Documentation/driver-api/mtd/nand_ecc.rst
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+
+/*
+ * invparity is a 256 byte table that contains the odd parity
+ * for each byte. So if the number of bits in a byte is even,
+ * the array element is 1, and when the number of bits is odd
+ * the array element is 0.
+ */
+static const char invparity[256] = {
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
+};
+
+/*
+ * bitsperbyte contains the number of bits per byte
+ * this is only used for testing and repairing parity
+ * (a precalculated value slightly improves performance)
+ */
+static const char bitsperbyte[256] = {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
+};
+
+/*
+ * addressbits is a lookup table to filter out the bits from the xor-ed
+ * ECC data that identify the faulty location.
+ * this is only used for repairing parity
+ * see the comments in nand_ecc_sw_hamming_correct for more details
+ */
+static const char addressbits[256] = {
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
+};
+
+int ecc_sw_hamming_calculate(const unsigned char *buf, unsigned int step_size,
+ unsigned char *code, bool sm_order)
+{
+ const u32 *bp = (uint32_t *)buf;
+ const u32 eccsize_mult = (step_size == 256) ? 1 : 2;
+ /* current value in buffer */
+ u32 cur;
+ /* rp0..rp17 are the various accumulated parities (per byte) */
+ u32 rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7, rp8, rp9, rp10, rp11, rp12,
+ rp13, rp14, rp15, rp16, rp17;
+ /* Cumulative parity for all data */
+ u32 par;
+ /* Cumulative parity at the end of the loop (rp12, rp14, rp16) */
+ u32 tmppar;
+ int i;
+
+ par = 0;
+ rp4 = 0;
+ rp6 = 0;
+ rp8 = 0;
+ rp10 = 0;
+ rp12 = 0;
+ rp14 = 0;
+ rp16 = 0;
+ rp17 = 0;
+
+ /*
+	 * The loop is unrolled a number of times; this avoids if statements
+	 * deciding which rp value to update, and we process the data by
+	 * longwords. Note: passing unaligned data might give a performance
+	 * penalty; it is assumed that the buffers are aligned.
+	 * tmppar is the cumulative sum of this iteration; it is needed for
+	 * calculating rp12, rp14, rp16 and par, and is also used as a
+	 * performance improvement for rp6, rp8 and rp10.
+ */
+ for (i = 0; i < eccsize_mult << 2; i++) {
+ cur = *bp++;
+ tmppar = cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= tmppar;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp8 ^= tmppar;
+
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp10 ^= tmppar;
+
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp8 ^= cur;
+
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+
+ par ^= tmppar;
+ if ((i & 0x1) == 0)
+ rp12 ^= tmppar;
+ if ((i & 0x2) == 0)
+ rp14 ^= tmppar;
+ if (eccsize_mult == 2 && (i & 0x4) == 0)
+ rp16 ^= tmppar;
+ }
+
+ /*
+	 * Handle the fact that we used longword operations: bring
+	 * rp4..rp14..rp16 back to single-byte entities by shifting and
+	 * xoring; first fold the upper and lower 16 bits, then the upper
+	 * and lower 8 bits.
+ */
+ rp4 ^= (rp4 >> 16);
+ rp4 ^= (rp4 >> 8);
+ rp4 &= 0xff;
+ rp6 ^= (rp6 >> 16);
+ rp6 ^= (rp6 >> 8);
+ rp6 &= 0xff;
+ rp8 ^= (rp8 >> 16);
+ rp8 ^= (rp8 >> 8);
+ rp8 &= 0xff;
+ rp10 ^= (rp10 >> 16);
+ rp10 ^= (rp10 >> 8);
+ rp10 &= 0xff;
+ rp12 ^= (rp12 >> 16);
+ rp12 ^= (rp12 >> 8);
+ rp12 &= 0xff;
+ rp14 ^= (rp14 >> 16);
+ rp14 ^= (rp14 >> 8);
+ rp14 &= 0xff;
+ if (eccsize_mult == 2) {
+ rp16 ^= (rp16 >> 16);
+ rp16 ^= (rp16 >> 8);
+ rp16 &= 0xff;
+ }
+
+ /*
+ * we also need to calculate the row parity for rp0..rp3
+ * This is present in par, because par is now
+ * rp3 rp3 rp2 rp2 in little endian and
+ * rp2 rp2 rp3 rp3 in big endian
+ * as well as
+ * rp1 rp0 rp1 rp0 in little endian and
+ * rp0 rp1 rp0 rp1 in big endian
+ * First calculate rp2 and rp3
+ */
+#ifdef __BIG_ENDIAN
+ rp2 = (par >> 16);
+ rp2 ^= (rp2 >> 8);
+ rp2 &= 0xff;
+ rp3 = par & 0xffff;
+ rp3 ^= (rp3 >> 8);
+ rp3 &= 0xff;
+#else
+ rp3 = (par >> 16);
+ rp3 ^= (rp3 >> 8);
+ rp3 &= 0xff;
+ rp2 = par & 0xffff;
+ rp2 ^= (rp2 >> 8);
+ rp2 &= 0xff;
+#endif
+
+ /* reduce par to 16 bits then calculate rp1 and rp0 */
+ par ^= (par >> 16);
+#ifdef __BIG_ENDIAN
+ rp0 = (par >> 8) & 0xff;
+ rp1 = (par & 0xff);
+#else
+ rp1 = (par >> 8) & 0xff;
+ rp0 = (par & 0xff);
+#endif
+
+ /* finally reduce par to 8 bits */
+ par ^= (par >> 8);
+ par &= 0xff;
+
+ /*
+ * and calculate rp5..rp15..rp17
+ * note that par = rp4 ^ rp5 and due to the commutative property
+ * of the ^ operator we can say:
+ * rp5 = (par ^ rp4);
+	 * The & 0xff seems superfluous, but benchmarking showed that
+	 * leaving it out gives slightly worse results. No idea why; it
+	 * probably has to do with the way the Pentium pipeline is organized.
+ */
+ rp5 = (par ^ rp4) & 0xff;
+ rp7 = (par ^ rp6) & 0xff;
+ rp9 = (par ^ rp8) & 0xff;
+ rp11 = (par ^ rp10) & 0xff;
+ rp13 = (par ^ rp12) & 0xff;
+ rp15 = (par ^ rp14) & 0xff;
+ if (eccsize_mult == 2)
+ rp17 = (par ^ rp16) & 0xff;
+
+ /*
+ * Finally calculate the ECC bits.
+	 * Again it might seem that there are performance optimisations
+	 * possible, but benchmarks showed that, on the system this was
+	 * developed on, the code below is the fastest.
+ */
+ if (sm_order) {
+ code[0] = (invparity[rp7] << 7) | (invparity[rp6] << 6) |
+ (invparity[rp5] << 5) | (invparity[rp4] << 4) |
+ (invparity[rp3] << 3) | (invparity[rp2] << 2) |
+ (invparity[rp1] << 1) | (invparity[rp0]);
+ code[1] = (invparity[rp15] << 7) | (invparity[rp14] << 6) |
+ (invparity[rp13] << 5) | (invparity[rp12] << 4) |
+ (invparity[rp11] << 3) | (invparity[rp10] << 2) |
+ (invparity[rp9] << 1) | (invparity[rp8]);
+ } else {
+ code[1] = (invparity[rp7] << 7) | (invparity[rp6] << 6) |
+ (invparity[rp5] << 5) | (invparity[rp4] << 4) |
+ (invparity[rp3] << 3) | (invparity[rp2] << 2) |
+ (invparity[rp1] << 1) | (invparity[rp0]);
+ code[0] = (invparity[rp15] << 7) | (invparity[rp14] << 6) |
+ (invparity[rp13] << 5) | (invparity[rp12] << 4) |
+ (invparity[rp11] << 3) | (invparity[rp10] << 2) |
+ (invparity[rp9] << 1) | (invparity[rp8]);
+ }
+
+ if (eccsize_mult == 1)
+ code[2] =
+ (invparity[par & 0xf0] << 7) |
+ (invparity[par & 0x0f] << 6) |
+ (invparity[par & 0xcc] << 5) |
+ (invparity[par & 0x33] << 4) |
+ (invparity[par & 0xaa] << 3) |
+ (invparity[par & 0x55] << 2) |
+ 3;
+ else
+ code[2] =
+ (invparity[par & 0xf0] << 7) |
+ (invparity[par & 0x0f] << 6) |
+ (invparity[par & 0xcc] << 5) |
+ (invparity[par & 0x33] << 4) |
+ (invparity[par & 0xaa] << 3) |
+ (invparity[par & 0x55] << 2) |
+ (invparity[rp17] << 1) |
+ (invparity[rp16] << 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(ecc_sw_hamming_calculate);
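+
+/*
+ * Example: computing the 3 ECC bytes over a 256-byte block in the Smart
+ * Media byte order (a sketch; as noted above, the buffer is assumed to be
+ * 32-bit aligned):
+ *
+ *     u8 code[3];
+ *
+ *     ecc_sw_hamming_calculate(buf, 256, code, true);
+ */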
+
+/**
+ * nand_ecc_sw_hamming_calculate - Calculate 3-byte ECC for 256/512-byte block
+ * @nand: NAND device
+ * @buf: Input buffer with raw data
+ * @code: Output buffer with ECC
+ */
+int nand_ecc_sw_hamming_calculate(struct nand_device *nand,
+ const unsigned char *buf, unsigned char *code)
+{
+ struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv;
+ unsigned int step_size = nand->ecc.ctx.conf.step_size;
+ bool sm_order = engine_conf ? engine_conf->sm_order : false;
+
+ return ecc_sw_hamming_calculate(buf, step_size, code, sm_order);
+}
+EXPORT_SYMBOL(nand_ecc_sw_hamming_calculate);
+
+int ecc_sw_hamming_correct(unsigned char *buf, unsigned char *read_ecc,
+ unsigned char *calc_ecc, unsigned int step_size,
+ bool sm_order)
+{
+ const u32 eccsize_mult = step_size >> 8;
+ unsigned char b0, b1, b2, bit_addr;
+ unsigned int byte_addr;
+
+ /*
+ * b0 to b2 indicate which bit is faulty (if any)
+ * we might need the xor result more than once,
+ * so keep them in a local var
+ */
+ if (sm_order) {
+ b0 = read_ecc[0] ^ calc_ecc[0];
+ b1 = read_ecc[1] ^ calc_ecc[1];
+ } else {
+ b0 = read_ecc[1] ^ calc_ecc[1];
+ b1 = read_ecc[0] ^ calc_ecc[0];
+ }
+
+ b2 = read_ecc[2] ^ calc_ecc[2];
+
+ /* check if there are any bitfaults */
+
+ /* repeated if statements are slightly more efficient than a switch */
+ /* ordered by decreasing likelihood */
+
+ if ((b0 | b1 | b2) == 0)
+ return 0; /* no error */
+
+ if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
+ (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
+ ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
+ (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
+ /* single bit error */
+ /*
+ * rp17/rp15/13/11/9/7/5/3/1 indicate the faulty byte and
+ * cp5/cp3/cp1 indicate the faulty bit.
+ * A lookup table (called addressbits) is used to extract
+ * the relevant bits from the byte they are in.
+ * A marginal optimisation would be possible with three
+ * different lookup tables: one as we have now (for b0), one for
+ * b2 (avoiding the >> 1), and one for b1 (with all values << 4).
+ * However, introducing two more tables hardly justifies the gain.
+ *
+ * The b2 shift is there to get rid of the lowest two bits.
+ * We could also do addressbits[b2] >> 1, but performance-wise
+ * it makes no difference.
+ */
+ if (eccsize_mult == 1)
+ byte_addr = (addressbits[b1] << 4) + addressbits[b0];
+ else
+ byte_addr = (addressbits[b2 & 0x3] << 8) +
+ (addressbits[b1] << 4) + addressbits[b0];
+ bit_addr = addressbits[b2 >> 2];
+ /* flip the bit */
+ buf[byte_addr] ^= (1 << bit_addr);
+ return 1;
+
+ }
+ /* count nr of bits; use table lookup, faster than calculating it */
+ if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
+ return 1; /* error in ECC data; no action needed */
+
+ pr_err("%s: uncorrectable ECC error\n", __func__);
+ return -EBADMSG;
+}
+EXPORT_SYMBOL(ecc_sw_hamming_correct);
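+
+/*
+ * Minimal usage sketch (illustration only, not part of the engine):
+ * protect a 256-byte buffer in Smart Media byte order, then correct it
+ * after a read back. "stored" and "recomputed" are hypothetical names.
+ *
+ *	u8 stored[3], recomputed[3];
+ *
+ *	ecc_sw_hamming_calculate(buf, 256, stored, true);
+ *	... write buf and stored to flash, read them back later ...
+ *	ecc_sw_hamming_calculate(buf, 256, recomputed, true);
+ *	ret = ecc_sw_hamming_correct(buf, stored, recomputed, 256, true);
+ *
+ * ret is 0 for a clean read, 1 for a corrected (or ECC-only) error and
+ * -EBADMSG when the error is uncorrectable.
+ */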
+
+/**
+ * nand_ecc_sw_hamming_correct - Detect and correct bit error(s)
+ * @nand: NAND device
+ * @buf: Raw data read from the chip
+ * @read_ecc: ECC bytes read from the chip
+ * @calc_ecc: ECC calculated from the raw data
+ *
+ * Detect and correct up to 1 bit error per 256/512-byte block.
+ */
+int nand_ecc_sw_hamming_correct(struct nand_device *nand, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv;
+ unsigned int step_size = nand->ecc.ctx.conf.step_size;
+ bool sm_order = engine_conf ? engine_conf->sm_order : false;
+
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc, step_size,
+ sm_order);
+}
+EXPORT_SYMBOL(nand_ecc_sw_hamming_correct);
+
+int nand_ecc_sw_hamming_init_ctx(struct nand_device *nand)
+{
+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
+ struct nand_ecc_sw_hamming_conf *engine_conf;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int ret;
+
+ if (!mtd->ooblayout) {
+ switch (mtd->oobsize) {
+ case 8:
+ case 16:
+ mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
+ break;
+ case 64:
+ case 128:
+ mtd_set_ooblayout(mtd,
+ nand_get_large_page_hamming_ooblayout());
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ conf->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ conf->algo = NAND_ECC_ALGO_HAMMING;
+ conf->step_size = nand->ecc.user_conf.step_size;
+ conf->strength = 1;
+
+ /* Use the strongest configuration by default */
+ if (conf->step_size != 256 && conf->step_size != 512)
+ conf->step_size = 256;
+
+ engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
+ if (!engine_conf)
+ return -ENOMEM;
+
+ ret = nand_ecc_init_req_tweaking(&engine_conf->req_ctx, nand);
+ if (ret)
+ goto free_engine_conf;
+
+ engine_conf->code_size = 3;
+ engine_conf->calc_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
+ engine_conf->code_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
+ if (!engine_conf->calc_buf || !engine_conf->code_buf) {
+ ret = -ENOMEM;
+ goto free_bufs;
+ }
+
+ nand->ecc.ctx.priv = engine_conf;
+ nand->ecc.ctx.nsteps = mtd->writesize / conf->step_size;
+ nand->ecc.ctx.total = nand->ecc.ctx.nsteps * engine_conf->code_size;
+
+ return 0;
+
+free_bufs:
+ nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx);
+ kfree(engine_conf->calc_buf);
+ kfree(engine_conf->code_buf);
+free_engine_conf:
+ kfree(engine_conf);
+
+ return ret;
+}
+EXPORT_SYMBOL(nand_ecc_sw_hamming_init_ctx);
+
+void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device *nand)
+{
+ struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv;
+
+ if (engine_conf) {
+ nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx);
+ kfree(engine_conf->calc_buf);
+ kfree(engine_conf->code_buf);
+ kfree(engine_conf);
+ }
+}
+EXPORT_SYMBOL(nand_ecc_sw_hamming_cleanup_ctx);
+
+static int nand_ecc_sw_hamming_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int eccsize = nand->ecc.ctx.conf.step_size;
+ int eccbytes = engine_conf->code_size;
+ int eccsteps = nand->ecc.ctx.nsteps;
+ int total = nand->ecc.ctx.total;
+ u8 *ecccalc = engine_conf->calc_buf;
+ const u8 *data;
+ int i;
+
+ /* Nothing to do for a raw operation */
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ /* This engine does not provide BBM/free OOB bytes protection */
+ if (!req->datalen)
+ return 0;
+
+ nand_ecc_tweak_req(&engine_conf->req_ctx, req);
+
+ /* No more preparation for page read */
+ if (req->type == NAND_PAGE_READ)
+ return 0;
+
+ /* Preparation for page write: derive the ECC bytes and place them */
+ for (i = 0, data = req->databuf.out;
+ eccsteps;
+ eccsteps--, i += eccbytes, data += eccsize)
+ nand_ecc_sw_hamming_calculate(nand, data, &ecccalc[i]);
+
+ return mtd_ooblayout_set_eccbytes(mtd, ecccalc, (void *)req->oobbuf.out,
+ 0, total);
+}
+
+static int nand_ecc_sw_hamming_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int eccsize = nand->ecc.ctx.conf.step_size;
+ int total = nand->ecc.ctx.total;
+ int eccbytes = engine_conf->code_size;
+ int eccsteps = nand->ecc.ctx.nsteps;
+ u8 *ecccalc = engine_conf->calc_buf;
+ u8 *ecccode = engine_conf->code_buf;
+ unsigned int max_bitflips = 0;
+ u8 *data = req->databuf.in;
+ int i, ret;
+
+ /* Nothing to do for a raw operation */
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ /* This engine does not provide BBM/free OOB bytes protection */
+ if (!req->datalen)
+ return 0;
+
+ /* No more preparation for page write */
+ if (req->type == NAND_PAGE_WRITE) {
+ nand_ecc_restore_req(&engine_conf->req_ctx, req);
+ return 0;
+ }
+
+ /* Finish a page read: retrieve the (raw) ECC bytes */
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecccode, req->oobbuf.in, 0,
+ total);
+ if (ret)
+ return ret;
+
+ /* Calculate the ECC bytes */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, data += eccsize)
+ nand_ecc_sw_hamming_calculate(nand, data, &ecccalc[i]);
+
+ /* Finish a page read: compare and correct */
+ for (eccsteps = nand->ecc.ctx.nsteps, i = 0, data = req->databuf.in;
+ eccsteps;
+ eccsteps--, i += eccbytes, data += eccsize) {
+ int stat = nand_ecc_sw_hamming_correct(nand, data,
+ &ecccode[i],
+ &ecccalc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ nand_ecc_restore_req(&engine_conf->req_ctx, req);
+
+ return max_bitflips;
+}
+
+static struct nand_ecc_engine_ops nand_ecc_sw_hamming_engine_ops = {
+ .init_ctx = nand_ecc_sw_hamming_init_ctx,
+ .cleanup_ctx = nand_ecc_sw_hamming_cleanup_ctx,
+ .prepare_io_req = nand_ecc_sw_hamming_prepare_io_req,
+ .finish_io_req = nand_ecc_sw_hamming_finish_io_req,
+};
+
+static struct nand_ecc_engine nand_ecc_sw_hamming_engine = {
+ .ops = &nand_ecc_sw_hamming_engine_ops,
+};
+
+struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void)
+{
+ return &nand_ecc_sw_hamming_engine;
+}
+EXPORT_SYMBOL(nand_ecc_sw_hamming_get_engine);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
+MODULE_DESCRIPTION("NAND software Hamming ECC support");
diff --git a/drivers/mtd/nand/ecc.c b/drivers/mtd/nand/ecc.c
new file mode 100644
index 0000000000..8f996e8d61
--- /dev/null
+++ b/drivers/mtd/nand/ecc.c
@@ -0,0 +1,735 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Generic Error-Correcting Code (ECC) engine
+ *
+ * Copyright (C) 2019 Macronix
+ * Author:
+ * Miquèl RAYNAL <miquel.raynal@bootlin.com>
+ *
+ *
+ * This file describes the abstraction of any NAND ECC engine. It has been
+ * designed to fit most cases, including parallel NANDs and SPI-NANDs.
+ *
+ * There are three main situations where instantiating this ECC engine makes
+ * sense:
+ * - external: The ECC engine is outside the NAND pipeline; typically this
+ * is a software ECC engine, or a hardware engine that is
+ * outside the NAND controller pipeline.
+ * - pipelined: The ECC engine is inside the NAND pipeline, i.e. on the
+ * controller's side. This is the case for most raw NAND
+ * controllers. In the pipelined case, the ECC bytes are
+ * generated and the data corrected on the fly when a page is
+ * written/read.
+ * - ondie: The ECC engine is inside the NAND pipeline, on the chip's side.
+ * Some NAND chips can correct the data themselves.
+ *
+ * Besides the initial setup and final cleanups, the interfaces are rather
+ * simple:
+ * - prepare: Prepare an I/O request. Enable/disable the ECC engine based on
+ * the I/O request type. In case of software correction or an
+ * external engine, this step may involve deriving the ECC bytes
+ * and placing them in the OOB area before a write.
+ * - finish: Finish an I/O request. Correct the data in case of a read
+ * request and report the number of corrected bits/uncorrectable
+ * errors. Most likely empty for write operations, unless you have
+ * hardware-specific work to do, like shutting down the engine to
+ * save power.
+ *
+ * The I/O request should be enclosed in a prepare()/finish() pair of calls
+ * and will behave differently depending on the requested I/O type:
+ * - raw: Correction disabled
+ * - ecc: Correction enabled
+ *
+ * The request direction is impacting the logic as well:
+ * - read: Load data from the NAND chip
+ * - write: Store data in the NAND chip
+ *
+ * Mixing all these combinations together gives the following behavior.
+ * These are just examples; drivers are free to add custom steps in their
+ * prepare/finish hooks.
+ *
+ * [external ECC engine]
+ * - external + prepare + raw + read: do nothing
+ * - external + finish + raw + read: do nothing
+ * - external + prepare + raw + write: do nothing
+ * - external + finish + raw + write: do nothing
+ * - external + prepare + ecc + read: do nothing
+ * - external + finish + ecc + read: calculate expected ECC bytes, extract
+ * ECC bytes from OOB buffer, correct
+ * and report any bitflip/error
+ * - external + prepare + ecc + write: calculate ECC bytes and store them at
+ * the right place in the OOB buffer based
+ * on the OOB layout
+ * - external + finish + ecc + write: do nothing
+ *
+ * [pipelined ECC engine]
+ * - pipelined + prepare + raw + read: disable the controller's ECC engine if
+ * activated
+ * - pipelined + finish + raw + read: do nothing
+ * - pipelined + prepare + raw + write: disable the controller's ECC engine if
+ * activated
+ * - pipelined + finish + raw + write: do nothing
+ * - pipelined + prepare + ecc + read: enable the controller's ECC engine if
+ * deactivated
+ * - pipelined + finish + ecc + read: check the status, report any
+ * error/bitflip
+ * - pipelined + prepare + ecc + write: enable the controller's ECC engine if
+ * deactivated
+ * - pipelined + finish + ecc + write: do nothing
+ *
+ * [ondie ECC engine]
+ * - ondie + prepare + raw + read: send commands to disable the on-chip ECC
+ * engine if activated
+ * - ondie + finish + raw + read: do nothing
+ * - ondie + prepare + raw + write: send commands to disable the on-chip ECC
+ * engine if activated
+ * - ondie + finish + raw + write: do nothing
+ * - ondie + prepare + ecc + read: send commands to enable the on-chip ECC
+ * engine if deactivated
+ * - ondie + finish + ecc + read: send commands to check the status, report
+ * any error/bitflip
+ * - ondie + prepare + ecc + write: send commands to enable the on-chip ECC
+ * engine if deactivated
+ * - ondie + finish + ecc + write: do nothing
+ */
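+
+/*
+ * Rough usage sketch (illustration only; the helpers are defined below):
+ * a caller encloses each page access in a prepare()/finish() pair:
+ *
+ *	ret = nand_ecc_prepare_io_req(nand, &req);
+ *	if (ret)
+ *		return ret;
+ *	... perform the raw page access ...
+ *	ret = nand_ecc_finish_io_req(nand, &req);
+ */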
+
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+static LIST_HEAD(on_host_hw_engines);
+static DEFINE_MUTEX(on_host_hw_engines_mutex);
+
+/**
+ * nand_ecc_init_ctx - Init the ECC engine context
+ * @nand: the NAND device
+ *
+ * On success, the caller is responsible for calling @nand_ecc_cleanup_ctx().
+ */
+int nand_ecc_init_ctx(struct nand_device *nand)
+{
+ if (!nand->ecc.engine || !nand->ecc.engine->ops->init_ctx)
+ return 0;
+
+ return nand->ecc.engine->ops->init_ctx(nand);
+}
+EXPORT_SYMBOL(nand_ecc_init_ctx);
+
+/**
+ * nand_ecc_cleanup_ctx - Cleanup the ECC engine context
+ * @nand: the NAND device
+ */
+void nand_ecc_cleanup_ctx(struct nand_device *nand)
+{
+ if (nand->ecc.engine && nand->ecc.engine->ops->cleanup_ctx)
+ nand->ecc.engine->ops->cleanup_ctx(nand);
+}
+EXPORT_SYMBOL(nand_ecc_cleanup_ctx);
+
+/**
+ * nand_ecc_prepare_io_req - Prepare an I/O request
+ * @nand: the NAND device
+ * @req: the I/O request
+ */
+int nand_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ if (!nand->ecc.engine || !nand->ecc.engine->ops->prepare_io_req)
+ return 0;
+
+ return nand->ecc.engine->ops->prepare_io_req(nand, req);
+}
+EXPORT_SYMBOL(nand_ecc_prepare_io_req);
+
+/**
+ * nand_ecc_finish_io_req - Finish an I/O request
+ * @nand: the NAND device
+ * @req: the I/O request
+ */
+int nand_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ if (!nand->ecc.engine || !nand->ecc.engine->ops->finish_io_req)
+ return 0;
+
+ return nand->ecc.engine->ops->finish_io_req(nand, req);
+}
+EXPORT_SYMBOL(nand_ecc_finish_io_req);
+
+/* Define default OOB placement schemes for large and small page devices */
+static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+
+ if (section > 1)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 0;
+ if (mtd->oobsize == 16)
+ oobregion->length = 4;
+ else
+ oobregion->length = 3;
+ } else {
+ if (mtd->oobsize == 8)
+ return -ERANGE;
+
+ oobregion->offset = 6;
+ oobregion->length = total_ecc_bytes - 4;
+ }
+
+ return 0;
+}
+
+static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ if (mtd->oobsize == 16) {
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = 8;
+ oobregion->offset = 8;
+ } else {
+ oobregion->length = 2;
+ if (!section)
+ oobregion->offset = 3;
+ else
+ oobregion->offset = 6;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
+ .ecc = nand_ooblayout_ecc_sp,
+ .free = nand_ooblayout_free_sp,
+};
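+
+/*
+ * Worked example (derived from the functions above): with a 512-byte page,
+ * 256-byte ECC steps and 3 code bytes per step (total = 6) on a 16-byte
+ * OOB, the ECC bytes land at offsets 0-3 and 6-7, the free bytes at 8-15,
+ * and offsets 4-5 are left untouched (typically the bad block marker area).
+ */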
+
+const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void)
+{
+ return &nand_ooblayout_sp_ops;
+}
+EXPORT_SYMBOL_GPL(nand_get_small_page_ooblayout);
+
+static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+
+ if (section || !total_ecc_bytes)
+ return -ERANGE;
+
+ oobregion->length = total_ecc_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - total_ecc_bytes - 2;
+ oobregion->offset = 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
+ .ecc = nand_ooblayout_ecc_lp,
+ .free = nand_ooblayout_free_lp,
+};
+
+const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void)
+{
+ return &nand_ooblayout_lp_ops;
+}
+EXPORT_SYMBOL_GPL(nand_get_large_page_ooblayout);
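+
+/*
+ * Worked example (derived from the two functions above): on a 64-byte OOB
+ * with 24 ECC bytes in total, the ECC area spans offsets 40-63 and the
+ * free area offsets 2-39, bytes 0-1 being reserved (typically for the bad
+ * block marker).
+ */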
+
+/*
+ * Support the old "large page" layout used for 1-bit Hamming ECC, where the
+ * ECC bytes are placed at a fixed offset.
+ */
+static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+
+ if (section)
+ return -ERANGE;
+
+ switch (mtd->oobsize) {
+ case 64:
+ oobregion->offset = 40;
+ break;
+ case 128:
+ oobregion->offset = 80;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ oobregion->length = total_ecc_bytes;
+ if (oobregion->offset + oobregion->length > mtd->oobsize)
+ return -ERANGE;
+
+ return 0;
+}
+
+static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+ int ecc_offset = 0;
+
+ if (section < 0 || section > 1)
+ return -ERANGE;
+
+ switch (mtd->oobsize) {
+ case 64:
+ ecc_offset = 40;
+ break;
+ case 128:
+ ecc_offset = 80;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (section == 0) {
+ oobregion->offset = 2;
+ oobregion->length = ecc_offset - 2;
+ } else {
+ oobregion->offset = ecc_offset + total_ecc_bytes;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
+ .ecc = nand_ooblayout_ecc_lp_hamming,
+ .free = nand_ooblayout_free_lp_hamming,
+};
+
+const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void)
+{
+ return &nand_ooblayout_lp_hamming_ops;
+}
+EXPORT_SYMBOL_GPL(nand_get_large_page_hamming_ooblayout);
+
+static enum nand_ecc_engine_type
+of_get_nand_ecc_engine_type(struct device_node *np)
+{
+ struct device_node *eng_np;
+
+ if (of_property_read_bool(np, "nand-no-ecc-engine"))
+ return NAND_ECC_ENGINE_TYPE_NONE;
+
+ if (of_property_read_bool(np, "nand-use-soft-ecc-engine"))
+ return NAND_ECC_ENGINE_TYPE_SOFT;
+
+ eng_np = of_parse_phandle(np, "nand-ecc-engine", 0);
+ of_node_put(eng_np);
+
+ if (eng_np) {
+ if (eng_np == np)
+ return NAND_ECC_ENGINE_TYPE_ON_DIE;
+ else
+ return NAND_ECC_ENGINE_TYPE_ON_HOST;
+ }
+
+ return NAND_ECC_ENGINE_TYPE_INVALID;
+}
+
+static const char * const nand_ecc_placement[] = {
+ [NAND_ECC_PLACEMENT_OOB] = "oob",
+ [NAND_ECC_PLACEMENT_INTERLEAVED] = "interleaved",
+};
+
+static enum nand_ecc_placement of_get_nand_ecc_placement(struct device_node *np)
+{
+ enum nand_ecc_placement placement;
+ const char *pm;
+ int err;
+
+ err = of_property_read_string(np, "nand-ecc-placement", &pm);
+ if (!err) {
+ for (placement = NAND_ECC_PLACEMENT_OOB;
+ placement < ARRAY_SIZE(nand_ecc_placement); placement++) {
+ if (!strcasecmp(pm, nand_ecc_placement[placement]))
+ return placement;
+ }
+ }
+
+ return NAND_ECC_PLACEMENT_UNKNOWN;
+}
+
+static const char * const nand_ecc_algos[] = {
+ [NAND_ECC_ALGO_HAMMING] = "hamming",
+ [NAND_ECC_ALGO_BCH] = "bch",
+ [NAND_ECC_ALGO_RS] = "rs",
+};
+
+static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
+{
+ enum nand_ecc_algo ecc_algo;
+ const char *pm;
+ int err;
+
+ err = of_property_read_string(np, "nand-ecc-algo", &pm);
+ if (!err) {
+ for (ecc_algo = NAND_ECC_ALGO_HAMMING;
+ ecc_algo < ARRAY_SIZE(nand_ecc_algos);
+ ecc_algo++) {
+ if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
+ return ecc_algo;
+ }
+ }
+
+ return NAND_ECC_ALGO_UNKNOWN;
+}
+
+static int of_get_nand_ecc_step_size(struct device_node *np)
+{
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
+ return ret ? ret : val;
+}
+
+static int of_get_nand_ecc_strength(struct device_node *np)
+{
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32(np, "nand-ecc-strength", &val);
+ return ret ? ret : val;
+}
+
+void of_get_nand_ecc_user_config(struct nand_device *nand)
+{
+ struct device_node *dn = nanddev_get_of_node(nand);
+ int strength, size;
+
+ nand->ecc.user_conf.engine_type = of_get_nand_ecc_engine_type(dn);
+ nand->ecc.user_conf.algo = of_get_nand_ecc_algo(dn);
+ nand->ecc.user_conf.placement = of_get_nand_ecc_placement(dn);
+
+ strength = of_get_nand_ecc_strength(dn);
+ if (strength >= 0)
+ nand->ecc.user_conf.strength = strength;
+
+ size = of_get_nand_ecc_step_size(dn);
+ if (size >= 0)
+ nand->ecc.user_conf.step_size = size;
+
+ if (of_property_read_bool(dn, "nand-ecc-maximize"))
+ nand->ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
+}
+EXPORT_SYMBOL(of_get_nand_ecc_user_config);
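+
+/*
+ * Illustrative (hypothetical) device tree snippet exercising the
+ * properties parsed above:
+ *
+ *	nand@0 {
+ *		nand-use-soft-ecc-engine;
+ *		nand-ecc-algo = "bch";
+ *		nand-ecc-strength = <8>;
+ *		nand-ecc-step-size = <512>;
+ *	};
+ */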
+
+/**
+ * nand_ecc_is_strong_enough - Check if the chip configuration meets the
+ * datasheet requirements.
+ *
+ * @nand: Device to check
+ *
+ * If our configuration corrects A bits per B bytes and the minimum
+ * required correction level is X bits per Y bytes, then we must ensure
+ * both of the following are true:
+ *
+ * (1) A / B >= X / Y
+ * (2) A >= X
+ *
+ * Requirement (1) ensures we can correct for the required bitflip density.
+ * Requirement (2) ensures we can correct even when all bitflips are clumped
+ * in the same sector.
+ */
+bool nand_ecc_is_strong_enough(struct nand_device *nand)
+{
+ const struct nand_ecc_props *reqs = nanddev_get_ecc_requirements(nand);
+ const struct nand_ecc_props *conf = nanddev_get_ecc_conf(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int corr, ds_corr;
+
+ if (conf->step_size == 0 || reqs->step_size == 0)
+ /* Not enough information */
+ return true;
+
+ /*
+ * We get the number of corrected bits per page to compare
+ * the correction density.
+ */
+ corr = (mtd->writesize * conf->strength) / conf->step_size;
+ ds_corr = (mtd->writesize * reqs->strength) / reqs->step_size;
+
+ return corr >= ds_corr && conf->strength >= reqs->strength;
+}
+EXPORT_SYMBOL(nand_ecc_is_strong_enough);
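+
+/*
+ * Worked example of the check above: on a 2048-byte page, a configuration
+ * correcting 1 bit per 512 bytes gives corr = 4, while a chip requiring
+ * 4 bits per 512 bytes gives ds_corr = 16; both the density test
+ * (4 >= 16) and the per-step test (1 >= 4) fail, so the configuration is
+ * reported as not strong enough.
+ */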
+
+/* ECC engine driver internal helpers */
+int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_device *nand)
+{
+ unsigned int total_buffer_size;
+
+ ctx->nand = nand;
+
+ /* Let the user decide the exact length of each buffer */
+ if (!ctx->page_buffer_size)
+ ctx->page_buffer_size = nanddev_page_size(nand);
+ if (!ctx->oob_buffer_size)
+ ctx->oob_buffer_size = nanddev_per_page_oobsize(nand);
+
+ total_buffer_size = ctx->page_buffer_size + ctx->oob_buffer_size;
+
+ ctx->spare_databuf = kzalloc(total_buffer_size, GFP_KERNEL);
+ if (!ctx->spare_databuf)
+ return -ENOMEM;
+
+ ctx->spare_oobbuf = ctx->spare_databuf + ctx->page_buffer_size;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_ecc_init_req_tweaking);
+
+void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx)
+{
+ kfree(ctx->spare_databuf);
+}
+EXPORT_SYMBOL_GPL(nand_ecc_cleanup_req_tweaking);
+
+/*
+ * Ensure the data and OOB areas are fully read/written, otherwise the
+ * correction might not work as expected.
+ */
+void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_page_io_req *req)
+{
+ struct nand_device *nand = ctx->nand;
+ struct nand_page_io_req *orig, *tweak;
+
+ /* Save the original request */
+ ctx->orig_req = *req;
+ ctx->bounce_data = false;
+ ctx->bounce_oob = false;
+ orig = &ctx->orig_req;
+ tweak = req;
+
+ /* Ensure the request covers the entire page */
+ if (orig->datalen < nanddev_page_size(nand)) {
+ ctx->bounce_data = true;
+ tweak->dataoffs = 0;
+ tweak->datalen = nanddev_page_size(nand);
+ tweak->databuf.in = ctx->spare_databuf;
+ memset(tweak->databuf.in, 0xFF, ctx->page_buffer_size);
+ }
+
+ if (orig->ooblen < nanddev_per_page_oobsize(nand)) {
+ ctx->bounce_oob = true;
+ tweak->ooboffs = 0;
+ tweak->ooblen = nanddev_per_page_oobsize(nand);
+ tweak->oobbuf.in = ctx->spare_oobbuf;
+ memset(tweak->oobbuf.in, 0xFF, ctx->oob_buffer_size);
+ }
+
+ /* Copy the data that must be written into the bounce buffers, if needed */
+ if (orig->type == NAND_PAGE_WRITE) {
+ if (ctx->bounce_data)
+ memcpy((void *)tweak->databuf.out + orig->dataoffs,
+ orig->databuf.out, orig->datalen);
+
+ if (ctx->bounce_oob)
+ memcpy((void *)tweak->oobbuf.out + orig->ooboffs,
+ orig->oobbuf.out, orig->ooblen);
+ }
+}
+EXPORT_SYMBOL_GPL(nand_ecc_tweak_req);
+
+void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_page_io_req *req)
+{
+ struct nand_page_io_req *orig, *tweak;
+
+ orig = &ctx->orig_req;
+ tweak = req;
+
+ /* Restore the data read from the bounce buffers, if needed */
+ if (orig->type == NAND_PAGE_READ) {
+ if (ctx->bounce_data)
+ memcpy(orig->databuf.in,
+ tweak->databuf.in + orig->dataoffs,
+ orig->datalen);
+
+ if (ctx->bounce_oob)
+ memcpy(orig->oobbuf.in,
+ tweak->oobbuf.in + orig->ooboffs,
+ orig->ooblen);
+ }
+
+ /* Ensure the original request is restored */
+ *req = *orig;
+}
+EXPORT_SYMBOL_GPL(nand_ecc_restore_req);
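+
+/*
+ * The two helpers above are meant to be used as a pair, as the software
+ * Hamming engine does: nand_ecc_tweak_req() before the page access so the
+ * full page and OOB are covered, nand_ecc_restore_req() afterwards to hand
+ * the original request back to the caller.
+ */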
+
+struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand)
+{
+ unsigned int algo = nand->ecc.user_conf.algo;
+
+ if (algo == NAND_ECC_ALGO_UNKNOWN)
+ algo = nand->ecc.defaults.algo;
+
+ switch (algo) {
+ case NAND_ECC_ALGO_HAMMING:
+ return nand_ecc_sw_hamming_get_engine();
+ case NAND_ECC_ALGO_BCH:
+ return nand_ecc_sw_bch_get_engine();
+ default:
+ break;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(nand_ecc_get_sw_engine);
+
+struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand)
+{
+ return nand->ecc.ondie_engine;
+}
+EXPORT_SYMBOL(nand_ecc_get_on_die_hw_engine);
+
+int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+ struct nand_ecc_engine *item;
+
+ if (!engine)
+ return -EINVAL;
+
+ /* Prevent multiple registrations of one engine */
+ list_for_each_entry(item, &on_host_hw_engines, node)
+ if (item == engine)
+ return 0;
+
+ mutex_lock(&on_host_hw_engines_mutex);
+ list_add_tail(&engine->node, &on_host_hw_engines);
+ mutex_unlock(&on_host_hw_engines_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_ecc_register_on_host_hw_engine);
+
+int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+ if (!engine)
+ return -EINVAL;
+
+ mutex_lock(&on_host_hw_engines_mutex);
+ list_del(&engine->node);
+ mutex_unlock(&on_host_hw_engines_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_ecc_unregister_on_host_hw_engine);
+
+static struct nand_ecc_engine *nand_ecc_match_on_host_hw_engine(struct device *dev)
+{
+ struct nand_ecc_engine *item;
+
+ list_for_each_entry(item, &on_host_hw_engines, node)
+ if (item->dev == dev)
+ return item;
+
+ return NULL;
+}
+
+struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand)
+{
+ struct nand_ecc_engine *engine = NULL;
+ struct device *dev = &nand->mtd.dev;
+ struct platform_device *pdev;
+ struct device_node *np;
+
+ if (list_empty(&on_host_hw_engines))
+ return NULL;
+
+ /* Check for an explicit nand-ecc-engine property */
+ np = of_parse_phandle(dev->of_node, "nand-ecc-engine", 0);
+ if (np) {
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ /* Drop the node reference taken by of_parse_phandle() */
+ of_node_put(np);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ engine = nand_ecc_match_on_host_hw_engine(&pdev->dev);
+ platform_device_put(pdev);
+ of_node_put(np);
+
+ if (!engine)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (engine)
+ get_device(engine->dev);
+
+ return engine;
+}
+EXPORT_SYMBOL(nand_ecc_get_on_host_hw_engine);
+
+void nand_ecc_put_on_host_hw_engine(struct nand_device *nand)
+{
+ put_device(nand->ecc.engine->dev);
+}
+EXPORT_SYMBOL(nand_ecc_put_on_host_hw_engine);
+
+/*
+ * In the case of a pipelined engine, the device registering the ECC
+ * engine is not necessarily the ECC engine itself but may be a host controller.
+ * This helper retrieves the device object that actually represents the
+ * ECC engine.
+ */
+struct device *nand_ecc_get_engine_dev(struct device *host)
+{
+ struct platform_device *ecc_pdev;
+ struct device_node *np;
+
+ /*
+ * If the device node contains a "nand-ecc-engine" property, follow it
+ * to reach the device that actually implements the ECC engine.
+ */
+ np = of_parse_phandle(host->of_node, "nand-ecc-engine", 0);
+ if (!np)
+ return host;
+
+ ecc_pdev = of_find_device_by_node(np);
+ if (!ecc_pdev) {
+ of_node_put(np);
+ return NULL;
+ }
+
+ platform_device_put(ecc_pdev);
+ of_node_put(np);
+
+ return &ecc_pdev->dev;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Generic ECC engine");
diff --git a/drivers/mtd/nand/onenand/Kconfig b/drivers/mtd/nand/onenand/Kconfig
new file mode 100644
index 0000000000..7d57836bf3
--- /dev/null
+++ b/drivers/mtd/nand/onenand/Kconfig
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig MTD_ONENAND
+ tristate "OneNAND Device Support"
+ depends on HAS_IOMEM
+ help
+ This enables support for accessing all types of OneNAND flash
+ devices.
+
+if MTD_ONENAND
+
+config MTD_ONENAND_VERIFY_WRITE
+ bool "Verify OneNAND page writes"
+ help
+ This adds an extra check when data is written to the flash. The
+ OneNAND flash device internally checks only bits transitioning
+ from 1 to 0. There is a rare possibility that even though the
+ device thinks the write was successful, a bit could have been
+ flipped accidentally due to device wear or something else.
+
+config MTD_ONENAND_GENERIC
+ tristate "OneNAND Flash device via platform device driver"
+ help
+ Support for OneNAND flash via platform device driver.
+
+config MTD_ONENAND_OMAP2
+ tristate "OneNAND on OMAP2/OMAP3 support"
+ depends on ARCH_OMAP2 || ARCH_OMAP3 || (COMPILE_TEST && ARM)
+ depends on OF
+ depends on OMAP_GPMC
+ help
+ Support for a OneNAND flash device connected to an OMAP2/OMAP3 SoC
+ via the GPMC memory controller.
+ Enable dmaengine and gpiolib for better performance.
+
+config MTD_ONENAND_SAMSUNG
+ tristate "OneNAND on Samsung SOC controller support"
+ depends on ARCH_S3C64XX || ARCH_S5PV210 || COMPILE_TEST
+ help
+ Support for a OneNAND flash device connected to Samsung S3C64XX
+ (using command mapping method) and S5PC110/S5PC210 (using generic
+ OneNAND method) SoCs.
+ Choose Y here only if you build for such Samsung SoC.
+
+config MTD_ONENAND_OTP
+ bool "OneNAND OTP Support"
+ help
+ One Block of the NAND Flash Array memory is reserved as
+ a One-Time Programmable Block memory area.
+ Also, 1st Block of NAND Flash Array can be used as OTP.
+
+ The OTP block can be read, programmed and locked using the same
+ operations as any other NAND Flash Array memory block.
+ OTP block cannot be erased.
+
+ OTP block is fully-guaranteed to be a valid block.
+
+config MTD_ONENAND_2X_PROGRAM
+ bool "OneNAND 2X program support"
+ help
+ The 2X Program is an extension of the Program Operation.
+ Since the device is equipped with two DataRAMs and a two-plane NAND
+ Flash memory array, these two components enable simultaneous
+ programming of 4KiB. Plane1 has only even blocks such as block0,
+ block2, block4, while Plane2 has only odd blocks such as block1,
+ block3, block5. So MTD regards it as having a 4KiB page size and a
+ 256KiB block size.
+
+ The following chips (KFXXX16Q2M) currently support it:
+ Demux: KFG2G16Q2M, KFH4G16Q2M, KFW8G16Q2M,
+ Mux: KFM2G16Q2M, KFN4G16Q2M,
+
+ and more recent chips.
+
+endif # MTD_ONENAND
diff --git a/drivers/mtd/nand/onenand/Makefile b/drivers/mtd/nand/onenand/Makefile
new file mode 100644
index 0000000000..a0761c7e02
--- /dev/null
+++ b/drivers/mtd/nand/onenand/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the OneNAND MTD
+#
+
+# Core functionality.
+obj-$(CONFIG_MTD_ONENAND) += onenand.o
+
+# Board specific.
+obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o
+obj-$(CONFIG_MTD_ONENAND_OMAP2) += onenand_omap2.o
+obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += onenand_samsung.o
+
+onenand-objs = onenand_base.o onenand_bbt.o
diff --git a/drivers/mtd/nand/onenand/generic.c b/drivers/mtd/nand/onenand/generic.c
new file mode 100644
index 0000000000..4e7de48f07
--- /dev/null
+++ b/drivers/mtd/nand/onenand/generic.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2005 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * Overview:
+ * This is a device driver for the OneNAND flash for generic boards.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+
+/*
+ * Note: Driver name and platform data format have been updated!
+ *
+ * This version of the driver is named "onenand-flash" and takes struct
+ * onenand_platform_data as platform data. The old ARM-specific version
+ * with the name "onenand" used to take struct flash_platform_data.
+ */
+#define DRIVER_NAME "onenand-flash"
+
+struct onenand_info {
+ struct mtd_info mtd;
+ struct onenand_chip onenand;
+};
+
+static int generic_onenand_probe(struct platform_device *pdev)
+{
+ struct onenand_info *info;
+ struct onenand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct resource *res = pdev->resource;
+ unsigned long size = resource_size(res);
+ int err;
+
+ info = kzalloc(sizeof(struct onenand_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ if (!request_mem_region(res->start, size, dev_name(&pdev->dev))) {
+ err = -EBUSY;
+ goto out_free_info;
+ }
+
+ info->onenand.base = ioremap(res->start, size);
+ if (!info->onenand.base) {
+ err = -ENOMEM;
+ goto out_release_mem_region;
+ }
+
+ info->onenand.mmcontrol = pdata ? pdata->mmcontrol : NULL;
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ goto out_iounmap;
+
+ info->onenand.irq = err;
+
+ info->mtd.dev.parent = &pdev->dev;
+ info->mtd.priv = &info->onenand;
+
+ if (onenand_scan(&info->mtd, 1)) {
+ err = -ENXIO;
+ goto out_iounmap;
+ }
+
+ err = mtd_device_register(&info->mtd, pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
+
+ platform_set_drvdata(pdev, info);
+
+ return 0;
+
+out_iounmap:
+ iounmap(info->onenand.base);
+out_release_mem_region:
+ release_mem_region(res->start, size);
+out_free_info:
+ kfree(info);
+
+ return err;
+}
+
+static void generic_onenand_remove(struct platform_device *pdev)
+{
+ struct onenand_info *info = platform_get_drvdata(pdev);
+ struct resource *res = pdev->resource;
+ unsigned long size = resource_size(res);
+
+ if (info) {
+ onenand_release(&info->mtd);
+ release_mem_region(res->start, size);
+ iounmap(info->onenand.base);
+ kfree(info);
+ }
+}
+
+static struct platform_driver generic_onenand_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = generic_onenand_probe,
+ .remove_new = generic_onenand_remove,
+};
+
+module_platform_driver(generic_onenand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_DESCRIPTION("Glue layer for OneNAND flash on generic boards");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
new file mode 100644
index 0000000000..f66385faf6
--- /dev/null
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -0,0 +1,4027 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2005-2009 Samsung Electronics
+ * Copyright © 2007 Nokia Corporation
+ *
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * Credits:
+ * Adrian Hunter <ext-adrian.hunter@nokia.com>:
+ * auto-placement support, read-while load support, various fixes
+ *
+ * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
+ * Flex-OneNAND support
+ * Amul Kumar Saha <amul.saha at samsung.com>
+ * OTP support
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+
+/*
+ * Multiblock erase if number of blocks to erase is 2 or more.
+ * Maximum number of blocks for simultaneous erase is 64.
+ */
+#define MB_ERASE_MIN_BLK_COUNT 2
+#define MB_ERASE_MAX_BLK_COUNT 64
+
+/* Default Flex-OneNAND boundary and lock respectively */
+static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 };
+
+module_param_array(flex_bdry, int, NULL, 0400);
+MODULE_PARM_DESC(flex_bdry, "SLC boundary information for Flex-OneNAND. "
+ "Syntax: flex_bdry=DIE_BDRY,LOCK,... "
+ "DIE_BDRY: SLC boundary of the die. "
+ "LOCK: locking information for the SLC boundary"
+ " : 0 -> set boundary in unlocked status"
+ " : 1 -> set boundary in locked status");
+
+/* Default OneNAND/Flex-OneNAND OTP options*/
+static int otp;
+
+module_param(otp, int, 0400);
+MODULE_PARM_DESC(otp, "Corresponding behaviour of OneNAND in OTP. "
+ "Syntax: otp=LOCK_TYPE "
+ "LOCK_TYPE: keys issued for the specific OTP lock type"
+ " : 0 -> default (no blocks locked)"
+ " : 1 -> OTP block lock"
+ " : 2 -> 1st block lock"
+ " : 3 -> both OTP block and 1st block lock");
+
+/*
+ * flexonenand_oob_128 - oob info for Flex-OneNAND with a 4KB page
+ * For now, we expose only 64 out of the 80 ECC bytes.
+ */
+static int flexonenand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 6;
+ oobregion->length = 10;
+
+ return 0;
+}
+
+static int flexonenand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 2;
+ oobregion->length = 4;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops flexonenand_ooblayout_ops = {
+ .ecc = flexonenand_ooblayout_ecc,
+ .free = flexonenand_ooblayout_free,
+};
+
+/*
+ * onenand_oob_128 - oob info for OneNAND with 4KB page
+ *
+ * Based on specification:
+ * 4Gb M-die OneNAND Flash (KFM4G16Q4M, KFN8G16Q4M). Rev. 1.3, Apr. 2010
+ *
+ */
+static int onenand_ooblayout_128_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 7;
+ oobregion->length = 9;
+
+ return 0;
+}
+
+static int onenand_ooblayout_128_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section >= 8)
+ return -ERANGE;
+
+ /*
+ * the free bytes use the spare area fields marked as
+ * "Managed by internal ECC logic for Logical Sector Number area"
+ */
+ oobregion->offset = (section * 16) + 2;
+ oobregion->length = 3;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops onenand_oob_128_ooblayout_ops = {
+ .ecc = onenand_ooblayout_128_ecc,
+ .free = onenand_ooblayout_128_free,
+};
+
+/*
+ * onenand_oob_32_64 - oob info for large (2KB) page
+ */
+static int onenand_ooblayout_32_64_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 8;
+ oobregion->length = 5;
+
+ return 0;
+}
+
+static int onenand_ooblayout_32_64_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ int sections = (mtd->oobsize / 32) * 2;
+
+ if (section >= sections)
+ return -ERANGE;
+
+ if (section & 1) {
+ oobregion->offset = ((section - 1) * 16) + 14;
+ oobregion->length = 2;
+ } else {
+ oobregion->offset = (section * 16) + 2;
+ oobregion->length = 3;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops onenand_oob_32_64_ooblayout_ops = {
+ .ecc = onenand_ooblayout_32_64_ecc,
+ .free = onenand_ooblayout_32_64_free,
+};
+
+static const unsigned char ffchars[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 16 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 32 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 48 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 64 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 80 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 96 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 112 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 128 */
+};
+
+/**
+ * onenand_readw - [OneNAND Interface] Read OneNAND register
+ * @addr: address to read
+ *
+ * Read OneNAND register
+ */
+static unsigned short onenand_readw(void __iomem *addr)
+{
+ return readw(addr);
+}
+
+/**
+ * onenand_writew - [OneNAND Interface] Write OneNAND register with value
+ * @value: value to write
+ * @addr: address to write
+ *
+ * Write OneNAND register with value
+ */
+static void onenand_writew(unsigned short value, void __iomem *addr)
+{
+ writew(value, addr);
+}
+
+/**
+ * onenand_block_address - [DEFAULT] Get block address
+ * @this: onenand chip data structure
+ * @block: the block
+ * @return translated block address if DDP, otherwise the same address
+ *
+ * Setup Start Address 1 Register (F100h)
+ */
+static int onenand_block_address(struct onenand_chip *this, int block)
+{
+ /* Device Flash Core select, NAND Flash Block Address */
+ if (block & this->density_mask)
+ return ONENAND_DDP_CHIP1 | (block ^ this->density_mask);
+
+ return block;
+}
+
+/**
+ * onenand_bufferram_address - [DEFAULT] Get bufferram address
+ * @this: onenand chip data structure
+ * @block: the block
+ * @return set DBS value if DDP, otherwise 0
+ *
+ * Setup Start Address 2 Register (F101h) for DDP
+ */
+static int onenand_bufferram_address(struct onenand_chip *this, int block)
+{
+ /* Device BufferRAM Select */
+ if (block & this->density_mask)
+ return ONENAND_DDP_CHIP1;
+
+ return ONENAND_DDP_CHIP0;
+}
+
+/**
+ * onenand_page_address - [DEFAULT] Get page address
+ * @page: the page address
+ * @sector: the sector address
+ * @return combined page and sector address
+ *
+ * Setup Start Address 8 Register (F107h)
+ */
+static int onenand_page_address(int page, int sector)
+{
+ /* Flash Page Address, Flash Sector Address */
+ int fpa, fsa;
+
+ fpa = page & ONENAND_FPA_MASK;
+ fsa = sector & ONENAND_FSA_MASK;
+
+ return ((fpa << ONENAND_FPA_SHIFT) | fsa);
+}
+
+/**
+ * onenand_buffer_address - [DEFAULT] Get buffer address
+ * @dataram1: DataRAM index
+ * @sectors: the sector address
+ * @count: the number of sectors
+ * Return: the start buffer value
+ *
+ * Setup Start Buffer Register (F200h)
+ */
+static int onenand_buffer_address(int dataram1, int sectors, int count)
+{
+ int bsa, bsc;
+
+ /* BufferRAM Sector Address */
+ bsa = sectors & ONENAND_BSA_MASK;
+
+ if (dataram1)
+ bsa |= ONENAND_BSA_DATARAM1; /* DataRAM1 */
+ else
+ bsa |= ONENAND_BSA_DATARAM0; /* DataRAM0 */
+
+ /* BufferRAM Sector Count */
+ bsc = count & ONENAND_BSC_MASK;
+
+ return ((bsa << ONENAND_BSA_SHIFT) | bsc);
+}
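+
+/*
+ * For instance (illustration only), selecting DataRAM1 and sector 2 with a
+ * count of 4 yields ((ONENAND_BSA_DATARAM1 | 2) << ONENAND_BSA_SHIFT) | 4,
+ * the value written to the Start Buffer register.
+ */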
+
+/**
+ * flexonenand_block- For given address return block number
+ * @this: - OneNAND device structure
+ * @addr: - Address for which block number is needed
+ */
+static unsigned flexonenand_block(struct onenand_chip *this, loff_t addr)
+{
+ unsigned boundary, blk, die = 0;
+
+ if (ONENAND_IS_DDP(this) && addr >= this->diesize[0]) {
+ die = 1;
+ addr -= this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+
+ blk = addr >> (this->erase_shift - 1);
+ if (blk > boundary)
+ blk = (blk + boundary + 1) >> 1;
+
+ blk += die ? this->density_mask : 0;
+ return blk;
+}
+
+inline unsigned onenand_block(struct onenand_chip *this, loff_t addr)
+{
+ if (!FLEXONENAND(this))
+ return addr >> this->erase_shift;
+ return flexonenand_block(this, addr);
+}
+
+/**
+ * flexonenand_addr - Return address of the block
+ * @this: OneNAND device structure
+ * @block: Block number on Flex-OneNAND
+ *
+ * Return address of the block
+ */
+static loff_t flexonenand_addr(struct onenand_chip *this, int block)
+{
+ loff_t ofs = 0;
+ int die = 0, boundary;
+
+ if (ONENAND_IS_DDP(this) && block >= this->density_mask) {
+ block -= this->density_mask;
+ die = 1;
+ ofs = this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+ ofs += (loff_t)block << (this->erase_shift - 1);
+ if (block > (boundary + 1))
+ ofs += (loff_t)(block - boundary - 1) << (this->erase_shift - 1);
+ return ofs;
+}
+
+loff_t onenand_addr(struct onenand_chip *this, int block)
+{
+ if (!FLEXONENAND(this))
+ return (loff_t)block << this->erase_shift;
+ return flexonenand_addr(this, block);
+}
+EXPORT_SYMBOL(onenand_addr);
+
+/**
+ * onenand_get_density - [DEFAULT] Get OneNAND density
+ * @dev_id: OneNAND device ID
+ *
+ * Get OneNAND density from device ID
+ */
+static inline int onenand_get_density(int dev_id)
+{
+ int density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+ return (density & ONENAND_DEVICE_DENSITY_MASK);
+}
+
+/**
+ * flexonenand_region - [Flex-OneNAND] Return erase region of addr
+ * @mtd: MTD device structure
+ * @addr: address whose erase region needs to be identified
+ */
+int flexonenand_region(struct mtd_info *mtd, loff_t addr)
+{
+ int i;
+
+ for (i = 0; i < mtd->numeraseregions; i++)
+ if (addr < mtd->eraseregions[i].offset)
+ break;
+ return i - 1;
+}
+EXPORT_SYMBOL(flexonenand_region);
+
+/**
+ * onenand_command - [DEFAULT] Send command to OneNAND device
+ * @mtd: MTD device structure
+ * @cmd: the command to be sent
+ * @addr: offset to read from or write to
+ * @len: number of bytes to read or write
+ *
+ * Send command to OneNAND device. This function is used for middle/large page
+ * devices (1KB/2KB per page).
+ */
+static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ int value, block, page;
+
+ /* Address translation */
+ switch (cmd) {
+ case ONENAND_CMD_UNLOCK:
+ case ONENAND_CMD_LOCK:
+ case ONENAND_CMD_LOCK_TIGHT:
+ case ONENAND_CMD_UNLOCK_ALL:
+ block = -1;
+ page = -1;
+ break;
+
+ case FLEXONENAND_CMD_PI_ACCESS:
+ /* addr contains die index */
+ block = addr * this->density_mask;
+ page = -1;
+ break;
+
+ case ONENAND_CMD_ERASE:
+ case ONENAND_CMD_MULTIBLOCK_ERASE:
+ case ONENAND_CMD_ERASE_VERIFY:
+ case ONENAND_CMD_BUFFERRAM:
+ case ONENAND_CMD_OTP_ACCESS:
+ block = onenand_block(this, addr);
+ page = -1;
+ break;
+
+ case FLEXONENAND_CMD_READ_PI:
+ cmd = ONENAND_CMD_READ;
+ block = addr * this->density_mask;
+ page = 0;
+ break;
+
+ default:
+ block = onenand_block(this, addr);
+ if (FLEXONENAND(this))
+ page = (int) (addr - onenand_addr(this, block)) >>
+ this->page_shift;
+ else
+ page = (int) (addr >> this->page_shift);
+ if (ONENAND_IS_2PLANE(this)) {
+ /* Make the even block number */
+ block &= ~1;
+ /* Is it the odd plane? */
+ if (addr & this->writesize)
+ block++;
+ page >>= 1;
+ }
+ page &= this->page_mask;
+ break;
+ }
+
+ /* NOTE: The setting order of the registers is very important! */
+ if (cmd == ONENAND_CMD_BUFFERRAM) {
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+
+ if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this))
+ /* It is always BufferRAM0 */
+ ONENAND_SET_BUFFERRAM0(this);
+ else
+ /* Switch to the next data buffer */
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+
+ return 0;
+ }
+
+ if (block != -1) {
+ /* Write 'DFS, FBA' of Flash */
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
+
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+ }
+
+ if (page != -1) {
+ /* Now we use page size operation */
+ int sectors = 0, count = 0;
+ int dataram;
+
+ switch (cmd) {
+ case FLEXONENAND_CMD_RECOVER_LSB:
+ case ONENAND_CMD_READ:
+ case ONENAND_CMD_READOOB:
+ if (ONENAND_IS_4KB_PAGE(this))
+ /* It is always BufferRAM0 */
+ dataram = ONENAND_SET_BUFFERRAM0(this);
+ else
+ dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
+ break;
+
+ default:
+ if (ONENAND_IS_2PLANE(this) && cmd == ONENAND_CMD_PROG)
+ cmd = ONENAND_CMD_2X_PROG;
+ dataram = ONENAND_CURRENT_BUFFERRAM(this);
+ break;
+ }
+
+ /* Write 'FPA, FSA' of Flash */
+ value = onenand_page_address(page, sectors);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS8);
+
+ /* Write 'BSA, BSC' of DataRAM */
+ value = onenand_buffer_address(dataram, sectors, count);
+ this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
+ }
+
+ /* Interrupt clear */
+ this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT);
+
+ /* Write command */
+ this->write_word(cmd, this->base + ONENAND_REG_COMMAND);
+
+ return 0;
+}
+
+/**
+ * onenand_read_ecc - return ecc status
+ * @this: onenand chip structure
+ */
+static inline int onenand_read_ecc(struct onenand_chip *this)
+{
+ int ecc, i, result = 0;
+
+ if (!FLEXONENAND(this) && !ONENAND_IS_4KB_PAGE(this))
+ return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+
+ for (i = 0; i < 4; i++) {
+ ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i*2);
+ if (likely(!ecc))
+ continue;
+ if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
+ return ONENAND_ECC_2BIT_ALL;
+ else
+ result = ONENAND_ECC_1BIT_ALL;
+ }
+
+ return result;
+}
+
+/**
+ * onenand_wait - [DEFAULT] wait until the command is done
+ * @mtd: MTD device structure
+ * @state: state to select the max. timeout value
+ *
+ * Wait for command done. This applies to all OneNAND commands.
+ * Read can take up to 30us, erase up to 2ms and program up to 350us,
+ * according to general OneNAND specs.
+ */
+static int onenand_wait(struct mtd_info *mtd, int state)
+{
+ struct onenand_chip * this = mtd->priv;
+ unsigned long timeout;
+ unsigned int flags = ONENAND_INT_MASTER;
+ unsigned int interrupt = 0;
+ unsigned int ctrl;
+
+ /* The 20 msec is enough */
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+
+ if (interrupt & flags)
+ break;
+
+ if (state != FL_READING && state != FL_PREPARING_ERASE)
+ cond_resched();
+ }
+ /* To get correct interrupt status in timeout case */
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+
+ ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+
+ /*
+ * The spec says to check the controller status first.
+ * However, to get correct information in the power-off
+ * recovery (POR) test case, the ECC status must be read first.
+ */
+ if (interrupt & ONENAND_INT_READ) {
+ int ecc = onenand_read_ecc(this);
+ if (ecc) {
+ if (ecc & ONENAND_ECC_2BIT_ALL) {
+ printk(KERN_ERR "%s: ECC error = 0x%04x\n",
+ __func__, ecc);
+ mtd->ecc_stats.failed++;
+ return -EBADMSG;
+ } else if (ecc & ONENAND_ECC_1BIT_ALL) {
+ printk(KERN_DEBUG "%s: correctable ECC error = 0x%04x\n",
+ __func__, ecc);
+ mtd->ecc_stats.corrected++;
+ }
+ }
+ } else if (state == FL_READING) {
+ printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
+ __func__, ctrl, interrupt);
+ return -EIO;
+ }
+
+ if (state == FL_PREPARING_ERASE && !(interrupt & ONENAND_INT_ERASE)) {
+ printk(KERN_ERR "%s: mb erase timeout! ctrl=0x%04x intr=0x%04x\n",
+ __func__, ctrl, interrupt);
+ return -EIO;
+ }
+
+ if (!(interrupt & ONENAND_INT_MASTER)) {
+ printk(KERN_ERR "%s: timeout! ctrl=0x%04x intr=0x%04x\n",
+ __func__, ctrl, interrupt);
+ return -EIO;
+ }
+
+ /* If there's controller error, it's a real error */
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ printk(KERN_ERR "%s: controller error = 0x%04x\n",
+ __func__, ctrl);
+ if (ctrl & ONENAND_CTRL_LOCK)
+ printk(KERN_ERR "%s: it's locked error.\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * onenand_interrupt - [DEFAULT] onenand interrupt handler
+ * @irq: onenand interrupt number
+ * @data: interrupt data
+ *
+ * Complete the pending wait.
+ */
+static irqreturn_t onenand_interrupt(int irq, void *data)
+{
+ struct onenand_chip *this = data;
+
+ /* To handle shared interrupt */
+ if (!this->complete.done)
+ complete(&this->complete);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * onenand_interrupt_wait - [DEFAULT] wait until the command is done
+ * @mtd: MTD device structure
+ * @state: state to select the max. timeout value
+ *
+ * Wait for command done.
+ */
+static int onenand_interrupt_wait(struct mtd_info *mtd, int state)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ wait_for_completion(&this->complete);
+
+ return onenand_wait(mtd, state);
+}
+
+/*
+ * onenand_try_interrupt_wait - [DEFAULT] try interrupt wait
+ * @mtd: MTD device structure
+ * @state: state to select the max. timeout value
+ *
+ * Try the interrupt-based wait (it is only used once)
+ */
+static int onenand_try_interrupt_wait(struct mtd_info *mtd, int state)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned long remain, timeout;
+
+ /* We use interrupt wait first */
+ this->wait = onenand_interrupt_wait;
+
+ timeout = msecs_to_jiffies(100);
+ remain = wait_for_completion_timeout(&this->complete, timeout);
+ if (!remain) {
+ printk(KERN_INFO "OneNAND: No interrupt received. "
+ "Falling back to the normal wait\n");
+
+ /* Release the irq */
+ free_irq(this->irq, this);
+
+ this->wait = onenand_wait;
+ }
+
+ return onenand_wait(mtd, state);
+}
+
+/*
+ * onenand_setup_wait - [OneNAND Interface] setup onenand wait method
+ * @mtd: MTD device structure
+ *
+ * There are two methods to wait for OneNAND completion:
+ * 1. polling - read the interrupt status register
+ * 2. interrupt - use the kernel interrupt method
+ */
+static void onenand_setup_wait(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int syscfg;
+
+ init_completion(&this->complete);
+
+ if (this->irq <= 0) {
+ this->wait = onenand_wait;
+ return;
+ }
+
+ if (request_irq(this->irq, &onenand_interrupt,
+ IRQF_SHARED, "onenand", this)) {
+ /* If we can't get irq, use the normal wait */
+ this->wait = onenand_wait;
+ return;
+ }
+
+ /* Enable interrupt */
+ syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
+ syscfg |= ONENAND_SYS_CFG1_IOBE;
+ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+
+ this->wait = onenand_try_interrupt_wait;
+}
+
+/**
+ * onenand_bufferram_offset - [DEFAULT] BufferRAM offset
+ * @mtd: MTD data structure
+ * @area: BufferRAM area
+ * @return offset given area
+ *
+ * Return BufferRAM offset given area
+ */
+static inline int onenand_bufferram_offset(struct mtd_info *mtd, int area)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ if (ONENAND_CURRENT_BUFFERRAM(this)) {
+ /* Note: 'this->writesize' is the real page size */
+ if (area == ONENAND_DATARAM)
+ return this->writesize;
+ if (area == ONENAND_SPARERAM)
+ return mtd->oobsize;
+ }
+
+ return 0;
+}
+
+/**
+ * onenand_read_bufferram - [OneNAND Interface] Read the bufferram area
+ * @mtd: MTD data structure
+ * @area: BufferRAM area
+ * @buffer: the databuffer to put/get data
+ * @offset: offset to read from or write to
+ * @count: number of bytes to read/write
+ *
+ * Read the BufferRAM area
+ */
+static int onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset, size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *bufferram;
+
+ bufferram = this->base + area;
+
+ bufferram += onenand_bufferram_offset(mtd, area);
+
+ if (ONENAND_CHECK_BYTE_ACCESS(count)) {
+ unsigned short word;
+
+ /* Align with word(16-bit) size */
+ count--;
+
+ /* Read word and save byte */
+ word = this->read_word(bufferram + offset + count);
+ buffer[count] = (word & 0xff);
+ }
+
+ memcpy(buffer, bufferram + offset, count);
+
+ return 0;
+}
+
+/**
+ * onenand_sync_read_bufferram - [OneNAND Interface] Read the bufferram area with Sync. Burst mode
+ * @mtd: MTD data structure
+ * @area: BufferRAM area
+ * @buffer: the databuffer to put/get data
+ * @offset: offset to read from or write to
+ * @count: number of bytes to read/write
+ *
+ * Read the BufferRAM area with Sync. Burst Mode
+ */
+static int onenand_sync_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset, size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *bufferram;
+
+ bufferram = this->base + area;
+
+ bufferram += onenand_bufferram_offset(mtd, area);
+
+ this->mmcontrol(mtd, ONENAND_SYS_CFG1_SYNC_READ);
+
+ if (ONENAND_CHECK_BYTE_ACCESS(count)) {
+ unsigned short word;
+
+ /* Align with word (16-bit) size */
+ count--;
+
+ /* Read word and save byte */
+ word = this->read_word(bufferram + offset + count);
+ buffer[count] = (word & 0xff);
+ }
+
+ memcpy(buffer, bufferram + offset, count);
+
+ this->mmcontrol(mtd, 0);
+
+ return 0;
+}
+
+/**
+ * onenand_write_bufferram - [OneNAND Interface] Write the bufferram area
+ * @mtd: MTD data structure
+ * @area: BufferRAM area
+ * @buffer: the databuffer to put/get data
+ * @offset: offset to read from or write to
+ * @count: number of bytes to read/write
+ *
+ * Write the BufferRAM area
+ */
+static int onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer, int offset, size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *bufferram;
+
+ bufferram = this->base + area;
+
+ bufferram += onenand_bufferram_offset(mtd, area);
+
+ if (ONENAND_CHECK_BYTE_ACCESS(count)) {
+ unsigned short word;
+ int byte_offset;
+
+ /* Align with word (16-bit) size */
+ count--;
+
+ /* Calculate byte access offset */
+ byte_offset = offset + count;
+
+ /* Read word and save byte */
+ word = this->read_word(bufferram + byte_offset);
+ word = (word & ~0xff) | buffer[count];
+ this->write_word(word, bufferram + byte_offset);
+ }
+
+ memcpy(bufferram + offset, buffer, count);
+
+ return 0;
+}
+
+/**
+ * onenand_get_2x_blockpage - [GENERIC] Get blockpage in 2X program mode
+ * @mtd: MTD data structure
+ * @addr: address to check
+ * @return blockpage address
+ *
+ * Get the blockpage address in 2X program mode
+ */
+static int onenand_get_2x_blockpage(struct mtd_info *mtd, loff_t addr)
+{
+ struct onenand_chip *this = mtd->priv;
+ int blockpage, block, page;
+
+ /* Calculate the even block number */
+ block = (int) (addr >> this->erase_shift) & ~1;
+ /* Is it the odd plane? */
+ if (addr & this->writesize)
+ block++;
+ page = (int) (addr >> (this->page_shift + 1)) & this->page_mask;
+ blockpage = (block << 7) | page;
+
+ return blockpage;
+}
+
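+/*
+ * In 2X (dual-plane) program mode, pages are interleaved across an
+ * even/odd block pair: the even block number is taken, bumped to the
+ * odd plane when the address selects it, and the page index is halved.
+ * The "<< 7" packs the page into a 7-bit field, i.e. it assumes at
+ * most 128 pages per block.
+ */
+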
+/**
+ * onenand_check_bufferram - [GENERIC] Check BufferRAM information
+ * @mtd: MTD data structure
+ * @addr: address to check
+ * @return 1 if there is valid data, otherwise 0
+ *
+ * Check whether the BufferRAM holds the data we require
+ */
+static int onenand_check_bufferram(struct mtd_info *mtd, loff_t addr)
+{
+ struct onenand_chip *this = mtd->priv;
+ int blockpage, found = 0;
+ unsigned int i;
+
+ if (ONENAND_IS_2PLANE(this))
+ blockpage = onenand_get_2x_blockpage(mtd, addr);
+ else
+ blockpage = (int) (addr >> this->page_shift);
+
+ /* Is there valid data? */
+ i = ONENAND_CURRENT_BUFFERRAM(this);
+ if (this->bufferram[i].blockpage == blockpage)
+ found = 1;
+ else {
+ /* Check another BufferRAM */
+ i = ONENAND_NEXT_BUFFERRAM(this);
+ if (this->bufferram[i].blockpage == blockpage) {
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ found = 1;
+ }
+ }
+
+ if (found && ONENAND_IS_DDP(this)) {
+ /* Select DataRAM for DDP */
+ int block = onenand_block(this, addr);
+ int value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+ }
+
+ return found;
+}
+
+/**
+ * onenand_update_bufferram - [GENERIC] Update BufferRAM information
+ * @mtd: MTD data structure
+ * @addr: address to update
+ * @valid: valid flag
+ *
+ * Update BufferRAM information
+ */
+static void onenand_update_bufferram(struct mtd_info *mtd, loff_t addr,
+ int valid)
+{
+ struct onenand_chip *this = mtd->priv;
+ int blockpage;
+ unsigned int i;
+
+ if (ONENAND_IS_2PLANE(this))
+ blockpage = onenand_get_2x_blockpage(mtd, addr);
+ else
+ blockpage = (int) (addr >> this->page_shift);
+
+ /* Invalidate another BufferRAM */
+ i = ONENAND_NEXT_BUFFERRAM(this);
+ if (this->bufferram[i].blockpage == blockpage)
+ this->bufferram[i].blockpage = -1;
+
+ /* Update BufferRAM */
+ i = ONENAND_CURRENT_BUFFERRAM(this);
+ if (valid)
+ this->bufferram[i].blockpage = blockpage;
+ else
+ this->bufferram[i].blockpage = -1;
+}
+
+/**
+ * onenand_invalidate_bufferram - [GENERIC] Invalidate BufferRAM information
+ * @mtd: MTD data structure
+ * @addr: start address to invalidate
+ * @len: length to invalidate
+ *
+ * Invalidate BufferRAM information
+ */
+static void onenand_invalidate_bufferram(struct mtd_info *mtd, loff_t addr,
+ unsigned int len)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i;
+ loff_t end_addr = addr + len;
+
+ /* Invalidate BufferRAM */
+ for (i = 0; i < MAX_BUFFERRAM; i++) {
+ loff_t buf_addr = this->bufferram[i].blockpage << this->page_shift;
+ if (buf_addr >= addr && buf_addr < end_addr)
+ this->bufferram[i].blockpage = -1;
+ }
+}
+
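+/*
+ * Callers may pass len = (unsigned int)-1 to wipe every cached
+ * blockpage, as the write path does on error; see the
+ * onenand_invalidate_bufferram(mtd, 0, -1) call in
+ * onenand_write_ops_nolock().
+ */
+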
+/**
+ * onenand_get_device - [GENERIC] Get chip for selected access
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static int onenand_get_device(struct mtd_info *mtd, int new_state)
+{
+ struct onenand_chip *this = mtd->priv;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /*
+ * Grab the lock and see if the device is available
+ */
+ while (1) {
+ spin_lock(&this->chip_lock);
+ if (this->state == FL_READY) {
+ this->state = new_state;
+ spin_unlock(&this->chip_lock);
+ if (new_state != FL_PM_SUSPENDED && this->enable)
+ this->enable(mtd);
+ break;
+ }
+ if (new_state == FL_PM_SUSPENDED) {
+ spin_unlock(&this->chip_lock);
+ return (this->state == FL_PM_SUSPENDED) ? 0 : -EAGAIN;
+ }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&this->wq, &wait);
+ spin_unlock(&this->chip_lock);
+ schedule();
+ remove_wait_queue(&this->wq, &wait);
+ }
+
+ return 0;
+}
+
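+/*
+ * Every exported operation brackets its work with this pair, e.g.:
+ *
+ *	onenand_get_device(mtd, FL_READING);
+ *	ret = onenand_read_oob_nolock(mtd, from, ops);
+ *	onenand_release_device(mtd);
+ *
+ * so the _nolock helpers below always run with the chip owned.
+ */
+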
+/**
+ * onenand_release_device - [GENERIC] release chip
+ * @mtd: MTD device structure
+ *
+ * Deselect, release chip lock and wake up anyone waiting on the device
+ */
+static void onenand_release_device(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ if (this->state != FL_PM_SUSPENDED && this->disable)
+ this->disable(mtd);
+ /* Release the chip */
+ spin_lock(&this->chip_lock);
+ this->state = FL_READY;
+ wake_up(&this->wq);
+ spin_unlock(&this->chip_lock);
+}
+
+/**
+ * onenand_transfer_auto_oob - [INTERN] oob auto-placement transfer
+ * @mtd: MTD device structure
+ * @buf: destination address
+ * @column: oob offset to read from
+ * @thislen: oob length to read
+ */
+static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int column,
+ int thislen)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ this->read_bufferram(mtd, ONENAND_SPARERAM, this->oob_buf, 0,
+ mtd->oobsize);
+ return mtd_ooblayout_get_databytes(mtd, buf, this->oob_buf,
+ column, thislen);
+}
+
+/**
+ * onenand_recover_lsb - [Flex-OneNAND] Recover LSB page data
+ * @mtd: MTD device structure
+ * @addr: address to recover
+ * @status: return value from onenand_wait / onenand_bbt_wait
+ *
+ * MLC NAND flash cells have paired pages - an LSB page and an MSB page. The
+ * LSB page has the lower page address and the MSB page the higher page
+ * address within the pair. If power is lost during an MSB page program, the
+ * paired LSB page data can become corrupt. An LSB page recovery read is a
+ * way to read the LSB page even though its data is corrupted. When an
+ * uncorrectable error occurs on an LSB page read after power-up, issue an
+ * LSB page recovery read.
+ */
+static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i;
+
+ /* Recovery is only for Flex-OneNAND */
+ if (!FLEXONENAND(this))
+ return status;
+
+ /* Check if we failed due to an uncorrectable error */
+ if (!mtd_is_eccerr(status) && status != ONENAND_BBT_READ_ECC_ERROR)
+ return status;
+
+ /* Check if the address lies in an MLC region */
+ i = flexonenand_region(mtd, addr);
+ if (mtd->eraseregions[i].erasesize < (1 << this->erase_shift))
+ return status;
+
+ /* We are attempting to reread, so decrement stats.failed
+ * which was incremented by onenand_wait due to read failure
+ */
+ printk(KERN_INFO "%s: Attempting to recover from uncorrectable read\n",
+ __func__);
+ mtd->ecc_stats.failed--;
+
+ /* Issue the LSB page recovery command */
+ this->command(mtd, FLEXONENAND_CMD_RECOVER_LSB, addr, this->writesize);
+ return this->wait(mtd, FL_READING);
+}
+
+/**
+ * onenand_mlc_read_ops_nolock - MLC OneNAND read main and/or out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * MLC OneNAND / Flex-OneNAND have a 4KiB page size and a 4KiB DataRAM,
+ * so read-while-load is not available.
+ */
+static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
+ size_t len = ops->len;
+ size_t ooblen = ops->ooblen;
+ u_char *buf = ops->datbuf;
+ u_char *oobbuf = ops->oobbuf;
+ int read = 0, column, thislen;
+ int oobread = 0, oobcolumn, thisooblen, oobsize;
+ int ret = 0;
+ int writesize = this->writesize;
+
+ pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
+ (int)len);
+
+ oobsize = mtd_oobavail(mtd, ops);
+ oobcolumn = from & (mtd->oobsize - 1);
+
+ /* Do not allow reads past end of device */
+ if (from + len > mtd->size) {
+ printk(KERN_ERR "%s: Attempt read beyond end of device\n",
+ __func__);
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+ return -EINVAL;
+ }
+
+ stats = mtd->ecc_stats;
+
+ while (read < len) {
+ cond_resched();
+
+ thislen = min_t(int, writesize, len - read);
+
+ column = from & (writesize - 1);
+ if (column + thislen > writesize)
+ thislen = writesize - column;
+
+ if (!onenand_check_bufferram(mtd, from)) {
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+
+ ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (mtd_is_eccerr(ret))
+ ret = 0;
+ if (ret)
+ break;
+ }
+
+ this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
+ if (oobbuf) {
+ thisooblen = oobsize - oobcolumn;
+ thisooblen = min_t(int, thisooblen, ooblen - oobread);
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
+ else
+ this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
+ oobread += thisooblen;
+ oobbuf += thisooblen;
+ oobcolumn = 0;
+ }
+
+ read += thislen;
+ if (read == len)
+ break;
+
+ from += thislen;
+ buf += thislen;
+ }
+
+ /*
+ * Return success if there were no ECC failures, else -EBADMSG.
+ * The fs driver will take care of that, because
+ * retlen == desired len and result == -EBADMSG
+ */
+ ops->retlen = read;
+ ops->oobretlen = oobread;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ /* Return max bitflips per ecc step; OneNAND corrects 1 bit only */
+ return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
+}
+
+/**
+ * onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * OneNAND read main and/or out-of-band data
+ */
+static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
+ size_t len = ops->len;
+ size_t ooblen = ops->ooblen;
+ u_char *buf = ops->datbuf;
+ u_char *oobbuf = ops->oobbuf;
+ int read = 0, column, thislen;
+ int oobread = 0, oobcolumn, thisooblen, oobsize;
+ int ret = 0, boundary = 0;
+ int writesize = this->writesize;
+
+ pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
+ (int)len);
+
+ oobsize = mtd_oobavail(mtd, ops);
+ oobcolumn = from & (mtd->oobsize - 1);
+
+ /* Do not allow reads past end of device */
+ if ((from + len) > mtd->size) {
+ printk(KERN_ERR "%s: Attempt read beyond end of device\n",
+ __func__);
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+ return -EINVAL;
+ }
+
+ stats = mtd->ecc_stats;
+
+ /* Read-while-load method */
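+ /*
+ * Pipeline: while page N is being copied out of one BufferRAM,
+ * the load of page N+1 is already issued into the other one;
+ * the wait for that load happens only after the copy (and any
+ * DDP chip-boundary fix-up) is done.
+ */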
+
+ /* Do first load to bufferRAM */
+ if (read < len) {
+ if (!onenand_check_bufferram(mtd, from)) {
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+ ret = this->wait(mtd, FL_READING);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (mtd_is_eccerr(ret))
+ ret = 0;
+ }
+ }
+
+ thislen = min_t(int, writesize, len - read);
+ column = from & (writesize - 1);
+ if (column + thislen > writesize)
+ thislen = writesize - column;
+
+ while (!ret) {
+ /* If there is more to load, then start the next load */
+ from += thislen;
+ if (read + thislen < len) {
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+ /*
+ * Chip boundary handling in DDP:
+ * we have just issued the chip 1 read and pointed to the
+ * chip 1 BufferRAM, so we must point back to the chip 0
+ * BufferRAM.
+ */
+ if (ONENAND_IS_DDP(this) &&
+ unlikely(from == (this->chipsize >> 1))) {
+ this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
+ boundary = 1;
+ } else
+ boundary = 0;
+ ONENAND_SET_PREV_BUFFERRAM(this);
+ }
+ /* While the load is in progress, read from the last BufferRAM */
+ this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
+
+ /* Read oob area if needed */
+ if (oobbuf) {
+ thisooblen = oobsize - oobcolumn;
+ thisooblen = min_t(int, thisooblen, ooblen - oobread);
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
+ else
+ this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
+ oobread += thisooblen;
+ oobbuf += thisooblen;
+ oobcolumn = 0;
+ }
+
+ /* See if we are done */
+ read += thislen;
+ if (read == len)
+ break;
+ /* Set up for next read from bufferRAM */
+ if (unlikely(boundary))
+ this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ buf += thislen;
+ thislen = min_t(int, writesize, len - read);
+ column = 0;
+ cond_resched();
+ /* Now wait for load */
+ ret = this->wait(mtd, FL_READING);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (mtd_is_eccerr(ret))
+ ret = 0;
+ }
+
+ /*
+ * Return success if there were no ECC failures, else -EBADMSG.
+ * The fs driver will take care of that, because
+ * retlen == desired len and result == -EBADMSG
+ */
+ ops->retlen = read;
+ ops->oobretlen = oobread;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ /* Return max bitflips per ecc step; OneNAND corrects 1 bit only */
+ return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
+}
+
+/**
+ * onenand_read_oob_nolock - [MTD Interface] OneNAND read out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * OneNAND read out-of-band data from the spare area
+ */
+static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
+ int read = 0, thislen, column, oobsize;
+ size_t len = ops->ooblen;
+ unsigned int mode = ops->mode;
+ u_char *buf = ops->oobbuf;
+ int ret = 0, readcmd;
+
+ from += ops->ooboffs;
+
+ pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
+ (int)len);
+
+ /* Initialize return length value */
+ ops->oobretlen = 0;
+
+ if (mode == MTD_OPS_AUTO_OOB)
+ oobsize = mtd->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ column = from & (mtd->oobsize - 1);
+
+ if (unlikely(column >= oobsize)) {
+ printk(KERN_ERR "%s: Attempted to start read outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ stats = mtd->ecc_stats;
+
+ readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ while (read < len) {
+ cond_resched();
+
+ thislen = oobsize - column;
+ thislen = min_t(int, thislen, len);
+
+ this->command(mtd, readcmd, from, mtd->oobsize);
+
+ onenand_update_bufferram(mtd, from, 0);
+
+ ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
+ if (ret && !mtd_is_eccerr(ret)) {
+ printk(KERN_ERR "%s: read failed = 0x%x\n",
+ __func__, ret);
+ break;
+ }
+
+ if (mode == MTD_OPS_AUTO_OOB)
+ onenand_transfer_auto_oob(mtd, buf, column, thislen);
+ else
+ this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
+
+ read += thislen;
+
+ if (read == len)
+ break;
+
+ buf += thislen;
+
+ /* Read more? */
+ if (read < len) {
+ /* Advance by one page */
+ from += mtd->writesize;
+ column = 0;
+ }
+ }
+
+ ops->oobretlen = read;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return 0;
+}
+
+/**
+ * onenand_read_oob - [MTD Interface] Read main and/or out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * Read main and/or out-of-band
+ */
+static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats old_stats;
+ int ret;
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ break;
+ case MTD_OPS_RAW:
+ /* Not implemented yet */
+ default:
+ return -EINVAL;
+ }
+
+ onenand_get_device(mtd, FL_READING);
+
+ old_stats = mtd->ecc_stats;
+
+ if (ops->datbuf)
+ ret = ONENAND_IS_4KB_PAGE(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, ops) :
+ onenand_read_ops_nolock(mtd, from, ops);
+ else
+ ret = onenand_read_oob_nolock(mtd, from, ops);
+
+ if (ops->stats) {
+ ops->stats->uncorrectable_errors +=
+ mtd->ecc_stats.failed - old_stats.failed;
+ ops->stats->corrected_bitflips +=
+ mtd->ecc_stats.corrected - old_stats.corrected;
+ }
+
+ onenand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * onenand_bbt_wait - [DEFAULT] wait until the command is done
+ * @mtd: MTD device structure
+ * @state: state to select the max. timeout value
+ *
+ * Wait for command done.
+ */
+static int onenand_bbt_wait(struct mtd_info *mtd, int state)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned long timeout;
+ unsigned int interrupt, ctrl, ecc, addr1, addr8;
+
+ /* 20 msec is enough */
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+ if (interrupt & ONENAND_INT_MASTER)
+ break;
+ }
+ /* To get correct interrupt status in timeout case */
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+ ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+ addr1 = this->read_word(this->base + ONENAND_REG_START_ADDRESS1);
+ addr8 = this->read_word(this->base + ONENAND_REG_START_ADDRESS8);
+
+ if (interrupt & ONENAND_INT_READ) {
+ ecc = onenand_read_ecc(this);
+ if (ecc & ONENAND_ECC_2BIT_ALL) {
+ printk(KERN_DEBUG "%s: ecc 0x%04x ctrl 0x%04x "
+ "intr 0x%04x addr1 %#x addr8 %#x\n",
+ __func__, ecc, ctrl, interrupt, addr1, addr8);
+ return ONENAND_BBT_READ_ECC_ERROR;
+ }
+ } else {
+ printk(KERN_ERR "%s: read timeout! ctrl 0x%04x "
+ "intr 0x%04x addr1 %#x addr8 %#x\n",
+ __func__, ctrl, interrupt, addr1, addr8);
+ return ONENAND_BBT_READ_FATAL_ERROR;
+ }
+
+ /* Initial bad block case: 0x2400 or 0x0400 */
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ printk(KERN_DEBUG "%s: ctrl 0x%04x intr 0x%04x addr1 %#x "
+ "addr8 %#x\n", __func__, ctrl, interrupt, addr1, addr8);
+ return ONENAND_BBT_READ_ERROR;
+ }
+
+ return 0;
+}
+
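+/*
+ * The distinct ONENAND_BBT_READ_* codes let the BBT scanner react per
+ * failure class: ECC_ERROR marks an uncorrectable page, FATAL_ERROR a
+ * timed-out read, and READ_ERROR the controller flagging an initial
+ * bad block (ctrl status 0x2400/0x0400).
+ */
+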
+/**
+ * onenand_bbt_read_oob - [MTD Interface] OneNAND read out-of-band for bbt scan
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * OneNAND read out-of-band data from the spare area for bbt scan
+ */
+int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int read = 0, thislen, column;
+ int ret = 0, readcmd;
+ size_t len = ops->ooblen;
+ u_char *buf = ops->oobbuf;
+
+ pr_debug("%s: from = 0x%08x, len = %zi\n", __func__, (unsigned int)from,
+ len);
+
+ /* Initialize return value */
+ ops->oobretlen = 0;
+
+ /* Do not allow reads past end of device */
+ if (unlikely((from + len) > mtd->size)) {
+ printk(KERN_ERR "%s: Attempt read beyond end of device\n",
+ __func__);
+ return ONENAND_BBT_READ_FATAL_ERROR;
+ }
+
+ /* Grab the lock and see if the device is available */
+ onenand_get_device(mtd, FL_READING);
+
+ column = from & (mtd->oobsize - 1);
+
+ readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ while (read < len) {
+ cond_resched();
+
+ thislen = mtd->oobsize - column;
+ thislen = min_t(int, thislen, len);
+
+ this->command(mtd, readcmd, from, mtd->oobsize);
+
+ onenand_update_bufferram(mtd, from, 0);
+
+ ret = this->bbt_wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
+ if (ret)
+ break;
+
+ this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
+ read += thislen;
+ if (read == len)
+ break;
+
+ buf += thislen;
+
+ /* Read more? */
+ if (read < len) {
+ /* Advance by one page */
+ from += this->writesize;
+ column = 0;
+ }
+ }
+
+ /* Deselect and wake up anyone waiting on the device */
+ onenand_release_device(mtd);
+
+ ops->oobretlen = read;
+ return ret;
+}
+
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+/**
+ * onenand_verify_oob - [GENERIC] verify the oob contents after a write
+ * @mtd: MTD device structure
+ * @buf: the databuffer to verify
+ * @to: offset to read from
+ */
+static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to)
+{
+ struct onenand_chip *this = mtd->priv;
+ u_char *oob_buf = this->oob_buf;
+ int status, i, readcmd;
+
+ readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ this->command(mtd, readcmd, to, mtd->oobsize);
+ onenand_update_bufferram(mtd, to, 0);
+ status = this->wait(mtd, FL_READING);
+ if (status)
+ return status;
+
+ this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
+ for (i = 0; i < mtd->oobsize; i++)
+ if (buf[i] != 0xFF && buf[i] != oob_buf[i])
+ return -EBADMSG;
+
+ return 0;
+}
+
+/**
+ * onenand_verify - [GENERIC] verify the chip contents after a write
+ * @mtd: MTD device structure
+ * @buf: the databuffer to verify
+ * @addr: offset to read from
+ * @len: number of bytes to read and compare
+ */
+static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ int ret = 0;
+ int thislen, column;
+
+ column = addr & (this->writesize - 1);
+
+ while (len != 0) {
+ thislen = min_t(int, this->writesize - column, len);
+
+ this->command(mtd, ONENAND_CMD_READ, addr, this->writesize);
+
+ onenand_update_bufferram(mtd, addr, 0);
+
+ ret = this->wait(mtd, FL_READING);
+ if (ret)
+ return ret;
+
+ onenand_update_bufferram(mtd, addr, 1);
+
+ this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize);
+
+ if (memcmp(buf, this->verify_buf + column, thislen))
+ return -EBADMSG;
+
+ len -= thislen;
+ buf += thislen;
+ addr += thislen;
+ column = 0;
+ }
+
+ return 0;
+}
+#else
+#define onenand_verify(...) (0)
+#define onenand_verify_oob(...) (0)
+#endif
+
+#define NOTALIGNED(x) (((x) & (this->subpagesize - 1)) != 0)
+
+static void onenand_panic_wait(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int interrupt;
+ int i;
+
+ for (i = 0; i < 2000; i++) {
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+ if (interrupt & ONENAND_INT_MASTER)
+ break;
+ udelay(10);
+ }
+}
+
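+/*
+ * A busy-wait of at most 2000 * 10 us = 20 ms, matching the timeout
+ * used by onenand_bbt_wait() above; no sleeping or locking, since
+ * this runs in panic context.
+ */
+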
+/**
+ * onenand_panic_write - [MTD Interface] write buffer to FLASH in a panic context
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * Write with ECC
+ */
+static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct onenand_chip *this = mtd->priv;
+ int column, subpage;
+ int written = 0;
+
+ if (this->state == FL_PM_SUSPENDED)
+ return -EBUSY;
+
+ /* Wait for any existing operation to clear */
+ onenand_panic_wait(mtd);
+
+ pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
+ (int)len);
+
+ /* Reject writes that are not page aligned */
+ if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
+ printk(KERN_ERR "%s: Attempt to write non-page-aligned data\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ column = to & (mtd->writesize - 1);
+
+ /* Loop until all data is written */
+ while (written < len) {
+ int thislen = min_t(int, mtd->writesize - column, len - written);
+ u_char *wbuf = (u_char *) buf;
+
+ this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
+
+ /* Partial page write */
+ subpage = thislen < mtd->writesize;
+ if (subpage) {
+ memset(this->page_buf, 0xff, mtd->writesize);
+ memcpy(this->page_buf + column, buf, thislen);
+ wbuf = this->page_buf;
+ }
+
+ this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize);
+ this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize);
+
+ this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
+
+ onenand_panic_wait(mtd);
+
+ /* In partial page write we don't update bufferram */
+ onenand_update_bufferram(mtd, to, !subpage);
+ if (ONENAND_IS_2PLANE(this)) {
+ ONENAND_SET_BUFFERRAM1(this);
+ onenand_update_bufferram(mtd, to + this->writesize, !subpage);
+ }
+
+ written += thislen;
+
+ if (written == len)
+ break;
+
+ column = 0;
+ to += thislen;
+ buf += thislen;
+ }
+
+ *retlen = written;
+ return 0;
+}
+
+/**
+ * onenand_fill_auto_oob - [INTERN] oob auto-placement transfer
+ * @mtd: MTD device structure
+ * @oob_buf: oob buffer
+ * @buf: source address
+ * @column: oob offset to write to
+ * @thislen: oob length to write
+ */
+static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf,
+ const u_char *buf, int column, int thislen)
+{
+ return mtd_ooblayout_set_databytes(mtd, buf, oob_buf, column, thislen);
+}
+
+/**
+ * onenand_write_ops_nolock - [OneNAND Interface] write main and/or out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ *
+ * Write main and/or oob with ECC
+ */
+static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int written = 0, column, thislen = 0, subpage = 0;
+ int prev = 0, prevlen = 0, prev_subpage = 0, first = 1;
+ int oobwritten = 0, oobcolumn, thisooblen, oobsize;
+ size_t len = ops->len;
+ size_t ooblen = ops->ooblen;
+ const u_char *buf = ops->datbuf;
+ const u_char *oob = ops->oobbuf;
+ u_char *oobbuf;
+ int ret = 0, cmd;
+
+ pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
+ (int)len);
+
+ /* Initialize retlen, in case of early exit */
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ /* Reject writes that are not page aligned */
+ if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
+ printk(KERN_ERR "%s: Attempt to write non-page-aligned data\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check zero length */
+ if (!len)
+ return 0;
+ oobsize = mtd_oobavail(mtd, ops);
+ oobcolumn = to & (mtd->oobsize - 1);
+
+ column = to & (mtd->writesize - 1);
+
+ /* Loop until all data is written */
+ while (1) {
+ if (written < len) {
+ u_char *wbuf = (u_char *) buf;
+
+ thislen = min_t(int, mtd->writesize - column, len - written);
+ thisooblen = min_t(int, oobsize - oobcolumn, ooblen - oobwritten);
+
+ cond_resched();
+
+ this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
+
+ /* Partial page write */
+ subpage = thislen < mtd->writesize;
+ if (subpage) {
+ memset(this->page_buf, 0xff, mtd->writesize);
+ memcpy(this->page_buf + column, buf, thislen);
+ wbuf = this->page_buf;
+ }
+
+ this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize);
+
+ if (oob) {
+ oobbuf = this->oob_buf;
+
+ /* We send data to the spare RAM with the full oobsize
+ * to avoid byte access */
+ memset(oobbuf, 0xff, mtd->oobsize);
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen);
+ else
+ memcpy(oobbuf + oobcolumn, oob, thisooblen);
+
+ oobwritten += thisooblen;
+ oob += thisooblen;
+ oobcolumn = 0;
+ } else
+ oobbuf = (u_char *) ffchars;
+
+ this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
+ } else
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+
+ /*
+ * 2-plane, MLC, and Flex-OneNAND do not support the
+ * write-while-program feature.
+ */
+ if (!ONENAND_IS_2PLANE(this) && !ONENAND_IS_4KB_PAGE(this) && !first) {
+ ONENAND_SET_PREV_BUFFERRAM(this);
+
+ ret = this->wait(mtd, FL_WRITING);
+
+ /* In partial page write we don't update bufferram */
+ onenand_update_bufferram(mtd, prev, !ret && !prev_subpage);
+ if (ret) {
+ written -= prevlen;
+ printk(KERN_ERR "%s: write failed %d\n",
+ __func__, ret);
+ break;
+ }
+
+ if (written == len) {
+ /* Verification only takes effect when verify-write is enabled */
+ ret = onenand_verify(mtd, buf - len, to - len, len);
+ if (ret)
+ printk(KERN_ERR "%s: verify failed %d\n",
+ __func__, ret);
+ break;
+ }
+
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ }
+
+ this->ongoing = 0;
+ cmd = ONENAND_CMD_PROG;
+
+ /* Exclude the 1st block, which holds OTP, from the cache program feature */
+ if (ONENAND_IS_CACHE_PROGRAM(this) &&
+ likely(onenand_block(this, to) != 0) &&
+ ONENAND_IS_4KB_PAGE(this) &&
+ ((written + thislen) < len)) {
+ cmd = ONENAND_CMD_2X_CACHE_PROG;
+ this->ongoing = 1;
+ }
+
+ this->command(mtd, cmd, to, mtd->writesize);
+
+ /*
+ * 2 PLANE, MLC, and Flex-OneNAND wait here
+ */
+ if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this)) {
+ ret = this->wait(mtd, FL_WRITING);
+
+ /* In partial page write we don't update bufferram */
+ onenand_update_bufferram(mtd, to, !ret && !subpage);
+ if (ret) {
+ printk(KERN_ERR "%s: write failed %d\n",
+ __func__, ret);
+ break;
+ }
+
+ /* Verification only takes effect when verify-write is enabled */
+ ret = onenand_verify(mtd, buf, to, thislen);
+ if (ret) {
+ printk(KERN_ERR "%s: verify failed %d\n",
+ __func__, ret);
+ break;
+ }
+
+ written += thislen;
+
+ if (written == len)
+ break;
+
+ } else
+ written += thislen;
+
+ column = 0;
+ prev_subpage = subpage;
+ prev = to;
+ prevlen = thislen;
+ to += thislen;
+ buf += thislen;
+ first = 0;
+ }
+
+ /* In error case, clear all bufferrams */
+ if (written != len)
+ onenand_invalidate_bufferram(mtd, 0, -1);
+
+ ops->retlen = written;
+ ops->oobretlen = oobwritten;
+
+ return ret;
+}
+
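+/*
+ * Write pipeline summary: on parts that support write-while-program,
+ * the BufferRAM for page N+1 is filled while page N is still being
+ * programmed, and the wait for page N is deferred to the next loop
+ * iteration (hence the prev/prevlen/prev_subpage bookkeeping).
+ * 2-plane and 4KiB-page parts instead wait synchronously after each
+ * program command.
+ */
+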
+
+/**
+ * onenand_write_oob_nolock - [INTERN] OneNAND write out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ *
+ * OneNAND write out-of-band
+ */
+static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int column, ret = 0, oobsize;
+ int written = 0, oobcmd;
+ u_char *oobbuf;
+ size_t len = ops->ooblen;
+ const u_char *buf = ops->oobbuf;
+ unsigned int mode = ops->mode;
+
+ to += ops->ooboffs;
+
+ pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
+ (int)len);
+
+ /* Initialize retlen, in case of early exit */
+ ops->oobretlen = 0;
+
+ if (mode == MTD_OPS_AUTO_OOB)
+ oobsize = mtd->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ column = to & (mtd->oobsize - 1);
+
+ if (unlikely(column >= oobsize)) {
+ printk(KERN_ERR "%s: Attempted to start write outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* For compatibility with NAND: Do not allow write past end of page */
+ if (unlikely(column + len > oobsize)) {
+ printk(KERN_ERR "%s: Attempt to write past end of page\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ oobbuf = this->oob_buf;
+
+ oobcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
+
+ /* Loop until all data is written */
+ while (written < len) {
+ int thislen = min_t(int, oobsize, len - written);
+
+ cond_resched();
+
+ this->command(mtd, ONENAND_CMD_BUFFERRAM, to, mtd->oobsize);
+
+ /* We send data to the spare RAM with the full oobsize
+ * to avoid byte access */
+ memset(oobbuf, 0xff, mtd->oobsize);
+ if (mode == MTD_OPS_AUTO_OOB)
+ onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen);
+ else
+ memcpy(oobbuf + column, buf, thislen);
+ this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
+
+ if (ONENAND_IS_4KB_PAGE(this)) {
+ /* Set main area of DataRAM to 0xff */
+ memset(this->page_buf, 0xff, mtd->writesize);
+ this->write_bufferram(mtd, ONENAND_DATARAM,
+ this->page_buf, 0, mtd->writesize);
+ }
+
+ this->command(mtd, oobcmd, to, mtd->oobsize);
+
+ onenand_update_bufferram(mtd, to, 0);
+ if (ONENAND_IS_2PLANE(this)) {
+ ONENAND_SET_BUFFERRAM1(this);
+ onenand_update_bufferram(mtd, to + this->writesize, 0);
+ }
+
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
+ break;
+ }
+
+ ret = onenand_verify_oob(mtd, oobbuf, to);
+ if (ret) {
+ printk(KERN_ERR "%s: verify failed %d\n",
+ __func__, ret);
+ break;
+ }
+
+ written += thislen;
+ if (written == len)
+ break;
+
+ to += mtd->writesize;
+ buf += thislen;
+ column = 0;
+ }
+
+ ops->oobretlen = written;
+
+ return ret;
+}
+
+/**
+ * onenand_write_oob - [MTD Interface] OneNAND write data and/or out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write
+ * @ops: oob operation description structure
+ */
+static int onenand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ret;
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ break;
+ case MTD_OPS_RAW:
+ /* Not implemented yet */
+ default:
+ return -EINVAL;
+ }
+
+ onenand_get_device(mtd, FL_WRITING);
+ if (ops->datbuf)
+ ret = onenand_write_ops_nolock(mtd, to, ops);
+ else
+ ret = onenand_write_oob_nolock(mtd, to, ops);
+ onenand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * onenand_block_isbad_nolock - [GENERIC] Check if a block is marked bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @allowbbt: 1, if it is allowed to access the bbt area
+ *
+ * Check if the block is bad, either by reading the bad block table or
+ * by calling the scan function.
+ */
+static int onenand_block_isbad_nolock(struct mtd_info *mtd, loff_t ofs, int allowbbt)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+
+ /* Return info from the table */
+ return bbm->isbad_bbt(mtd, ofs, allowbbt);
+}
+
+
+static int onenand_multiblock_erase_verify(struct mtd_info *mtd,
+ struct erase_info *instr)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t addr = instr->addr;
+ int len = instr->len;
+ unsigned int block_size = (1 << this->erase_shift);
+ int ret = 0;
+
+ while (len) {
+ this->command(mtd, ONENAND_CMD_ERASE_VERIFY, addr, block_size);
+ ret = this->wait(mtd, FL_VERIFYING_ERASE);
+ if (ret) {
+ printk(KERN_ERR "%s: Failed verify, block %d\n",
+ __func__, onenand_block(this, addr));
+ instr->fail_addr = addr;
+ return -1;
+ }
+ len -= block_size;
+ addr += block_size;
+ }
+ return 0;
+}
+
+/**
+ * onenand_multiblock_erase - [INTERN] erase block(s) using multiblock erase
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ * @block_size: block size
+ *
+ * Erase one or more blocks, up to 64 blocks at a time
+ */
+static int onenand_multiblock_erase(struct mtd_info *mtd,
+ struct erase_info *instr,
+ unsigned int block_size)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t addr = instr->addr;
+ int len = instr->len;
+ int eb_count = 0;
+ int ret = 0;
+ int bdry_block = 0;
+
+ if (ONENAND_IS_DDP(this)) {
+ loff_t bdry_addr = this->chipsize >> 1;
+ if (addr < bdry_addr && (addr + len) > bdry_addr)
+ bdry_block = bdry_addr >> this->erase_shift;
+ }
+
+ /* Pre-check for bad blocks */
+ while (len) {
+ /* Check if we have a bad block, we do not erase bad blocks */
+ if (onenand_block_isbad_nolock(mtd, addr, 0)) {
+ printk(KERN_WARNING "%s: attempt to erase a bad block "
+ "at addr 0x%012llx\n",
+ __func__, (unsigned long long) addr);
+ return -EIO;
+ }
+ len -= block_size;
+ addr += block_size;
+ }
+
+ len = instr->len;
+ addr = instr->addr;
+
+ /* Loop over batches of up to 64 erase blocks */
+ while (len) {
+ struct erase_info verify_instr = *instr;
+ int max_eb_count = MB_ERASE_MAX_BLK_COUNT;
+
+ verify_instr.addr = addr;
+ verify_instr.len = 0;
+
+ /* do not cross chip boundary */
+ if (bdry_block) {
+ int this_block = (addr >> this->erase_shift);
+
+ if (this_block < bdry_block) {
+ max_eb_count = min(max_eb_count,
+ (bdry_block - this_block));
+ }
+ }
+
+ eb_count = 0;
+
+ while (len > block_size && eb_count < (max_eb_count - 1)) {
+ this->command(mtd, ONENAND_CMD_MULTIBLOCK_ERASE,
+ addr, block_size);
+ onenand_invalidate_bufferram(mtd, addr, block_size);
+
+ ret = this->wait(mtd, FL_PREPARING_ERASE);
+ if (ret) {
+ printk(KERN_ERR "%s: Failed multiblock erase, "
+ "block %d\n", __func__,
+ onenand_block(this, addr));
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ return -EIO;
+ }
+
+ len -= block_size;
+ addr += block_size;
+ eb_count++;
+ }
+
+ /* last block of 64-eb series */
+ cond_resched();
+ this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
+ onenand_invalidate_bufferram(mtd, addr, block_size);
+
+ ret = this->wait(mtd, FL_ERASING);
+ /* Check if it is write protected */
+ if (ret) {
+ printk(KERN_ERR "%s: Failed erase, block %d\n",
+ __func__, onenand_block(this, addr));
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ return -EIO;
+ }
+
+ len -= block_size;
+ addr += block_size;
+ eb_count++;
+
+ /* verify */
+ verify_instr.len = eb_count * block_size;
+ if (onenand_multiblock_erase_verify(mtd, &verify_instr)) {
+ instr->fail_addr = verify_instr.fail_addr;
+ return -EIO;
+ }
+
+ }
+ return 0;
+}
+
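+/*
+ * Multiblock erase protocol, as implemented above: queue up to
+ * MB_ERASE_MAX_BLK_COUNT - 1 blocks with ONENAND_CMD_MULTIBLOCK_ERASE,
+ * trigger the whole batch with a final ONENAND_CMD_ERASE, then sweep
+ * the batch with ONENAND_CMD_ERASE_VERIFY. Bad blocks are screened out
+ * beforehand, and a batch never crosses a DDP chip boundary.
+ */
+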
+
+/**
+ * onenand_block_by_block_erase - [INTERN] erase block(s) using regular erase
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ * @region: erase region
+ * @block_size: erase block size
+ *
+ * Erase one or more blocks one block at a time
+ */
+static int onenand_block_by_block_erase(struct mtd_info *mtd,
+ struct erase_info *instr,
+ struct mtd_erase_region_info *region,
+ unsigned int block_size)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t addr = instr->addr;
+ int len = instr->len;
+ loff_t region_end = 0;
+ int ret = 0;
+
+ if (region) {
+ /* region is set for Flex-OneNAND */
+ region_end = region->offset + region->erasesize * region->numblocks;
+ }
+
+ /* Loop through the blocks */
+ while (len) {
+ cond_resched();
+
+ /* Check if we have a bad block, we do not erase bad blocks */
+ if (onenand_block_isbad_nolock(mtd, addr, 0)) {
+ printk(KERN_WARNING "%s: attempt to erase a bad block "
+ "at addr 0x%012llx\n",
+ __func__, (unsigned long long) addr);
+ return -EIO;
+ }
+
+ this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
+
+ onenand_invalidate_bufferram(mtd, addr, block_size);
+
+ ret = this->wait(mtd, FL_ERASING);
+ /* Check if it is write protected */
+ if (ret) {
+ printk(KERN_ERR "%s: Failed erase, block %d\n",
+ __func__, onenand_block(this, addr));
+ instr->fail_addr = addr;
+ return -EIO;
+ }
+
+ len -= block_size;
+ addr += block_size;
+
+ if (region && addr == region_end) {
+ if (!len)
+ break;
+ region++;
+
+ block_size = region->erasesize;
+ region_end = region->offset + region->erasesize * region->numblocks;
+
+ if (len & (block_size - 1)) {
+ /* FIXME: This should be handled at MTD partitioning level. */
+ printk(KERN_ERR "%s: Unaligned address\n",
+ __func__);
+ return -EIO;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * onenand_erase - [MTD Interface] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ *
+ * Erase one or more blocks
+ */
+static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int block_size;
+ loff_t addr = instr->addr;
+ loff_t len = instr->len;
+ int ret = 0;
+ struct mtd_erase_region_info *region = NULL;
+ loff_t region_offset = 0;
+
+ pr_debug("%s: start=0x%012llx, len=%llu\n", __func__,
+ (unsigned long long)instr->addr,
+ (unsigned long long)instr->len);
+
+ if (FLEXONENAND(this)) {
+ /* Find the eraseregion of this address */
+ int i = flexonenand_region(mtd, addr);
+
+ region = &mtd->eraseregions[i];
+ block_size = region->erasesize;
+
+ /* Start address within region must align on block boundary.
+ * Erase region's start offset is always block start address.
+ */
+ region_offset = region->offset;
+ } else
+ block_size = 1 << this->erase_shift;
+
+ /* Start address must align on block boundary */
+ if (unlikely((addr - region_offset) & (block_size - 1))) {
+ printk(KERN_ERR "%s: Unaligned address\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (unlikely(len & (block_size - 1))) {
+ printk(KERN_ERR "%s: Length not block aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Grab the lock and see if the device is available */
+ onenand_get_device(mtd, FL_ERASING);
+
+ if (ONENAND_IS_4KB_PAGE(this) || region ||
+ instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
+ /* region is set for Flex-OneNAND (no mb erase) */
+ ret = onenand_block_by_block_erase(mtd, instr,
+ region, block_size);
+ } else {
+ ret = onenand_multiblock_erase(mtd, instr, block_size);
+ }
+
+ /* Deselect and wake up anyone waiting on the device */
+ onenand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * onenand_sync - [MTD Interface] sync
+ * @mtd: MTD device structure
+ *
+ * Sync is actually just a wait for the chip to become ready
+ */
+static void onenand_sync(struct mtd_info *mtd)
+{
+ pr_debug("%s: called\n", __func__);
+
+ /* Grab the lock and see if the device is available */
+ onenand_get_device(mtd, FL_SYNCING);
+
+ /* Release it and go back */
+ onenand_release_device(mtd);
+}
+
+/**
+ * onenand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ *
+ * Check whether the block is bad
+ */
+static int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ int ret;
+
+ onenand_get_device(mtd, FL_READING);
+ ret = onenand_block_isbad_nolock(mtd, ofs, 0);
+ onenand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * onenand_default_block_markbad - [DEFAULT] mark a block bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This is the default implementation, which can be overridden by
+ * a hardware specific driver.
+ */
+static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ u_char buf[2] = {0, 0};
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OPS_PLACE_OOB,
+ .ooblen = 2,
+ .oobbuf = buf,
+ .ooboffs = 0,
+ };
+ int block;
+
+ /* Get block number */
+ block = onenand_block(this, ofs);
+ if (bbm->bbt)
+ bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+
+ /* We write two bytes, so we don't have to mess with 16-bit access */
+ ofs += mtd->oobsize + (this->badblockpos & ~0x01);
+ /* FIXME : What to do when marking SLC block in partition
+ * with MLC erasesize? For now, it is not advisable to
+ * create partitions containing both SLC and MLC regions.
+ */
+ return onenand_write_oob_nolock(mtd, ofs, &ops);
+}
+
+/**
+ * onenand_block_markbad - [MTD Interface] Mark the block at the given offset as bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ *
+ * Mark the block as bad
+ */
+static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct onenand_chip *this = mtd->priv;
+ int ret;
+
+ ret = onenand_block_isbad(mtd, ofs);
+ if (ret) {
+ /* If it was bad already, return success and do nothing */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ onenand_get_device(mtd, FL_WRITING);
+ ret = this->block_markbad(mtd, ofs);
+ onenand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * onenand_do_lock_cmd - [OneNAND Interface] Lock or unlock block(s)
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ * @len: number of bytes to lock or unlock
+ * @cmd: lock or unlock command
+ *
+ * Lock or unlock one or more blocks
+ */
+static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int cmd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int start, end, block, value, status;
+ int wp_status_mask;
+
+ start = onenand_block(this, ofs);
+ end = onenand_block(this, ofs + len) - 1;
+
+ if (cmd == ONENAND_CMD_LOCK)
+ wp_status_mask = ONENAND_WP_LS;
+ else
+ wp_status_mask = ONENAND_WP_US;
+
+ /* Continuous lock scheme */
+ if (this->options & ONENAND_HAS_CONT_LOCK) {
+ /* Set start block address */
+ this->write_word(start, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+ /* Set end block address */
+ this->write_word(end, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
+ /* Write lock command */
+ this->command(mtd, cmd, 0, 0);
+
+ /* There's no return value */
+ this->wait(mtd, FL_LOCKING);
+
+ /* Sanity check */
+ while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
+ & ONENAND_CTRL_ONGO)
+ continue;
+
+ /* Check lock status */
+ status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
+ if (!(status & wp_status_mask))
+ printk(KERN_ERR "%s: wp status = 0x%x\n",
+ __func__, status);
+
+ return 0;
+ }
+
+ /* Block lock scheme */
+ for (block = start; block < end + 1; block++) {
+ /* Set block address */
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+ /* Set start block address */
+ this->write_word(block, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+ /* Write lock command */
+ this->command(mtd, cmd, 0, 0);
+
+ /* There's no return value */
+ this->wait(mtd, FL_LOCKING);
+
+ /* Sanity check */
+ while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
+ & ONENAND_CTRL_ONGO)
+ continue;
+
+ /* Check lock status */
+ status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
+ if (!(status & wp_status_mask))
+ printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
+ __func__, block, status);
+ }
+
+ return 0;
+}
+
+/**
+ * onenand_lock - [MTD Interface] Lock block(s)
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ * @len: number of bytes to unlock
+ *
+ * Lock one or more blocks
+ */
+static int onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ onenand_get_device(mtd, FL_LOCKING);
+ ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_LOCK);
+ onenand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * onenand_unlock - [MTD Interface] Unlock block(s)
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ * @len: number of bytes to unlock
+ *
+ * Unlock one or more blocks
+ */
+static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ onenand_get_device(mtd, FL_LOCKING);
+ ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+ onenand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * onenand_check_lock_status - [OneNAND Interface] Check lock status
+ * @this: onenand chip data structure
+ *
+ * Check lock status
+ */
+static int onenand_check_lock_status(struct onenand_chip *this)
+{
+ unsigned int value, block, status;
+ unsigned int end;
+
+ end = this->chipsize >> this->erase_shift;
+ for (block = 0; block < end; block++) {
+ /* Set block address */
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+ /* Set start block address */
+ this->write_word(block, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+
+ /* Check lock status */
+ status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
+ if (!(status & ONENAND_WP_US)) {
+ printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
+ __func__, block, status);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/**
+ * onenand_unlock_all - [OneNAND Interface] unlock all blocks
+ * @mtd: MTD device structure
+ *
+ * Unlock all blocks
+ */
+static void onenand_unlock_all(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t ofs = 0;
+ loff_t len = mtd->size;
+
+ if (this->options & ONENAND_HAS_UNLOCK_ALL) {
+ /* Set start block address */
+ this->write_word(0, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+ /* Write unlock command */
+ this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
+
+ /* There's no return value */
+ this->wait(mtd, FL_LOCKING);
+
+ /* Sanity check */
+ while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
+ & ONENAND_CTRL_ONGO)
+ continue;
+
+ /* Don't check lock status */
+ if (this->options & ONENAND_SKIP_UNLOCK_CHECK)
+ return;
+
+ /* Check lock status */
+ if (onenand_check_lock_status(this))
+ return;
+
+ /* Workaround for all block unlock in DDP */
+ if (ONENAND_IS_DDP(this) && !FLEXONENAND(this)) {
+ /* All blocks on another chip */
+ ofs = this->chipsize >> 1;
+ len = this->chipsize >> 1;
+ }
+ }
+
+ onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+}
+
+#ifdef CONFIG_MTD_ONENAND_OTP
+
+/**
+ * onenand_otp_command - Send OTP specific command to OneNAND device
+ * @mtd: MTD device structure
+ * @cmd: the command to be sent
+ * @addr: offset to read from or write to
+ * @len: number of bytes to read or write
+ */
+static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr,
+ size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ int value, block, page;
+
+ /* Address translation */
+ switch (cmd) {
+ case ONENAND_CMD_OTP_ACCESS:
+ block = (int) (addr >> this->erase_shift);
+ page = -1;
+ break;
+
+ default:
+ block = (int) (addr >> this->erase_shift);
+ page = (int) (addr >> this->page_shift);
+
+ if (ONENAND_IS_2PLANE(this)) {
+ /* Make the even block number */
+ block &= ~1;
+ /* Is it the odd plane? */
+ if (addr & this->writesize)
+ block++;
+ page >>= 1;
+ }
+ page &= this->page_mask;
+ break;
+ }
+
+ if (block != -1) {
+ /* Write 'DFS, FBA' of Flash */
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base +
+ ONENAND_REG_START_ADDRESS1);
+ }
+
+ if (page != -1) {
+ /* Now we use page size operation */
+ int sectors = 4, count = 4;
+ int dataram;
+
+ switch (cmd) {
+ default:
+ if (ONENAND_IS_2PLANE(this) && cmd == ONENAND_CMD_PROG)
+ cmd = ONENAND_CMD_2X_PROG;
+ dataram = ONENAND_CURRENT_BUFFERRAM(this);
+ break;
+ }
+
+ /* Write 'FPA, FSA' of Flash */
+ value = onenand_page_address(page, sectors);
+ this->write_word(value, this->base +
+ ONENAND_REG_START_ADDRESS8);
+
+ /* Write 'BSA, BSC' of DataRAM */
+ value = onenand_buffer_address(dataram, sectors, count);
+ this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
+ }
+
+ /* Interrupt clear */
+ this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT);
+
+ /* Write command */
+ this->write_word(cmd, this->base + ONENAND_REG_COMMAND);
+
+ return 0;
+}
+
+/**
+ * onenand_otp_write_oob_nolock - [INTERN] OneNAND write out-of-band, specific to OTP
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ *
+ * OneNAND write out-of-band only for OTP
+ */
+static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int column, ret = 0, oobsize;
+ int written = 0;
+ u_char *oobbuf;
+ size_t len = ops->ooblen;
+ const u_char *buf = ops->oobbuf;
+ int block, value, status;
+
+ to += ops->ooboffs;
+
+ /* Initialize retlen, in case of early exit */
+ ops->oobretlen = 0;
+
+ oobsize = mtd->oobsize;
+
+ column = to & (mtd->oobsize - 1);
+
+ oobbuf = this->oob_buf;
+
+ /* Loop until all data is written */
+ while (written < len) {
+ int thislen = min_t(int, oobsize, len - written);
+
+ cond_resched();
+
+ block = (int) (to >> this->erase_shift);
+ /*
+ * Write 'DFS, FBA' of Flash
+ * Add: F100h DQ=DFS, FBA
+ */
+
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base +
+ ONENAND_REG_START_ADDRESS1);
+
+ /*
+ * Select DataRAM for DDP
+ * Add: F101h DQ=DBS
+ */
+
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base +
+ ONENAND_REG_START_ADDRESS2);
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+
+ /*
+ * Enter OTP access mode
+ */
+ this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
+ this->wait(mtd, FL_OTPING);
+
+ /* We send data to the spare RAM with the full oobsize
+ * to avoid byte access */
+ memcpy(oobbuf + column, buf, thislen);
+
+ /*
+ * Write Data into DataRAM
+ * Add: 8th Word
+ * in sector0/spare/page0
+ * DQ=XXFCh
+ */
+ this->write_bufferram(mtd, ONENAND_SPARERAM,
+ oobbuf, 0, mtd->oobsize);
+
+ onenand_otp_command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
+ onenand_update_bufferram(mtd, to, 0);
+ if (ONENAND_IS_2PLANE(this)) {
+ ONENAND_SET_BUFFERRAM1(this);
+ onenand_update_bufferram(mtd, to + this->writesize, 0);
+ }
+
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
+ break;
+ }
+
+ /* Exit OTP access mode */
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ this->wait(mtd, FL_RESETTING);
+
+ status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+ status &= 0x60;
+
+ if (status == 0x60) {
+ printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
+ printk(KERN_DEBUG "1st Block\tLOCKED\n");
+ printk(KERN_DEBUG "OTP Block\tLOCKED\n");
+ } else if (status == 0x20) {
+ printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
+ printk(KERN_DEBUG "1st Block\tLOCKED\n");
+ printk(KERN_DEBUG "OTP Block\tUN-LOCKED\n");
+ } else if (status == 0x40) {
+ printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
+ printk(KERN_DEBUG "1st Block\tUN-LOCKED\n");
+ printk(KERN_DEBUG "OTP Block\tLOCKED\n");
+ } else {
+ printk(KERN_DEBUG "Reboot to check\n");
+ }
+
+ written += thislen;
+ if (written == len)
+ break;
+
+ to += mtd->writesize;
+ buf += thislen;
+ column = 0;
+ }
+
+ ops->oobretlen = written;
+
+ return ret;
+}
+
+/* Internal OTP operation */
+typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf);
+
+/**
+ * do_otp_read - [DEFAULT] Read OTP block area
+ * @mtd: MTD device structure
+ * @from: The offset to read
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put/get data
+ *
+ * Read OTP block area.
+ */
+static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .ooblen = 0,
+ .datbuf = buf,
+ .oobbuf = NULL,
+ };
+ int ret;
+
+ /* Enter OTP access mode */
+ this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
+ this->wait(mtd, FL_OTPING);
+
+ ret = ONENAND_IS_4KB_PAGE(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, &ops) :
+ onenand_read_ops_nolock(mtd, from, &ops);
+
+ /* Exit OTP access mode */
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ this->wait(mtd, FL_RESETTING);
+
+ return ret;
+}
+
+/**
+ * do_otp_write - [DEFAULT] Write OTP block area
+ * @mtd: MTD device structure
+ * @to: The offset to write
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the databuffer to put/get data
+ *
+ * Write OTP block area.
+ */
+static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned char *pbuf = buf;
+ int ret;
+ struct mtd_oob_ops ops = { };
+
+ /* Force buffer page aligned */
+ if (len < mtd->writesize) {
+ memcpy(this->page_buf, buf, len);
+ memset(this->page_buf + len, 0xff, mtd->writesize - len);
+ pbuf = this->page_buf;
+ len = mtd->writesize;
+ }
+
+ /* Enter OTP access mode */
+ this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
+ this->wait(mtd, FL_OTPING);
+
+ ops.len = len;
+ ops.ooblen = 0;
+ ops.datbuf = pbuf;
+ ops.oobbuf = NULL;
+ ret = onenand_write_ops_nolock(mtd, to, &ops);
+ *retlen = ops.retlen;
+
+ /* Exit OTP access mode */
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ this->wait(mtd, FL_RESETTING);
+
+ return ret;
+}
+
+/**
+ * do_otp_lock - [DEFAULT] Lock OTP block area
+ * @mtd: MTD device structure
+ * @from: The offset to lock
+ * @len: number of bytes to lock
+ * @retlen: pointer to variable to store the number of locked bytes
+ * @buf: the databuffer to put/get data
+ *
+ * Lock OTP block area.
+ */
+static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_oob_ops ops = { };
+ int ret;
+
+ if (FLEXONENAND(this)) {
+
+ /* Enter OTP access mode */
+ this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
+ this->wait(mtd, FL_OTPING);
+ /*
+ * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
+ * main area of page 49.
+ */
+ ops.len = mtd->writesize;
+ ops.ooblen = 0;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops);
+ *retlen = ops.retlen;
+
+ /* Exit OTP access mode */
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ this->wait(mtd, FL_RESETTING);
+ } else {
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooblen = len;
+ ops.oobbuf = buf;
+ ops.ooboffs = 0;
+ ret = onenand_otp_write_oob_nolock(mtd, from, &ops);
+ *retlen = ops.oobretlen;
+ }
+
+ return ret;
+}
+
+/**
+ * onenand_otp_walk - [DEFAULT] Handle OTP operation
+ * @mtd: MTD device structure
+ * @from: The offset to read/write
+ * @len: number of bytes to read/write
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put/get data
+ * @action: do given action
+ * @mode: specify user or factory OTP area
+ *
+ * Handle OTP operation.
+ */
+static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf,
+ otp_op_t action, int mode)
+{
+ struct onenand_chip *this = mtd->priv;
+ int otp_pages;
+ int density;
+ int ret = 0;
+
+ *retlen = 0;
+
+ density = onenand_get_density(this->device_id);
+ if (density < ONENAND_DEVICE_DENSITY_512Mb)
+ otp_pages = 20;
+ else
+ otp_pages = 50;
+
+ if (mode == MTD_OTP_FACTORY) {
+ from += mtd->writesize * otp_pages;
+ otp_pages = ONENAND_PAGES_PER_BLOCK - otp_pages;
+ }
+
+ /* Check User/Factory boundary */
+ if (mode == MTD_OTP_USER) {
+ if (mtd->writesize * otp_pages < from + len)
+ return 0;
+ } else {
+ if (mtd->writesize * otp_pages < len)
+ return 0;
+ }
+
+ onenand_get_device(mtd, FL_OTPING);
+ while (len > 0 && otp_pages > 0) {
+ if (!action) { /* OTP Info functions */
+ struct otp_info *otpinfo;
+
+ len -= sizeof(struct otp_info);
+ if (len <= 0) {
+ ret = -ENOSPC;
+ break;
+ }
+
+ otpinfo = (struct otp_info *) buf;
+ otpinfo->start = from;
+ otpinfo->length = mtd->writesize;
+ otpinfo->locked = 0;
+
+ from += mtd->writesize;
+ buf += sizeof(struct otp_info);
+ *retlen += sizeof(struct otp_info);
+ } else {
+ size_t tmp_retlen;
+
+ ret = action(mtd, from, len, &tmp_retlen, buf);
+ if (ret)
+ break;
+
+ buf += tmp_retlen;
+ len -= tmp_retlen;
+ *retlen += tmp_retlen;
+
+ }
+ otp_pages--;
+ }
+ onenand_release_device(mtd);
+
+ return ret;
+}
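+
+/*
+ * Usage sketch (illustrative only, not part of the driver): passing a
+ * NULL action walks the OTP pages in info mode, filling one struct
+ * otp_info per OTP page until either the buffer or the OTP pages run
+ * out (-ENOSPC is returned if the buffer fills first). A hypothetical
+ * caller could do:
+ *
+ *	struct otp_info info[4];
+ *	size_t retlen;
+ *
+ *	onenand_otp_walk(mtd, 0, sizeof(info), &retlen,
+ *			 (u_char *) info, NULL, MTD_OTP_USER);
+ *
+ * after which retlen holds the number of otp_info bytes filled in.
+ */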
+
+/**
+ * onenand_get_fact_prot_info - [MTD Interface] Read factory OTP info
+ * @mtd: MTD device structure
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put/get data
+ *
+ * Read factory OTP info.
+ */
+static int onenand_get_fact_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return onenand_otp_walk(mtd, 0, len, retlen, (u_char *) buf, NULL,
+ MTD_OTP_FACTORY);
+}
+
+/**
+ * onenand_read_fact_prot_reg - [MTD Interface] Read factory OTP area
+ * @mtd: MTD device structure
+ * @from: The offset to read
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put/get data
+ *
+ * Read factory OTP area.
+ */
+static int onenand_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_read, MTD_OTP_FACTORY);
+}
+
+/**
+ * onenand_get_user_prot_info - [MTD Interface] Read user OTP info
+ * @mtd: MTD device structure
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put/get data
+ *
+ * Read user OTP info.
+ */
+static int onenand_get_user_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return onenand_otp_walk(mtd, 0, len, retlen, (u_char *) buf, NULL,
+ MTD_OTP_USER);
+}
+
+/**
+ * onenand_read_user_prot_reg - [MTD Interface] Read user OTP area
+ * @mtd: MTD device structure
+ * @from: The offset to read
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put/get data
+ *
+ * Read user OTP area.
+ */
+static int onenand_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_read, MTD_OTP_USER);
+}
+
+/**
+ * onenand_write_user_prot_reg - [MTD Interface] Write user OTP area
+ * @mtd: MTD device structure
+ * @from: The offset to write
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the databuffer to put/get data
+ *
+ * Write user OTP area.
+ */
+static int onenand_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ return onenand_otp_walk(mtd, from, len, retlen, (u_char *)buf,
+ do_otp_write, MTD_OTP_USER);
+}
+
+/**
+ * onenand_lock_user_prot_reg - [MTD Interface] Lock user OTP area
+ * @mtd: MTD device structure
+ * @from: The offset to lock
+ * @len: number of bytes to lock
+ *
+ * Write lock mark on spare area in page 0 in OTP block
+ */
+static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf;
+ size_t retlen;
+ int ret;
+ unsigned int otp_lock_offset = ONENAND_OTP_LOCK_OFFSET;
+
+ memset(buf, 0xff, FLEXONENAND(this) ? this->writesize
+ : mtd->oobsize);
+ /*
+ * Write the lock mark to the 8th word of sector 0, page 0 of spare 0.
+ * We write 16 bytes of spare area instead of 2 bytes.
+ * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
+ * main area of page 49.
+ */
+
+ from = 0;
+ len = FLEXONENAND(this) ? mtd->writesize : 16;
+
+ /*
+ * Note: OTP lock operation
+ * OTP block : 0xXXFC XX 1111 1100
+ * 1st block : 0xXXF3 (If chip support) XX 1111 0011
+ * Both : 0xXXF0 (If chip support) XX 1111 0000
+ */
+ if (FLEXONENAND(this))
+ otp_lock_offset = FLEXONENAND_OTP_LOCK_OFFSET;
+
+ /* ONENAND_OTP_AREA | ONENAND_OTP_BLOCK0 | ONENAND_OTP_AREA_BLOCK0 */
+ if (otp == 1)
+ buf[otp_lock_offset] = 0xFC;
+ else if (otp == 2)
+ buf[otp_lock_offset] = 0xF3;
+ else if (otp == 3)
+ buf[otp_lock_offset] = 0xF0;
+ else if (otp != 0)
+ printk(KERN_DEBUG "[OneNAND] Invalid option selected for OTP\n");
+
+ ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER);
+
+ return ret ? : retlen;
+}
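+
+/*
+ * A minimal user-space sketch (assumed, not part of this file): these
+ * hooks are reached through the MTD OTP ioctls, e.g.
+ *
+ *	int mode = MTD_OTP_USER;
+ *	struct otp_info info = { .start = 0, .length = 16 };
+ *
+ *	ioctl(fd, OTPSELECT, &mode);
+ *	ioctl(fd, OTPLOCK, &info);	(ends up in do_otp_lock())
+ *
+ * with fd an open /dev/mtdX of the OneNAND partition.
+ */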
+
+#endif /* CONFIG_MTD_ONENAND_OTP */
+
+/**
+ * onenand_check_features - Check and set OneNAND features
+ * @mtd: MTD data structure
+ *
+ * Check and set OneNAND features
+ * - lock scheme
+ * - two plane
+ */
+static void onenand_check_features(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int density, process, numbufs;
+
+ /* Lock scheme depends on density and process */
+ density = onenand_get_density(this->device_id);
+ process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT;
+ numbufs = this->read_word(this->base + ONENAND_REG_NUM_BUFFERS) >> 8;
+
+ /* Lock scheme */
+ switch (density) {
+ case ONENAND_DEVICE_DENSITY_8Gb:
+ this->options |= ONENAND_HAS_NOP_1;
+ fallthrough;
+ case ONENAND_DEVICE_DENSITY_4Gb:
+ if (ONENAND_IS_DDP(this))
+ this->options |= ONENAND_HAS_2PLANE;
+ else if (numbufs == 1) {
+ this->options |= ONENAND_HAS_4KB_PAGE;
+ this->options |= ONENAND_HAS_CACHE_PROGRAM;
+ /*
+ * There are two different 4KiB pagesize chips
+ * and no way to tell them apart by H/W config values.
+ *
+ * To detect the correct NOP for each chip,
+ * check the version ID as a workaround.
+ *
+ * Known devices:
+ * KFM4G16Q4M has NOP 4 with version ID 0x0131
+ * KFM4G16Q5M has NOP 1 with version ID 0x013e
+ */
+ if ((this->version_id & 0xf) == 0xe)
+ this->options |= ONENAND_HAS_NOP_1;
+ }
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+ break;
+
+ case ONENAND_DEVICE_DENSITY_2Gb:
+ /* 2Gb DDP does not have 2 plane */
+ if (!ONENAND_IS_DDP(this))
+ this->options |= ONENAND_HAS_2PLANE;
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+ break;
+
+ case ONENAND_DEVICE_DENSITY_1Gb:
+ /* A-Die has all block unlock */
+ if (process)
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+ break;
+
+ default:
+ /* Some OneNAND has continuous lock scheme */
+ if (!process)
+ this->options |= ONENAND_HAS_CONT_LOCK;
+ break;
+ }
+
+ /* MLC parts have a 4KiB pagesize. */
+ if (ONENAND_IS_MLC(this))
+ this->options |= ONENAND_HAS_4KB_PAGE;
+
+ if (ONENAND_IS_4KB_PAGE(this))
+ this->options &= ~ONENAND_HAS_2PLANE;
+
+ if (FLEXONENAND(this)) {
+ this->options &= ~ONENAND_HAS_CONT_LOCK;
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+ }
+
+ if (this->options & ONENAND_HAS_CONT_LOCK)
+ printk(KERN_DEBUG "Lock scheme is Continuous Lock\n");
+ if (this->options & ONENAND_HAS_UNLOCK_ALL)
+ printk(KERN_DEBUG "Chip support all block unlock\n");
+ if (this->options & ONENAND_HAS_2PLANE)
+ printk(KERN_DEBUG "Chip has 2 plane\n");
+ if (this->options & ONENAND_HAS_4KB_PAGE)
+ printk(KERN_DEBUG "Chip has 4KiB pagesize\n");
+ if (this->options & ONENAND_HAS_CACHE_PROGRAM)
+ printk(KERN_DEBUG "Chip has cache program feature\n");
+}
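+
+/*
+ * Worked example: a 4Gb non-DDP chip reporting a single buffer and
+ * version ID 0x013e gets ONENAND_HAS_4KB_PAGE, ONENAND_HAS_CACHE_PROGRAM,
+ * ONENAND_HAS_NOP_1 and ONENAND_HAS_UNLOCK_ALL; the final
+ * ONENAND_IS_4KB_PAGE() check then guarantees 2-plane stays off.
+ */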
+
+/**
+ * onenand_print_device_info - Print device & version ID
+ * @device: device ID
+ * @version: version ID
+ *
+ * Print device & version ID
+ */
+static void onenand_print_device_info(int device, int version)
+{
+ int vcc, demuxed, ddp, density, flexonenand;
+
+ vcc = device & ONENAND_DEVICE_VCC_MASK;
+ demuxed = device & ONENAND_DEVICE_IS_DEMUX;
+ ddp = device & ONENAND_DEVICE_IS_DDP;
+ density = onenand_get_density(device);
+ flexonenand = device & DEVICE_IS_FLEXONENAND;
+ printk(KERN_INFO "%s%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
+ demuxed ? "" : "Muxed ",
+ flexonenand ? "Flex-" : "",
+ ddp ? "(DDP)" : "",
+ (16 << density),
+ vcc ? "2.65/3.3" : "1.8",
+ device);
+ printk(KERN_INFO "OneNAND version = 0x%04x\n", version);
+}
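+
+/*
+ * Example output (hypothetical IDs): a muxed, non-Flex 1Gb DDP part
+ * would print "Muxed OneNAND(DDP) 128MB 1.8V 16-bit (0x..)" followed
+ * by "OneNAND version = 0x....".
+ */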
+
+static const struct onenand_manufacturers onenand_manuf_ids[] = {
+ {ONENAND_MFR_SAMSUNG, "Samsung"},
+ {ONENAND_MFR_NUMONYX, "Numonyx"},
+};
+
+/**
+ * onenand_check_maf - Check manufacturer ID
+ * @manuf: manufacturer ID
+ *
+ * Check manufacturer ID
+ */
+static int onenand_check_maf(int manuf)
+{
+ int size = ARRAY_SIZE(onenand_manuf_ids);
+ char *name;
+ int i;
+
+ for (i = 0; i < size; i++)
+ if (manuf == onenand_manuf_ids[i].id)
+ break;
+
+ if (i < size)
+ name = onenand_manuf_ids[i].name;
+ else
+ name = "Unknown";
+
+ printk(KERN_DEBUG "OneNAND Manufacturer: %s (0x%0x)\n", name, manuf);
+
+ return (i == size);
+}
+
+/**
+ * flexonenand_get_boundary - Reads the SLC boundary
+ * @mtd: MTD data structure
+ */
+static int flexonenand_get_boundary(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned die, bdry;
+ int syscfg, locked;
+
+ /* Disable ECC */
+ syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
+ this->write_word((syscfg | 0x0100), this->base + ONENAND_REG_SYS_CFG1);
+
+ for (die = 0; die < this->dies; die++) {
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ this->wait(mtd, FL_READING);
+
+ bdry = this->read_word(this->base + ONENAND_DATARAM);
+ if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
+ locked = 0;
+ else
+ locked = 1;
+ this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
+
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ this->wait(mtd, FL_RESETTING);
+
+ printk(KERN_INFO "Die %d boundary: %d%s\n", die,
+ this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
+ }
+
+ /* Enable ECC */
+ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+ return 0;
+}
+
+/**
+ * flexonenand_get_size - Fill up fields in onenand_chip and mtd_info
+ * boundary[], diesize[], mtd->size, mtd->erasesize
+ * @mtd: MTD device structure
+ */
+static void flexonenand_get_size(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int die, i, eraseshift, density;
+ int blksperdie, maxbdry;
+ loff_t ofs;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((loff_t)(16 << density) << 20) >> (this->erase_shift);
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+ maxbdry = blksperdie - 1;
+ eraseshift = this->erase_shift - 1;
+
+ mtd->numeraseregions = this->dies << 1;
+
+ /* This fills up the device boundary */
+ flexonenand_get_boundary(mtd);
+ die = ofs = 0;
+ i = -1;
+ for (; die < this->dies; die++) {
+ if (!die || this->boundary[die-1] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks =
+ this->boundary[die] + 1;
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift++;
+ } else {
+ mtd->numeraseregions -= 1;
+ mtd->eraseregions[i].numblocks +=
+ this->boundary[die] + 1;
+ ofs += (this->boundary[die] + 1) << (eraseshift - 1);
+ }
+ if (this->boundary[die] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks = maxbdry ^
+ this->boundary[die];
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift--;
+ } else
+ mtd->numeraseregions -= 1;
+ }
+
+ /* Expose MLC erase size except when all blocks are SLC */
+ mtd->erasesize = 1 << this->erase_shift;
+ if (mtd->numeraseregions == 1)
+ mtd->erasesize >>= 1;
+
+ printk(KERN_INFO "Device has %d eraseregions\n", mtd->numeraseregions);
+ for (i = 0; i < mtd->numeraseregions; i++)
+ printk(KERN_INFO "[offset: 0x%08x, erasesize: 0x%05x,"
+ " numblocks: %04u]\n",
+ (unsigned int) mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+
+ for (die = 0, mtd->size = 0; die < this->dies; die++) {
+ this->diesize[die] = (loff_t)blksperdie << this->erase_shift;
+ this->diesize[die] -= (loff_t)(this->boundary[die] + 1)
+ << (this->erase_shift - 1);
+ mtd->size += this->diesize[die];
+ }
+}
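+
+/*
+ * Worked example (single-die 4Gb Flex-OneNAND, erase_shift = 19, i.e.
+ * 512KiB MLC blocks, so blksperdie = 1024): with boundary[0] = 127 the
+ * die holds 128 SLC blocks of 256KiB and 896 MLC blocks of 512KiB,
+ * giving diesize = (1024 << 19) - (128 << 18) = 512MiB - 32MiB = 480MiB.
+ */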
+
+/**
+ * flexonenand_check_blocks_erased - Check if blocks are erased
+ * @mtd: mtd info structure
+ * @start: first erase block to check
+ * @end: last erase block to check
+ *
+ * Converting an unerased block from MLC to SLC
+ * causes byte values to change. Since both data and its ECC
+ * have changed, reads on the block give an uncorrectable error.
+ * This might lead to the block being detected as bad.
+ *
+ * Avoid this by ensuring that the block to be converted is
+ * erased.
+ */
+static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int end)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i, ret;
+ int block;
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OPS_PLACE_OOB,
+ .ooboffs = 0,
+ .ooblen = mtd->oobsize,
+ .datbuf = NULL,
+ .oobbuf = this->oob_buf,
+ };
+ loff_t addr;
+
+ printk(KERN_DEBUG "Check blocks from %d to %d\n", start, end);
+
+ for (block = start; block <= end; block++) {
+ addr = flexonenand_addr(this, block);
+ if (onenand_block_isbad_nolock(mtd, addr, 0))
+ continue;
+
+ /*
+ * Since main area write results in ECC write to spare,
+ * it is sufficient to check only ECC bytes for change.
+ */
+ ret = onenand_read_oob_nolock(mtd, addr, &ops);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < mtd->oobsize; i++)
+ if (this->oob_buf[i] != 0xff)
+ break;
+
+ if (i != mtd->oobsize) {
+ printk(KERN_WARNING "%s: Block %d not erased.\n",
+ __func__, block);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * flexonenand_set_boundary - Writes the SLC boundary
+ * @mtd: MTD device structure
+ * @die: die index
+ * @boundary: new boundary (last SLC block of the die), or -1 for no change
+ * @lock: non-zero to lock the boundary against further changes
+ */
+static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
+ int boundary, int lock)
+{
+ struct onenand_chip *this = mtd->priv;
+ int ret, density, blksperdie, old, new, thisboundary;
+ loff_t addr;
+
+ /* Change only once for SDP Flex-OneNAND */
+ if (die && (!ONENAND_IS_DDP(this)))
+ return 0;
+
+ /* boundary value of -1 indicates no required change */
+ if (boundary < 0 || boundary == this->boundary[die])
+ return 0;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((16 << density) << 20) >> this->erase_shift;
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+
+ if (boundary >= blksperdie) {
+ printk(KERN_ERR "%s: Invalid boundary value. "
+ "Boundary not changed.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check if converting blocks are erased */
+ old = this->boundary[die] + (die * this->density_mask);
+ new = boundary + (die * this->density_mask);
+ ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new));
+ if (ret) {
+ printk(KERN_ERR "%s: Please erase blocks "
+ "before boundary change\n", __func__);
+ return ret;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ /* Check if the boundary is locked */
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ this->wait(mtd, FL_READING);
+
+ thisboundary = this->read_word(this->base + ONENAND_DATARAM);
+ if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
+ printk(KERN_ERR "%s: boundary locked\n", __func__);
+ ret = 1;
+ goto out;
+ }
+
+ printk(KERN_INFO "Changing die %d boundary: %d%s\n",
+ die, boundary, lock ? "(Locked)" : "(Unlocked)");
+
+ addr = die ? this->diesize[0] : 0;
+
+ boundary &= FLEXONENAND_PI_MASK;
+ boundary |= lock ? 0 : (3 << FLEXONENAND_PI_UNLOCK_SHIFT);
+
+ this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
+ ret = this->wait(mtd, FL_ERASING);
+ if (ret) {
+ printk(KERN_ERR "%s: Failed PI erase for Die %d\n",
+ __func__, die);
+ goto out;
+ }
+
+ this->write_word(boundary, this->base + ONENAND_DATARAM);
+ this->command(mtd, ONENAND_CMD_PROG, addr, 0);
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk(KERN_ERR "%s: Failed PI write for Die %d\n",
+ __func__, die);
+ goto out;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_UPDATE, die, 0);
+ ret = this->wait(mtd, FL_WRITING);
+out:
+ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
+ this->wait(mtd, FL_RESETTING);
+ if (!ret)
+ /* Recalculate device size on boundary change */
+ flexonenand_get_size(mtd);
+
+ return ret;
+}
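+
+/*
+ * Summary of the PI update sequence above: enter PI access mode, read
+ * the current boundary to check the lock bits, erase the PI block,
+ * program the new boundary word, issue PI update, then reset; on
+ * success flexonenand_get_size() recomputes the device geometry.
+ */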
+
+/**
+ * onenand_chip_probe - [OneNAND Interface] The generic chip probe
+ * @mtd: MTD device structure
+ *
+ * OneNAND detection method:
+ * Compare the values from command with ones from register
+ */
+static int onenand_chip_probe(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int bram_maf_id, bram_dev_id, maf_id, dev_id;
+ int syscfg;
+
+ /* Save system configuration 1 */
+ syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
+ /* Clear Sync. Burst Read mode to read BootRAM */
+ this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE), this->base + ONENAND_REG_SYS_CFG1);
+
+ /* Send the command for reading device ID from BootRAM */
+ this->write_word(ONENAND_CMD_READID, this->base + ONENAND_BOOTRAM);
+
+ /* Read manufacturer and device IDs from BootRAM */
+ bram_maf_id = this->read_word(this->base + ONENAND_BOOTRAM + 0x0);
+ bram_dev_id = this->read_word(this->base + ONENAND_BOOTRAM + 0x2);
+
+ /* Reset OneNAND to read default register values */
+ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM);
+ /* Wait reset */
+ this->wait(mtd, FL_RESETTING);
+
+ /* Restore system configuration 1 */
+ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+
+ /* Check manufacturer ID */
+ if (onenand_check_maf(bram_maf_id))
+ return -ENXIO;
+
+ /* Read manufacturer and device IDs from Register */
+ maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
+ dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
+
+ /* Check OneNAND device */
+ if (maf_id != bram_maf_id || dev_id != bram_dev_id)
+ return -ENXIO;
+
+ return 0;
+}
+
+/**
+ * onenand_probe - [OneNAND Interface] Probe the OneNAND device
+ * @mtd: MTD device structure
+ */
+static int onenand_probe(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int dev_id, ver_id;
+ int density;
+ int ret;
+
+ ret = this->chip_probe(mtd);
+ if (ret)
+ return ret;
+
+ /* Device and version IDs from Register */
+ dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
+ ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
+ this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
+
+ /* Flash device information */
+ onenand_print_device_info(dev_id, ver_id);
+ this->device_id = dev_id;
+ this->version_id = ver_id;
+
+ /* Check OneNAND features */
+ onenand_check_features(mtd);
+
+ density = onenand_get_density(dev_id);
+ if (FLEXONENAND(this)) {
+ this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
+ /* Maximum possible erase regions */
+ mtd->numeraseregions = this->dies << 1;
+ mtd->eraseregions =
+ kcalloc(this->dies << 1,
+ sizeof(struct mtd_erase_region_info),
+ GFP_KERNEL);
+ if (!mtd->eraseregions)
+ return -ENOMEM;
+ }
+
+ /*
+ * For Flex-OneNAND, chipsize represents maximum possible device size.
+ * mtd->size represents the actual device size.
+ */
+ this->chipsize = (16 << density) << 20;
+
+ /* OneNAND page size & block size */
+ /* The data buffer size is equal to page size */
+ mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
+ /* We use the full BufferRAM */
+ if (ONENAND_IS_4KB_PAGE(this))
+ mtd->writesize <<= 1;
+
+ mtd->oobsize = mtd->writesize >> 5;
+ /* Pages per block are always 64 in OneNAND */
+ mtd->erasesize = mtd->writesize << 6;
+ /*
+ * Flex-OneNAND SLC area has 64 pages per block.
+ * Flex-OneNAND MLC area has 128 pages per block.
+ * Expose MLC erase size to find erase_shift and page_mask.
+ */
+ if (FLEXONENAND(this))
+ mtd->erasesize <<= 1;
+
+ this->erase_shift = ffs(mtd->erasesize) - 1;
+ this->page_shift = ffs(mtd->writesize) - 1;
+ this->page_mask = (1 << (this->erase_shift - this->page_shift)) - 1;
+ /* Set density mask. it is used for DDP */
+ if (ONENAND_IS_DDP(this))
+ this->density_mask = this->chipsize >> (this->erase_shift + 1);
+ /* It's real page size */
+ this->writesize = mtd->writesize;
+
+ /* REVISIT: Multichip handling */
+
+ if (FLEXONENAND(this))
+ flexonenand_get_size(mtd);
+ else
+ mtd->size = this->chipsize;
+
+ /*
+ * We emulate the 4KiB page and 256KiB erase block size,
+ * but oobsize is still 64 bytes.
+ * This is only valid when 2X program support is turned on;
+ * otherwise the compiler optimizes the branch away.
+ */
+ if (ONENAND_IS_2PLANE(this)) {
+ mtd->writesize <<= 1;
+ mtd->erasesize <<= 1;
+ }
+
+ return 0;
+}
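+
+/*
+ * Worked example: a 2Gb chip reports a 2KiB data buffer, so
+ * writesize = 2048, oobsize = 2048 >> 5 = 64 and
+ * erasesize = 2048 << 6 = 128KiB; erase_shift = 17, page_shift = 11
+ * and page_mask = (1 << 6) - 1 = 63 pages per block.
+ */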
+
+/**
+ * onenand_suspend - [MTD Interface] Suspend the OneNAND flash
+ * @mtd: MTD device structure
+ */
+static int onenand_suspend(struct mtd_info *mtd)
+{
+ return onenand_get_device(mtd, FL_PM_SUSPENDED);
+}
+
+/**
+ * onenand_resume - [MTD Interface] Resume the OneNAND flash
+ * @mtd: MTD device structure
+ */
+static void onenand_resume(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ if (this->state == FL_PM_SUSPENDED)
+ onenand_release_device(mtd);
+ else
+ printk(KERN_ERR "%s: resume() called for the chip which is not "
+ "in suspended state\n", __func__);
+}
+
+/**
+ * onenand_scan - [OneNAND Interface] Scan for the OneNAND device
+ * @mtd: MTD device structure
+ * @maxchips: Number of chips to scan for
+ *
+ * This fills out all the uninitialized function pointers
+ * with the defaults.
+ * The flash ID is read and the mtd/chip structures are
+ * filled with the appropriate values.
+ */
+int onenand_scan(struct mtd_info *mtd, int maxchips)
+{
+ int i, ret;
+ struct onenand_chip *this = mtd->priv;
+
+ if (!this->read_word)
+ this->read_word = onenand_readw;
+ if (!this->write_word)
+ this->write_word = onenand_writew;
+
+ if (!this->command)
+ this->command = onenand_command;
+ if (!this->wait)
+ onenand_setup_wait(mtd);
+ if (!this->bbt_wait)
+ this->bbt_wait = onenand_bbt_wait;
+ if (!this->unlock_all)
+ this->unlock_all = onenand_unlock_all;
+
+ if (!this->chip_probe)
+ this->chip_probe = onenand_chip_probe;
+
+ if (!this->read_bufferram)
+ this->read_bufferram = onenand_read_bufferram;
+ if (!this->write_bufferram)
+ this->write_bufferram = onenand_write_bufferram;
+
+ if (!this->block_markbad)
+ this->block_markbad = onenand_default_block_markbad;
+ if (!this->scan_bbt)
+ this->scan_bbt = onenand_default_bbt;
+
+ if (onenand_probe(mtd))
+ return -ENXIO;
+
+ /* Set Sync. Burst Read after probing */
+ if (this->mmcontrol) {
+ printk(KERN_INFO "OneNAND Sync. Burst Read support\n");
+ this->read_bufferram = onenand_sync_read_bufferram;
+ }
+
+ /* Allocate buffers, if necessary */
+ if (!this->page_buf) {
+ this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!this->page_buf)
+ return -ENOMEM;
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+ this->verify_buf = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!this->verify_buf) {
+ kfree(this->page_buf);
+ return -ENOMEM;
+ }
+#endif
+ this->options |= ONENAND_PAGEBUF_ALLOC;
+ }
+ if (!this->oob_buf) {
+ this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
+ if (!this->oob_buf) {
+ if (this->options & ONENAND_PAGEBUF_ALLOC) {
+ this->options &= ~ONENAND_PAGEBUF_ALLOC;
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+ kfree(this->verify_buf);
+#endif
+ kfree(this->page_buf);
+ }
+ return -ENOMEM;
+ }
+ this->options |= ONENAND_OOBBUF_ALLOC;
+ }
+
+ this->state = FL_READY;
+ init_waitqueue_head(&this->wq);
+ spin_lock_init(&this->chip_lock);
+
+ /*
+ * Allow subpage writes up to oobsize.
+ */
+ switch (mtd->oobsize) {
+ case 128:
+ if (FLEXONENAND(this)) {
+ mtd_set_ooblayout(mtd, &flexonenand_ooblayout_ops);
+ mtd->subpage_sft = 0;
+ } else {
+ mtd_set_ooblayout(mtd, &onenand_oob_128_ooblayout_ops);
+ mtd->subpage_sft = 2;
+ }
+ if (ONENAND_IS_NOP_1(this))
+ mtd->subpage_sft = 0;
+ break;
+ case 64:
+ mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops);
+ mtd->subpage_sft = 2;
+ break;
+
+ case 32:
+ mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops);
+ mtd->subpage_sft = 1;
+ break;
+
+ default:
+ printk(KERN_WARNING "%s: No OOB scheme defined for oobsize %d\n",
+ __func__, mtd->oobsize);
+ mtd->subpage_sft = 0;
+ /* To prevent kernel oops */
+ mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops);
+ break;
+ }
+
+ this->subpagesize = mtd->writesize >> mtd->subpage_sft;
+
+ /*
+ * The number of bytes available for a client to place data into
+ * the out of band area
+ */
+ ret = mtd_ooblayout_count_freebytes(mtd);
+ if (ret < 0)
+ ret = 0;
+
+ mtd->oobavail = ret;
+
+ mtd->ecc_strength = 1;
+
+ /* Fill in remaining MTD driver data */
+ mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->_erase = onenand_erase;
+ mtd->_point = NULL;
+ mtd->_unpoint = NULL;
+ mtd->_read_oob = onenand_read_oob;
+ mtd->_write_oob = onenand_write_oob;
+ mtd->_panic_write = onenand_panic_write;
+#ifdef CONFIG_MTD_ONENAND_OTP
+ mtd->_get_fact_prot_info = onenand_get_fact_prot_info;
+ mtd->_read_fact_prot_reg = onenand_read_fact_prot_reg;
+ mtd->_get_user_prot_info = onenand_get_user_prot_info;
+ mtd->_read_user_prot_reg = onenand_read_user_prot_reg;
+ mtd->_write_user_prot_reg = onenand_write_user_prot_reg;
+ mtd->_lock_user_prot_reg = onenand_lock_user_prot_reg;
+#endif
+ mtd->_sync = onenand_sync;
+ mtd->_lock = onenand_lock;
+ mtd->_unlock = onenand_unlock;
+ mtd->_suspend = onenand_suspend;
+ mtd->_resume = onenand_resume;
+ mtd->_block_isbad = onenand_block_isbad;
+ mtd->_block_markbad = onenand_block_markbad;
+ mtd->owner = THIS_MODULE;
+ mtd->writebufsize = mtd->writesize;
+
+ /* Unlock whole block */
+ if (!(this->options & ONENAND_SKIP_INITIAL_UNLOCKING))
+ this->unlock_all(mtd);
+
+ /* Set the bad block marker position */
+ this->badblockpos = ONENAND_BADBLOCK_POS;
+
+ ret = this->scan_bbt(mtd);
+ if ((!FLEXONENAND(this)) || ret)
+ return ret;
+
+ /* Change Flex-OneNAND boundaries if required */
+ for (i = 0; i < MAX_DIES; i++)
+ flexonenand_set_boundary(mtd, i, flex_bdry[2 * i],
+ flex_bdry[(2 * i) + 1]);
+
+ return 0;
+}
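+
+/*
+ * A minimal board-driver sketch (names and sizes assumed): the caller
+ * provides a zeroed struct onenand_chip with ->base mapped to the chip
+ * and lets onenand_scan() fill in the default helpers:
+ *
+ *	this->base = ioremap(phys_base, 128 * SZ_1K);
+ *	mtd->priv = this;
+ *	err = onenand_scan(mtd, 1);
+ *	if (!err)
+ *		err = mtd_device_register(mtd, NULL, 0);
+ */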
+
+/**
+ * onenand_release - [OneNAND Interface] Free resources held by the OneNAND device
+ * @mtd: MTD device structure
+ */
+void onenand_release(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ /* Deregister partitions */
+ mtd_device_unregister(mtd);
+
+ /* Free bad block table memory, if allocated */
+ if (this->bbm) {
+ struct bbm_info *bbm = this->bbm;
+ kfree(bbm->bbt);
+ kfree(this->bbm);
+ }
+ /* Buffers allocated by onenand_scan */
+ if (this->options & ONENAND_PAGEBUF_ALLOC) {
+ kfree(this->page_buf);
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+ kfree(this->verify_buf);
+#endif
+ }
+ if (this->options & ONENAND_OOBBUF_ALLOC)
+ kfree(this->oob_buf);
+ kfree(mtd->eraseregions);
+}
+
+EXPORT_SYMBOL_GPL(onenand_scan);
+EXPORT_SYMBOL_GPL(onenand_release);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_DESCRIPTION("Generic OneNAND flash driver code");
diff --git a/drivers/mtd/nand/onenand/onenand_bbt.c b/drivers/mtd/nand/onenand/onenand_bbt.c
new file mode 100644
index 0000000000..d7fe35bc45
--- /dev/null
+++ b/drivers/mtd/nand/onenand/onenand_bbt.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Bad Block Table support for the OneNAND driver
+ *
+ * Copyright(c) 2005 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * Derived from nand_bbt.c
+ *
+ * TODO:
+ * Split BBT core and chip specific BBT.
+ */
+
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/export.h>
+
+/**
+ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @len: the length of buffer to search
+ * @paglen: the pagelength
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block
+ * tables and good / bad block identifiers. Same as check_pattern, but
+ * no optional empty check and the pattern is expected to start
+ * at offset 0.
+ *
+ */
+static int check_short_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
+{
+ int i;
+ uint8_t *p = buf;
+
+ /* Compare the pattern */
+ for (i = 0; i < td->len; i++) {
+ if (p[i] != td->pattern[i])
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * create_bbt - [GENERIC] Create a bad block table by scanning the device
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips.
+ * Applies only if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device
+ * for the given good/bad block identify pattern
+ */
+static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd, int chip)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ int i, j, numblocks, len, scanlen;
+ int startblock;
+ loff_t from;
+ size_t readlen;
+ struct mtd_oob_ops ops = { };
+ int rgn;
+
+ printk(KERN_INFO "Scanning device for bad blocks\n");
+
+ len = 2;
+
+ /* We only need to read a few bytes from the OOB area */
+ scanlen = 0;
+ readlen = bd->len;
+
+ /* chip == -1 case only */
+ /* Note that numblocks is 2 * (real numblocks) here;
+ * see i += 2 below as it makes shifting and masking less painful
+ */
+ numblocks = this->chipsize >> (bbm->bbt_erase_shift - 1);
+ startblock = 0;
+ from = 0;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooblen = readlen;
+ ops.oobbuf = buf;
+ ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
+
+ for (i = startblock; i < numblocks; ) {
+ int ret;
+
+ for (j = 0; j < len; j++) {
+ /* No need to read pages fully,
+ * just read required OOB bytes */
+ ret = onenand_bbt_read_oob(mtd,
+ from + j * this->writesize + bd->offs, &ops);
+
+ /* If it is an initial bad block, just ignore it */
+ if (ret == ONENAND_BBT_READ_FATAL_ERROR)
+ return -EIO;
+
+ if (ret || check_short_pattern(&buf[j * scanlen],
+ scanlen, this->writesize, bd)) {
+ bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
+ printk(KERN_INFO "OneNAND eraseblock %d is an "
+ "initial bad block\n", i >> 1);
+ mtd->ecc_stats.badblocks++;
+ break;
+ }
+ }
+ i += 2;
+
+ if (FLEXONENAND(this)) {
+ rgn = flexonenand_region(mtd, from);
+ from += mtd->eraseregions[rgn].erasesize;
+ } else
+ from += (1 << bbm->bbt_erase_shift);
+ }
+
+ return 0;
+}
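+
+/*
+ * Worked example: a 256MiB chip with 128KiB erase blocks has 2048 real
+ * blocks, so numblocks = chipsize >> (bbt_erase_shift - 1) = 4096 and
+ * each real block consumes two bits of bbm->bbt.
+ */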
+
+
+/**
+ * onenand_memory_bbt - [GENERIC] create a memory based bad block table
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function creates a memory based bbt by scanning the device
+ * for manufacturer / software marked good / bad blocks
+ */
+static inline int onenand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ return create_bbt(mtd, this->page_buf, bd, -1);
+}
+
+/**
+ * onenand_isbad_bbt - [OneNAND Interface] Check if a block is bad
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ */
+static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ int block;
+ uint8_t res;
+
+ /* Get block number * 2 */
+ block = (int) (onenand_block(this, offs) << 1);
+ res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
+
+ pr_debug("onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
+ (unsigned int) offs, block >> 1, res);
+
+ switch ((int) res) {
+ case 0x00: return 0;
+ case 0x01: return 1;
+ case 0x02: return allowbbt ? 0 : 1;
+ }
+
+ return 1;
+}
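+
+/*
+ * Worked example: block 5 occupies bits 10-11 of the table, read as
+ * (bbm->bbt[1] >> 2) & 0x03. 0x00 means good; create_bbt() stores
+ * 0x03 for factory-marked bad blocks; 0x02 is treated as good only
+ * when allowbbt permits access to the BBT region.
+ */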
+
+/**
+ * onenand_scan_bbt - [OneNAND Interface] scan, find, read and maybe create bad block table(s)
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks whether a bad block table is already
+ * available. If not, it scans the device for manufacturer-
+ * marked good / bad blocks and writes the bad block table to
+ * the selected place.
+ *
+ * The bad block table memory is allocated here. It is freed
+ * by the onenand_release function.
+ *
+ */
+static int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ int len, ret = 0;
+
+ len = this->chipsize >> (this->erase_shift + 2);
+ /* Allocate memory (2bit per block) and clear the memory bad block table */
+ bbm->bbt = kzalloc(len, GFP_KERNEL);
+ if (!bbm->bbt)
+ return -ENOMEM;
+
+ /* Set erase shift */
+ bbm->bbt_erase_shift = this->erase_shift;
+
+ if (!bbm->isbad_bbt)
+ bbm->isbad_bbt = onenand_isbad_bbt;
+
+ /* Scan the device to build a memory based bad block table */
+ if ((ret = onenand_memory_bbt(mtd, bd))) {
+ printk(KERN_ERR "onenand_scan_bbt: Can't scan flash and build the RAM-based BBT\n");
+ kfree(bbm->bbt);
+ bbm->bbt = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr largepage_memorybased = {
+ .options = 0,
+ .offs = 0,
+ .len = 2,
+ .pattern = scan_ff_pattern,
+};
+
+/**
+ * onenand_default_bbt - [OneNAND Interface] Select a default bad block table for the device
+ * @mtd: MTD device structure
+ *
+ * This function selects the default bad block table
+ * support for the device and calls the onenand_scan_bbt function
+ */
+int onenand_default_bbt(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm;
+
+ this->bbm = kzalloc(sizeof(struct bbm_info), GFP_KERNEL);
+ if (!this->bbm)
+ return -ENOMEM;
+
+ bbm = this->bbm;
+
+ /* A 1KB page has the same configuration as a 2KB page */
+ if (!bbm->badblock_pattern)
+ bbm->badblock_pattern = &largepage_memorybased;
+
+ return onenand_scan_bbt(mtd, bbm->badblock_pattern);
+}
diff --git a/drivers/mtd/nand/onenand/onenand_omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c
new file mode 100644
index 0000000000..a12f8f3efd
--- /dev/null
+++ b/drivers/mtd/nand/onenand/onenand_omap2.c
@@ -0,0 +1,609 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OneNAND driver for OMAP2 / OMAP3
+ *
+ * Copyright © 2005-2006 Nokia Corporation
+ *
+ * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
+ * IRQ and DMA support written by Timo Teras
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/omap-gpmc.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
+
+#include <asm/mach/flash.h>
+
+#define DRIVER_NAME "omap2-onenand"
+
+#define ONENAND_BUFRAM_SIZE (1024 * 5)
+
+struct omap2_onenand {
+ struct platform_device *pdev;
+ int gpmc_cs;
+ unsigned long phys_base;
+ struct gpio_desc *int_gpiod;
+ struct mtd_info mtd;
+ struct onenand_chip onenand;
+ struct completion irq_done;
+ struct completion dma_done;
+ struct dma_chan *dma_chan;
+};
+
+static void omap2_onenand_dma_complete_func(void *completion)
+{
+ complete(completion);
+}
+
+static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
+{
+ struct omap2_onenand *c = dev_id;
+
+ complete(&c->irq_done);
+
+ return IRQ_HANDLED;
+}
+
+static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
+{
+ return readw(c->onenand.base + reg);
+}
+
+static inline void write_reg(struct omap2_onenand *c, unsigned short value,
+ int reg)
+{
+ writew(value, c->onenand.base + reg);
+}
+
+static int omap2_onenand_set_cfg(struct omap2_onenand *c,
+ bool sr, bool sw,
+ int latency, int burst_len)
+{
+ unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
+
+ reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;
+
+ switch (burst_len) {
+ case 0: /* continuous */
+ break;
+ case 4:
+ reg |= ONENAND_SYS_CFG1_BL_4;
+ break;
+ case 8:
+ reg |= ONENAND_SYS_CFG1_BL_8;
+ break;
+ case 16:
+ reg |= ONENAND_SYS_CFG1_BL_16;
+ break;
+ case 32:
+ reg |= ONENAND_SYS_CFG1_BL_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (latency > 5)
+ reg |= ONENAND_SYS_CFG1_HF;
+ if (latency > 7)
+ reg |= ONENAND_SYS_CFG1_VHF;
+ if (sr)
+ reg |= ONENAND_SYS_CFG1_SYNC_READ;
+ if (sw)
+ reg |= ONENAND_SYS_CFG1_SYNC_WRITE;
+
+ write_reg(c, reg, ONENAND_REG_SYS_CFG1);
+
+ return 0;
+}
+
+static int omap2_onenand_get_freq(int ver)
+{
+ switch ((ver >> 4) & 0xf) {
+ case 0:
+ return 40;
+ case 1:
+ return 54;
+ case 2:
+ return 66;
+ case 3:
+ return 83;
+ case 4:
+ return 104;
+ }
+
+ return -EINVAL;
+}
+
+static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
+{
+ printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
+ msg, state, ctrl, intr);
+}
+
+static void wait_warn(char *msg, int state, unsigned int ctrl,
+ unsigned int intr)
+{
+ printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
+ "intr 0x%04x\n", msg, state, ctrl, intr);
+}
+
+static int omap2_onenand_wait(struct mtd_info *mtd, int state)
+{
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ struct onenand_chip *this = mtd->priv;
+ unsigned int intr = 0;
+ unsigned int ctrl, ctrl_mask;
+ unsigned long timeout;
+ u32 syscfg;
+
+ if (state == FL_RESETTING || state == FL_PREPARING_ERASE ||
+ state == FL_VERIFYING_ERASE) {
+ int i = 21;
+ unsigned int intr_flags = ONENAND_INT_MASTER;
+
+ switch (state) {
+ case FL_RESETTING:
+ intr_flags |= ONENAND_INT_RESET;
+ break;
+ case FL_PREPARING_ERASE:
+ intr_flags |= ONENAND_INT_ERASE;
+ break;
+ case FL_VERIFYING_ERASE:
+ i = 101;
+ break;
+ }
+
+ while (--i) {
+ udelay(1);
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ if (intr & ONENAND_INT_MASTER)
+ break;
+ }
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ wait_err("controller error", state, ctrl, intr);
+ return -EIO;
+ }
+ if ((intr & intr_flags) == intr_flags)
+ return 0;
+ /* Otherwise fall through to the wait-for-interrupt path below */
+ }
+
+ if (state != FL_READING) {
+ int result;
+
+ /* Turn interrupts on */
+ syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+ if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
+ syscfg |= ONENAND_SYS_CFG1_IOBE;
+ write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
+ /* Add a delay to let GPIO settle */
+ syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+ }
+
+ reinit_completion(&c->irq_done);
+ result = gpiod_get_value(c->int_gpiod);
+ if (result < 0) {
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ wait_err("gpio error", state, ctrl, intr);
+ return result;
+ } else if (result == 0) {
+ int retry_cnt = 0;
+retry:
+ if (!wait_for_completion_io_timeout(&c->irq_done,
+ msecs_to_jiffies(20))) {
+ /* Timeout after 20ms */
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ if (ctrl & ONENAND_CTRL_ONGO &&
+ !this->ongoing) {
+ /*
+ * The operation seems to be still going
+ * so give it some more time.
+ */
+ retry_cnt += 1;
+ if (retry_cnt < 3)
+ goto retry;
+ intr = read_reg(c,
+ ONENAND_REG_INTERRUPT);
+ wait_err("timeout", state, ctrl, intr);
+ return -EIO;
+ }
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ if ((intr & ONENAND_INT_MASTER) == 0)
+ wait_warn("timeout", state, ctrl, intr);
+ }
+ }
+ } else {
+ int retry_cnt = 0;
+
+ /* Turn interrupts off */
+ syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+ syscfg &= ~ONENAND_SYS_CFG1_IOBE;
+ write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
+
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (1) {
+ if (time_before(jiffies, timeout)) {
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ if (intr & ONENAND_INT_MASTER)
+ break;
+ } else {
+ /* Timeout after 20ms */
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ if (ctrl & ONENAND_CTRL_ONGO) {
+ /*
+ * The operation seems to be still going
+ * so give it some more time.
+ */
+ retry_cnt += 1;
+ if (retry_cnt < 3) {
+ timeout = jiffies +
+ msecs_to_jiffies(20);
+ continue;
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+
+ if (intr & ONENAND_INT_READ) {
+ int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);
+
+ if (ecc) {
+ unsigned int addr1, addr8;
+
+ addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
+ addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
+ if (ecc & ONENAND_ECC_2BIT_ALL) {
+ printk(KERN_ERR "onenand_wait: ECC error = "
+ "0x%04x, addr1 %#x, addr8 %#x\n",
+ ecc, addr1, addr8);
+ mtd->ecc_stats.failed++;
+ return -EBADMSG;
+ } else if (ecc & ONENAND_ECC_1BIT_ALL) {
+ printk(KERN_NOTICE "onenand_wait: correctable "
+ "ECC error = 0x%04x, addr1 %#x, "
+ "addr8 %#x\n", ecc, addr1, addr8);
+ mtd->ecc_stats.corrected++;
+ }
+ }
+ } else if (state == FL_READING) {
+ wait_err("timeout", state, ctrl, intr);
+ return -EIO;
+ }
+
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ wait_err("controller error", state, ctrl, intr);
+ if (ctrl & ONENAND_CTRL_LOCK)
+ printk(KERN_ERR "onenand_wait: "
+ "Device is write protected!!!\n");
+ return -EIO;
+ }
+
+ ctrl_mask = 0xFE9F;
+ if (this->ongoing)
+ ctrl_mask &= ~0x8000;
+
+ if (ctrl & ctrl_mask)
+ wait_warn("unexpected controller status", state, ctrl, intr);
+
+ return 0;
+}
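+
+/*
+ * Rough summary of the wait strategy above: reset and erase
+ * prepare/verify states are polled with udelay(); the remaining
+ * non-read states wait on the INT GPIO interrupt with a 20 ms timeout,
+ * retried while the controller still reports ONGO; plain reads poll
+ * the interrupt register directly with the IRQ output masked.
+ */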
+
+static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ if (ONENAND_CURRENT_BUFFERRAM(this)) {
+ if (area == ONENAND_DATARAM)
+ return this->writesize;
+ if (area == ONENAND_SPARERAM)
+ return mtd->oobsize;
+ }
+
+ return 0;
+}
+
+static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
+ dma_addr_t src, dma_addr_t dst,
+ size_t count)
+{
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+
+ tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
+ return -EIO;
+ }
+
+ reinit_completion(&c->dma_done);
+
+ tx->callback = omap2_onenand_dma_complete_func;
+ tx->callback_param = &c->dma_done;
+
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
+ return -EIO;
+ }
+
+ dma_async_issue_pending(c->dma_chan);
+
+ if (!wait_for_completion_io_timeout(&c->dma_done,
+ msecs_to_jiffies(20))) {
+ dmaengine_terminate_sync(c->dma_chan);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
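+
+/*
+ * Design note: this is a plain dmaengine memcpy between the
+ * GPMC-mapped BufferRAM and a kernel buffer; on timeout the channel is
+ * terminated and the bufferram helpers below fall back to memcpy() PIO.
+ */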
+
+static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ struct onenand_chip *this = mtd->priv;
+ struct device *dev = &c->pdev->dev;
+ void *buf = (void *)buffer;
+ dma_addr_t dma_src, dma_dst;
+ int bram_offset, err;
+ size_t xtra;
+
+ bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+ /*
+ * Fall back to PIO mode if the buffer address is not DMA-able,
+ * if len is not long enough to make DMA transfers profitable,
+ * or if invoked from panic_write().
+ */
+ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
+ count < 384 || mtd->oops_panic_write)
+ goto out_copy;
+
+ xtra = count & 3;
+ if (xtra) {
+ count -= xtra;
+ memcpy(buf + count, this->base + bram_offset + count, xtra);
+ }
+
+ dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
+ dma_src = c->phys_base + bram_offset;
+
+ if (dma_mapping_error(dev, dma_dst)) {
+ dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
+ goto out_copy;
+ }
+
+ err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
+ dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
+ if (!err)
+ return 0;
+
+ dev_err(dev, "timeout waiting for DMA\n");
+
+out_copy:
+ memcpy(buf, this->base + bram_offset, count);
+ return 0;
+}
+
+static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer,
+ int offset, size_t count)
+{
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ struct onenand_chip *this = mtd->priv;
+ struct device *dev = &c->pdev->dev;
+ void *buf = (void *)buffer;
+ dma_addr_t dma_src, dma_dst;
+ int bram_offset, err;
+
+ bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+ /*
+ * Fall back to PIO mode if the buffer address is not DMA-able,
+ * if len is not long enough to make DMA transfers profitable,
+ * or if invoked from panic_write().
+ */
+ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
+ count < 384 || mtd->oops_panic_write)
+ goto out_copy;
+
+ dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
+ dma_dst = c->phys_base + bram_offset;
+ if (dma_mapping_error(dev, dma_src)) {
+ dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
+ goto out_copy;
+ }
+
+ err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
+ dma_unmap_single(dev, dma_src, count, DMA_TO_DEVICE);
+ if (!err)
+ return 0;
+
+ dev_err(dev, "timeout waiting for DMA\n");
+
+out_copy:
+ memcpy(this->base + bram_offset, buf, count);
+ return 0;
+}
+
+static void omap2_onenand_shutdown(struct platform_device *pdev)
+{
+ struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
+
+ /* With certain content in the buffer RAM, the OMAP boot ROM code
+ * can recognize the flash chip incorrectly. Zero it out before
+ * soft reset.
+ */
+ memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
+}
+
+static int omap2_onenand_probe(struct platform_device *pdev)
+{
+ u32 val;
+ dma_cap_mask_t mask;
+ int freq, latency, r;
+ struct resource *res;
+ struct omap2_onenand *c;
+ struct gpmc_onenand_info info;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ r = of_property_read_u32(np, "reg", &val);
+ if (r) {
+ dev_err(dev, "reg not found in DT\n");
+ return r;
+ }
+
+ c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ init_completion(&c->irq_done);
+ init_completion(&c->dma_done);
+ c->gpmc_cs = val;
+
+ c->onenand.base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(c->onenand.base))
+ return PTR_ERR(c->onenand.base);
+ c->phys_base = res->start;
+
+ c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
+ if (IS_ERR(c->int_gpiod)) {
+ /* Just try again if this happens */
+ return dev_err_probe(dev, PTR_ERR(c->int_gpiod), "error getting gpio\n");
+ }
+
+ if (c->int_gpiod) {
+ r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
+ omap2_onenand_interrupt,
+ IRQF_TRIGGER_RISING, "onenand", c);
+ if (r)
+ return r;
+
+ c->onenand.wait = omap2_onenand_wait;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ c->dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (c->dma_chan) {
+ c->onenand.read_bufferram = omap2_onenand_read_bufferram;
+ c->onenand.write_bufferram = omap2_onenand_write_bufferram;
+ }
+
+ c->pdev = pdev;
+ c->mtd.priv = &c->onenand;
+ c->mtd.dev.parent = dev;
+ mtd_set_of_node(&c->mtd, dev->of_node);
+
+ dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
+ c->gpmc_cs, c->phys_base, c->onenand.base,
+ c->dma_chan ? "DMA" : "PIO");
+
+ r = onenand_scan(&c->mtd, 1);
+ if (r < 0)
+ goto err_release_dma;
+
+ freq = omap2_onenand_get_freq(c->onenand.version_id);
+ if (freq > 0) {
+ switch (freq) {
+ case 104:
+ latency = 7;
+ break;
+ case 83:
+ latency = 6;
+ break;
+ case 66:
+ latency = 5;
+ break;
+ case 56:
+ latency = 4;
+ break;
+ default: /* 40 MHz or lower */
+ latency = 3;
+ break;
+ }
+
+ r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
+ freq, latency, &info);
+ if (r)
+ goto err_release_onenand;
+
+ r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
+ latency, info.burst_len);
+ if (r)
+ goto err_release_onenand;
+
+ if (info.sync_read || info.sync_write)
+ dev_info(dev, "optimized timings for %d MHz\n", freq);
+ }
+
+ r = mtd_device_register(&c->mtd, NULL, 0);
+ if (r)
+ goto err_release_onenand;
+
+ platform_set_drvdata(pdev, c);
+
+ return 0;
+
+err_release_onenand:
+ onenand_release(&c->mtd);
+err_release_dma:
+ if (c->dma_chan)
+ dma_release_channel(c->dma_chan);
+
+ return r;
+}
+
+static void omap2_onenand_remove(struct platform_device *pdev)
+{
+ struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
+
+ onenand_release(&c->mtd);
+ if (c->dma_chan)
+ dma_release_channel(c->dma_chan);
+ omap2_onenand_shutdown(pdev);
+}
+
+static const struct of_device_id omap2_onenand_id_table[] = {
+ { .compatible = "ti,omap2-onenand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);
+
+static struct platform_driver omap2_onenand_driver = {
+ .probe = omap2_onenand_probe,
+ .remove_new = omap2_onenand_remove,
+ .shutdown = omap2_onenand_shutdown,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = omap2_onenand_id_table,
+ },
+};
+
+module_platform_driver(omap2_onenand_driver);
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
+MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
diff --git a/drivers/mtd/nand/onenand/onenand_samsung.c b/drivers/mtd/nand/onenand/onenand_samsung.c
new file mode 100644
index 0000000000..fd6890a03d
--- /dev/null
+++ b/drivers/mtd/nand/onenand/onenand_samsung.c
@@ -0,0 +1,1001 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Samsung S3C64XX/S5PC1XX OneNAND driver
+ *
+ * Copyright © 2008-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * Implementation:
+ * S3C64XX: emulate the pseudo BufferRAM
+ * S5PC110: use DMA
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include "samsung.h"
+
+enum soc_type {
+ TYPE_S3C6400,
+ TYPE_S3C6410,
+ TYPE_S5PC110,
+};
+
+#define ONENAND_ERASE_STATUS 0x00
+#define ONENAND_MULTI_ERASE_SET 0x01
+#define ONENAND_ERASE_START 0x03
+#define ONENAND_UNLOCK_START 0x08
+#define ONENAND_UNLOCK_END 0x09
+#define ONENAND_LOCK_START 0x0A
+#define ONENAND_LOCK_END 0x0B
+#define ONENAND_LOCK_TIGHT_START 0x0C
+#define ONENAND_LOCK_TIGHT_END 0x0D
+#define ONENAND_UNLOCK_ALL 0x0E
+#define ONENAND_OTP_ACCESS 0x12
+#define ONENAND_SPARE_ACCESS_ONLY 0x13
+#define ONENAND_MAIN_ACCESS_ONLY 0x14
+#define ONENAND_ERASE_VERIFY 0x15
+#define ONENAND_MAIN_SPARE_ACCESS 0x16
+#define ONENAND_PIPELINE_READ 0x4000
+
+#define MAP_00 (0x0)
+#define MAP_01 (0x1)
+#define MAP_10 (0x2)
+#define MAP_11 (0x3)
+
+#define S3C64XX_CMD_MAP_SHIFT 24
+
+#define S3C6400_FBA_SHIFT 10
+#define S3C6400_FPA_SHIFT 4
+#define S3C6400_FSA_SHIFT 2
+
+#define S3C6410_FBA_SHIFT 12
+#define S3C6410_FPA_SHIFT 6
+#define S3C6410_FSA_SHIFT 4
+
+/* S5PC110 specific definitions */
+#define S5PC110_DMA_SRC_ADDR 0x400
+#define S5PC110_DMA_SRC_CFG 0x404
+#define S5PC110_DMA_DST_ADDR 0x408
+#define S5PC110_DMA_DST_CFG 0x40C
+#define S5PC110_DMA_TRANS_SIZE 0x414
+#define S5PC110_DMA_TRANS_CMD 0x418
+#define S5PC110_DMA_TRANS_STATUS 0x41C
+#define S5PC110_DMA_TRANS_DIR 0x420
+#define S5PC110_INTC_DMA_CLR 0x1004
+#define S5PC110_INTC_ONENAND_CLR 0x1008
+#define S5PC110_INTC_DMA_MASK 0x1024
+#define S5PC110_INTC_ONENAND_MASK 0x1028
+#define S5PC110_INTC_DMA_PEND 0x1044
+#define S5PC110_INTC_ONENAND_PEND 0x1048
+#define S5PC110_INTC_DMA_STATUS 0x1064
+#define S5PC110_INTC_ONENAND_STATUS 0x1068
+
+#define S5PC110_INTC_DMA_TD (1 << 24)
+#define S5PC110_INTC_DMA_TE (1 << 16)
+
+#define S5PC110_DMA_CFG_SINGLE (0x0 << 16)
+#define S5PC110_DMA_CFG_4BURST (0x2 << 16)
+#define S5PC110_DMA_CFG_8BURST (0x3 << 16)
+#define S5PC110_DMA_CFG_16BURST (0x4 << 16)
+
+#define S5PC110_DMA_CFG_INC (0x0 << 8)
+#define S5PC110_DMA_CFG_CNT (0x1 << 8)
+
+#define S5PC110_DMA_CFG_8BIT (0x0 << 0)
+#define S5PC110_DMA_CFG_16BIT (0x1 << 0)
+#define S5PC110_DMA_CFG_32BIT (0x2 << 0)
+
+#define S5PC110_DMA_SRC_CFG_READ (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_16BIT)
+#define S5PC110_DMA_DST_CFG_READ (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_32BIT)
+#define S5PC110_DMA_SRC_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_32BIT)
+#define S5PC110_DMA_DST_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_16BIT)
+
+#define S5PC110_DMA_TRANS_CMD_TDC (0x1 << 18)
+#define S5PC110_DMA_TRANS_CMD_TEC (0x1 << 16)
+#define S5PC110_DMA_TRANS_CMD_TR (0x1 << 0)
+
+#define S5PC110_DMA_TRANS_STATUS_TD (0x1 << 18)
+#define S5PC110_DMA_TRANS_STATUS_TB (0x1 << 17)
+#define S5PC110_DMA_TRANS_STATUS_TE (0x1 << 16)
+
+#define S5PC110_DMA_DIR_READ 0x0
+#define S5PC110_DMA_DIR_WRITE 0x1
+
+struct s3c_onenand {
+ struct mtd_info *mtd;
+ struct platform_device *pdev;
+ enum soc_type type;
+ void __iomem *base;
+ void __iomem *ahb_addr;
+ int bootram_command;
+ void *page_buf;
+ void *oob_buf;
+ unsigned int (*mem_addr)(int fba, int fpa, int fsa);
+ unsigned int (*cmd_map)(unsigned int type, unsigned int val);
+ void __iomem *dma_addr;
+ unsigned long phys_base;
+ struct completion complete;
+};
+
+#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1)))
+#define CMD_MAP_01(dev, mem_addr) (dev->cmd_map(MAP_01, (mem_addr)))
+#define CMD_MAP_10(dev, mem_addr) (dev->cmd_map(MAP_10, (mem_addr)))
+#define CMD_MAP_11(dev, addr) (dev->cmd_map(MAP_11, ((addr) << 2)))
+
+static struct s3c_onenand *onenand;
+
+static inline int s3c_read_reg(int offset)
+{
+ return readl(onenand->base + offset);
+}
+
+static inline void s3c_write_reg(int value, int offset)
+{
+ writel(value, onenand->base + offset);
+}
+
+static inline int s3c_read_cmd(unsigned int cmd)
+{
+ return readl(onenand->ahb_addr + cmd);
+}
+
+static inline void s3c_write_cmd(int value, unsigned int cmd)
+{
+ writel(value, onenand->ahb_addr + cmd);
+}
+
+#ifdef SAMSUNG_DEBUG
+static void s3c_dump_reg(void)
+{
+ int i;
+
+ for (i = 0; i < 0x400; i += 0x40) {
+ printk(KERN_INFO "0x%08X: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ (unsigned int) onenand->base + i,
+ s3c_read_reg(i), s3c_read_reg(i + 0x10),
+ s3c_read_reg(i + 0x20), s3c_read_reg(i + 0x30));
+ }
+}
+#endif
+
+static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val)
+{
+ return (type << S3C64XX_CMD_MAP_SHIFT) | val;
+}
+
+static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) |
+ (fsa << S3C6400_FSA_SHIFT);
+}
+
+static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) |
+ (fsa << S3C6410_FSA_SHIFT);
+}
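+
+/*
+ * Worked example: on S3C6410, block 3 / page 5 / sector 0 yields
+ * (3 << 12) | (5 << 6) | (0 << 4) = 0x3140, which CMD_MAP_01() then
+ * tags with the MAP_01 type in bits 31-24 of the AHB address.
+ */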
+
+static void s3c_onenand_reset(void)
+{
+ unsigned long timeout = 0x10000;
+ int stat;
+
+ s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
+ while (timeout--) {
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ if (stat & RST_CMP)
+ break;
+ }
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+ /* Clear interrupt */
+ s3c_write_reg(0x0, INT_ERR_ACK_OFFSET);
+ /* Clear the ECC status */
+ s3c_write_reg(0x0, ECC_ERR_STAT_OFFSET);
+}
+
+static unsigned short s3c_onenand_readw(void __iomem *addr)
+{
+ struct onenand_chip *this = onenand->mtd->priv;
+ struct device *dev = &onenand->pdev->dev;
+ int reg = addr - this->base;
+ int word_addr = reg >> 1;
+ int value;
+
+ /* Used only at probe time */
+ switch (reg) {
+ case ONENAND_REG_MANUFACTURER_ID:
+ return s3c_read_reg(MANUFACT_ID_OFFSET);
+ case ONENAND_REG_DEVICE_ID:
+ return s3c_read_reg(DEVICE_ID_OFFSET);
+ case ONENAND_REG_VERSION_ID:
+ return s3c_read_reg(FLASH_VER_ID_OFFSET);
+ case ONENAND_REG_DATA_BUFFER_SIZE:
+ return s3c_read_reg(DATA_BUF_SIZE_OFFSET);
+ case ONENAND_REG_TECHNOLOGY:
+ return s3c_read_reg(TECH_OFFSET);
+ case ONENAND_REG_SYS_CFG1:
+ return s3c_read_reg(MEM_CFG_OFFSET);
+
+ /* Used while checking status after unlock-all */
+ case ONENAND_REG_CTRL_STATUS:
+ return 0;
+
+ case ONENAND_REG_WP_STATUS:
+ return ONENAND_WP_US;
+
+ default:
+ break;
+ }
+
+ /* BootRAM access control */
+ if ((unsigned long)addr < ONENAND_DATARAM && onenand->bootram_command) {
+ if (word_addr == 0)
+ return s3c_read_reg(MANUFACT_ID_OFFSET);
+ if (word_addr == 1)
+ return s3c_read_reg(DEVICE_ID_OFFSET);
+ if (word_addr == 2)
+ return s3c_read_reg(FLASH_VER_ID_OFFSET);
+ }
+
+ value = s3c_read_cmd(CMD_MAP_11(onenand, word_addr)) & 0xffff;
+ dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
+ word_addr, value);
+ return value;
+}
+
+static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
+{
+ struct onenand_chip *this = onenand->mtd->priv;
+ struct device *dev = &onenand->pdev->dev;
+ unsigned int reg = addr - this->base;
+ unsigned int word_addr = reg >> 1;
+
+ /* Used only at probe time */
+ switch (reg) {
+ case ONENAND_REG_SYS_CFG1:
+ s3c_write_reg(value, MEM_CFG_OFFSET);
+ return;
+
+ case ONENAND_REG_START_ADDRESS1:
+ case ONENAND_REG_START_ADDRESS2:
+ return;
+
+ /* Lock/lock-tight/unlock/unlock_all */
+ case ONENAND_REG_START_BLOCK_ADDRESS:
+ return;
+
+ default:
+ break;
+ }
+
+ /* BootRAM access control */
+ if ((unsigned long)addr < ONENAND_DATARAM) {
+ if (value == ONENAND_CMD_READID) {
+ onenand->bootram_command = 1;
+ return;
+ }
+ if (value == ONENAND_CMD_RESET) {
+ s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
+ onenand->bootram_command = 0;
+ return;
+ }
+ }
+
+ dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
+ word_addr, value);
+
+ s3c_write_cmd(value, CMD_MAP_11(onenand, word_addr));
+}
+
+static int s3c_onenand_wait(struct mtd_info *mtd, int state)
+{
+ struct device *dev = &onenand->pdev->dev;
+ unsigned int flags = INT_ACT;
+ unsigned int stat, ecc;
+ unsigned long timeout;
+
+ switch (state) {
+ case FL_READING:
+ flags |= BLK_RW_CMP | LOAD_CMP;
+ break;
+ case FL_WRITING:
+ flags |= BLK_RW_CMP | PGM_CMP;
+ break;
+ case FL_ERASING:
+ flags |= BLK_RW_CMP | ERS_CMP;
+ break;
+ case FL_LOCKING:
+ flags |= BLK_RW_CMP;
+ break;
+ default:
+ break;
+ }
+
+ /* 20 msec is more than enough */
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ if (stat & flags)
+ break;
+
+ if (state != FL_READING)
+ cond_resched();
+ }
+ /* Read the interrupt status once more to cover the timeout case */
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+ /*
+ * The spec says to check the controller status first, but to get
+ * correct information in the power-off recovery (POR) case, the
+ * ECC status has to be read first.
+ */
+ if (stat & LOAD_CMP) {
+ ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
+ if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+ dev_info(dev, "%s: ECC error = 0x%04x\n", __func__,
+ ecc);
+ mtd->ecc_stats.failed++;
+ return -EBADMSG;
+ }
+ }
+
+ if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) {
+ dev_info(dev, "%s: controller error = 0x%04x\n", __func__,
+ stat);
+ if (stat & LOCKED_BLK)
+ dev_info(dev, "%s: it's locked error = 0x%04x\n",
+ __func__, stat);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
+ size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int *m, *s;
+ int fba, fpa, fsa = 0;
+ unsigned int mem_addr, cmd_map_01, cmd_map_10;
+ int i, mcount, scount;
+ int index;
+
+ fba = (int) (addr >> this->erase_shift);
+ fpa = (int) (addr >> this->page_shift);
+ fpa &= this->page_mask;
+
+ mem_addr = onenand->mem_addr(fba, fpa, fsa);
+ cmd_map_01 = CMD_MAP_01(onenand, mem_addr);
+ cmd_map_10 = CMD_MAP_10(onenand, mem_addr);
+
+ switch (cmd) {
+ case ONENAND_CMD_READ:
+ case ONENAND_CMD_READOOB:
+ case ONENAND_CMD_BUFFERRAM:
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ break;
+ default:
+ break;
+ }
+
+ index = ONENAND_CURRENT_BUFFERRAM(this);
+
+ /* Emulate the two BufferRAMs and access them via 32-bit pointers */
+ m = onenand->page_buf;
+ s = onenand->oob_buf;
+
+ if (index) {
+ m += (this->writesize >> 2);
+ s += (mtd->oobsize >> 2);
+ }
+
+ mcount = mtd->writesize >> 2;
+ scount = mtd->oobsize >> 2;
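+ /*
+ * For example, a 2KiB page with 64 bytes of OOB gives mcount = 512
+ * and scount = 16 32-bit words per transfer.
+ */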
+
+ switch (cmd) {
+ case ONENAND_CMD_READ:
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ *m++ = s3c_read_cmd(cmd_map_01);
+ return 0;
+
+ case ONENAND_CMD_READOOB:
+ s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ *m++ = s3c_read_cmd(cmd_map_01);
+
+ /* Spare */
+ for (i = 0; i < scount; i++)
+ *s++ = s3c_read_cmd(cmd_map_01);
+
+ s3c_write_reg(0, TRANS_SPARE_OFFSET);
+ return 0;
+
+ case ONENAND_CMD_PROG:
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ s3c_write_cmd(*m++, cmd_map_01);
+ return 0;
+
+ case ONENAND_CMD_PROGOOB:
+ s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
+
+ /* Main - dummy write */
+ for (i = 0; i < mcount; i++)
+ s3c_write_cmd(0xffffffff, cmd_map_01);
+
+ /* Spare */
+ for (i = 0; i < scount; i++)
+ s3c_write_cmd(*s++, cmd_map_01);
+
+ s3c_write_reg(0, TRANS_SPARE_OFFSET);
+ return 0;
+
+ case ONENAND_CMD_UNLOCK_ALL:
+ s3c_write_cmd(ONENAND_UNLOCK_ALL, cmd_map_10);
+ return 0;
+
+ case ONENAND_CMD_ERASE:
+ s3c_write_cmd(ONENAND_ERASE_START, cmd_map_10);
+ return 0;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
+{
+ struct onenand_chip *this = mtd->priv;
+ int index = ONENAND_CURRENT_BUFFERRAM(this);
+ unsigned char *p;
+
+ if (area == ONENAND_DATARAM) {
+ p = onenand->page_buf;
+ if (index == 1)
+ p += this->writesize;
+ } else {
+ p = onenand->oob_buf;
+ if (index == 1)
+ p += mtd->oobsize;
+ }
+
+ return p;
+}
+
+static int onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ unsigned char *p;
+
+ p = s3c_get_bufferram(mtd, area);
+ memcpy(buffer, p + offset, count);
+ return 0;
+}
+
+static int onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer, int offset,
+ size_t count)
+{
+ unsigned char *p;
+
+ p = s3c_get_bufferram(mtd, area);
+ memcpy(p + offset, buffer, count);
+ return 0;
+}
+
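+/*
+ * s5pc110_dma_ops is set at probe time: it defaults to the polled
+ * implementation below and is upgraded to the IRQ-driven variant when an
+ * interrupt resource is available.
+ */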
+static int (*s5pc110_dma_ops)(dma_addr_t dst, dma_addr_t src, size_t count, int direction);
+
+static int s5pc110_dma_poll(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
+{
+ void __iomem *base = onenand->dma_addr;
+ int status;
+ unsigned long timeout;
+
+ writel(src, base + S5PC110_DMA_SRC_ADDR);
+ writel(dst, base + S5PC110_DMA_DST_ADDR);
+
+ if (direction == S5PC110_DMA_DIR_READ) {
+ writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
+ } else {
+ writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
+ }
+
+ writel(count, base + S5PC110_DMA_TRANS_SIZE);
+ writel(direction, base + S5PC110_DMA_TRANS_DIR);
+
+ writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
+
+ /*
+ * The spec gives no exact timeout value. In practice a transfer
+ * completes in under 1 msec, so 20 msecs is a safe upper bound.
+ */
+ timeout = jiffies + msecs_to_jiffies(20);
+
+ do {
+ status = readl(base + S5PC110_DMA_TRANS_STATUS);
+ if (status & S5PC110_DMA_TRANS_STATUS_TE) {
+ writel(S5PC110_DMA_TRANS_CMD_TEC,
+ base + S5PC110_DMA_TRANS_CMD);
+ return -EIO;
+ }
+ } while (!(status & S5PC110_DMA_TRANS_STATUS_TD) &&
+ time_before(jiffies, timeout));
+
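+ /* Acknowledge transfer-done (or give up after the timeout) */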
+ writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
+
+ return 0;
+}
+
+static irqreturn_t s5pc110_onenand_irq(int irq, void *data)
+{
+ void __iomem *base = onenand->dma_addr;
+ int status, cmd = 0;
+
+ status = readl(base + S5PC110_INTC_DMA_STATUS);
+
+ if (likely(status & S5PC110_INTC_DMA_TD))
+ cmd = S5PC110_DMA_TRANS_CMD_TDC;
+
+ if (unlikely(status & S5PC110_INTC_DMA_TE))
+ cmd = S5PC110_DMA_TRANS_CMD_TEC;
+
+ writel(cmd, base + S5PC110_DMA_TRANS_CMD);
+ writel(status, base + S5PC110_INTC_DMA_CLR);
+
+ if (!onenand->complete.done)
+ complete(&onenand->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int s5pc110_dma_irq(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
+{
+ void __iomem *base = onenand->dma_addr;
+ int status;
+
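+ /* Make sure the TD and TE interrupts are unmasked */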
+ status = readl(base + S5PC110_INTC_DMA_MASK);
+ if (status) {
+ status &= ~(S5PC110_INTC_DMA_TD | S5PC110_INTC_DMA_TE);
+ writel(status, base + S5PC110_INTC_DMA_MASK);
+ }
+
+ writel(src, base + S5PC110_DMA_SRC_ADDR);
+ writel(dst, base + S5PC110_DMA_DST_ADDR);
+
+ if (direction == S5PC110_DMA_DIR_READ) {
+ writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
+ } else {
+ writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
+ }
+
+ writel(count, base + S5PC110_DMA_TRANS_SIZE);
+ writel(direction, base + S5PC110_DMA_TRANS_DIR);
+
+ writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
+
+ wait_for_completion_timeout(&onenand->complete, msecs_to_jiffies(20));
+
+ return 0;
+}
+
+static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset, size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *p;
+ void *buf = (void *) buffer;
+ dma_addr_t dma_src, dma_dst;
+ int err, ofs, page_dma = 0;
+ struct device *dev = &onenand->pdev->dev;
+
+ p = this->base + area;
+ if (ONENAND_CURRENT_BUFFERRAM(this)) {
+ if (area == ONENAND_DATARAM)
+ p += this->writesize;
+ else
+ p += mtd->oobsize;
+ }
+
+ if (offset & 3 || (size_t) buf & 3 ||
+ !onenand->dma_addr || count != mtd->writesize)
+ goto normal;
+
+ /*
+ * Handle vmalloc addresses: translate to a struct page for DMA
+ * mapping, and fall back to the PIO path below if the buffer
+ * straddles a page boundary.
+ */
+ if (buf >= high_memory) {
+ struct page *page;
+
+ if (((size_t) buf & PAGE_MASK) !=
+ ((size_t) (buf + count - 1) & PAGE_MASK))
+ goto normal;
+ page = vmalloc_to_page(buf);
+ if (!page)
+ goto normal;
+
+ /* Page offset */
+ ofs = ((size_t) buf & ~PAGE_MASK);
+ page_dma = 1;
+
+ /* DMA routine */
+ dma_src = onenand->phys_base + (p - this->base);
+ dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
+ } else {
+ /* DMA routine */
+ dma_src = onenand->phys_base + (p - this->base);
+ dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
+ }
+ if (dma_mapping_error(dev, dma_dst)) {
+ dev_err(dev, "Couldn't map a %zu byte buffer for DMA\n", count);
+ goto normal;
+ }
+ err = s5pc110_dma_ops(dma_dst, dma_src,
+ count, S5PC110_DMA_DIR_READ);
+
+ if (page_dma)
+ dma_unmap_page(dev, dma_dst, count, DMA_FROM_DEVICE);
+ else
+ dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
+
+ if (!err)
+ return 0;
+
+normal:
+ if (count != mtd->writesize) {
+ /* Copy the bufferram to memory to prevent unaligned access */
+ memcpy_fromio(this->page_buf, p, mtd->writesize);
+ memcpy(buffer, this->page_buf + offset, count);
+ } else {
+ memcpy_fromio(buffer, p, count);
+ }
+
+ return 0;
+}
+
+static int s5pc110_chip_probe(struct mtd_info *mtd)
+{
+ /* Nothing to do here; just report success */
+ return 0;
+}
+
+static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state)
+{
+ unsigned int flags = INT_ACT | LOAD_CMP;
+ unsigned int stat;
+ unsigned long timeout;
+
+ /* 20 msec is more than enough */
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ if (stat & flags)
+ break;
+ }
+ /* Read the interrupt status once more to cover the timeout case */
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+ if (stat & LD_FAIL_ECC_ERR) {
+ s3c_onenand_reset();
+ return ONENAND_BBT_READ_ERROR;
+ }
+
+ if (stat & LOAD_CMP) {
+ int ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
+ if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+ s3c_onenand_reset();
+ return ONENAND_BBT_READ_ERROR;
+ }
+ }
+
+ return 0;
+}
+
+static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct device *dev = &onenand->pdev->dev;
+ unsigned int block, end;
+
+ end = this->chipsize >> this->erase_shift;
+
+ for (block = 0; block < end; block++) {
+ unsigned int mem_addr = onenand->mem_addr(block, 0, 0);
+ s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));
+
+ if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) {
+ dev_err(dev, "block %d is write-protected!\n", block);
+ s3c_write_reg(LOCKED_BLK, INT_ERR_ACK_OFFSET);
+ }
+ }
+}
+
+static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs,
+ size_t len, int cmd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int start, end, start_mem_addr, end_mem_addr;
+
+ start = ofs >> this->erase_shift;
+ start_mem_addr = onenand->mem_addr(start, 0, 0);
+ end = start + (len >> this->erase_shift) - 1;
+ end_mem_addr = onenand->mem_addr(end, 0, 0);
+
+ if (cmd == ONENAND_CMD_LOCK) {
+ s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(onenand,
+ start_mem_addr));
+ s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(onenand,
+ end_mem_addr));
+ } else {
+ s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(onenand,
+ start_mem_addr));
+ s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(onenand,
+ end_mem_addr));
+ }
+
+ this->wait(mtd, FL_LOCKING);
+}
+
+static void s3c_unlock_all(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t ofs = 0;
+ size_t len = this->chipsize;
+
+ if (this->options & ONENAND_HAS_UNLOCK_ALL) {
+ /* Write unlock command */
+ this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
+
+ /* No need to check return value */
+ this->wait(mtd, FL_LOCKING);
+
+ /* Workaround for all-block unlock on DDP devices */
+ if (!ONENAND_IS_DDP(this)) {
+ s3c_onenand_check_lock_status(mtd);
+ return;
+ }
+
+ /* Unlock the blocks on the other die explicitly */
+ ofs = this->chipsize >> 1;
+ len = this->chipsize >> 1;
+ }
+
+ s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+
+ s3c_onenand_check_lock_status(mtd);
+}
+
+static void s3c_onenand_setup(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ onenand->mtd = mtd;
+
+ if (onenand->type == TYPE_S3C6400) {
+ onenand->mem_addr = s3c6400_mem_addr;
+ onenand->cmd_map = s3c64xx_cmd_map;
+ } else if (onenand->type == TYPE_S3C6410) {
+ onenand->mem_addr = s3c6410_mem_addr;
+ onenand->cmd_map = s3c64xx_cmd_map;
+ } else if (onenand->type == TYPE_S5PC110) {
+ /* Use generic onenand functions */
+ this->read_bufferram = s5pc110_read_bufferram;
+ this->chip_probe = s5pc110_chip_probe;
+ return;
+ } else {
+ BUG();
+ }
+
+ this->read_word = s3c_onenand_readw;
+ this->write_word = s3c_onenand_writew;
+
+ this->wait = s3c_onenand_wait;
+ this->bbt_wait = s3c_onenand_bbt_wait;
+ this->unlock_all = s3c_unlock_all;
+ this->command = s3c_onenand_command;
+
+ this->read_bufferram = onenand_read_bufferram;
+ this->write_bufferram = onenand_write_bufferram;
+}
+
+static int s3c_onenand_probe(struct platform_device *pdev)
+{
+ struct onenand_platform_data *pdata;
+ struct onenand_chip *this;
+ struct mtd_info *mtd;
+ struct resource *r;
+ int size, err;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ /* No need to check pdata; platform data is optional */
+
+ size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
+ mtd = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!mtd)
+ return -ENOMEM;
+
+ onenand = devm_kzalloc(&pdev->dev, sizeof(struct s3c_onenand),
+ GFP_KERNEL);
+ if (!onenand)
+ return -ENOMEM;
+
+ this = (struct onenand_chip *) &mtd[1];
+ mtd->priv = this;
+ mtd->dev.parent = &pdev->dev;
+ onenand->pdev = pdev;
+ onenand->type = platform_get_device_id(pdev)->driver_data;
+
+ s3c_onenand_setup(mtd);
+
+ onenand->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
+ if (IS_ERR(onenand->base))
+ return PTR_ERR(onenand->base);
+
+ onenand->phys_base = r->start;
+
+ /* Set onenand_chip also */
+ this->base = onenand->base;
+
+ /* Rely on runtime bad-block checks; skip the unlock status check */
+ this->options |= ONENAND_SKIP_UNLOCK_CHECK;
+
+ if (onenand->type != TYPE_S5PC110) {
+ onenand->ahb_addr = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(onenand->ahb_addr))
+ return PTR_ERR(onenand->ahb_addr);
+
+ /* Allocate 4KiB BufferRAM */
+ onenand->page_buf = devm_kzalloc(&pdev->dev, SZ_4K,
+ GFP_KERNEL);
+ if (!onenand->page_buf)
+ return -ENOMEM;
+
+ /* Allocate a 128-byte SpareRAM */
+ onenand->oob_buf = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);
+ if (!onenand->oob_buf)
+ return -ENOMEM;
+
+ } else { /* S5PC110 */
+ onenand->dma_addr = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(onenand->dma_addr))
+ return PTR_ERR(onenand->dma_addr);
+
+ s5pc110_dma_ops = s5pc110_dma_poll;
+ /* Interrupt support */
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (r) {
+ init_completion(&onenand->complete);
+ s5pc110_dma_ops = s5pc110_dma_irq;
+ err = devm_request_irq(&pdev->dev, r->start,
+ s5pc110_onenand_irq,
+ IRQF_SHARED, "onenand",
+ &onenand);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return err;
+ }
+ }
+ }
+
+ err = onenand_scan(mtd, 1);
+ if (err)
+ return err;
+
+ if (onenand->type != TYPE_S5PC110) {
+ /* S3C doesn't handle subpage write */
+ mtd->subpage_sft = 0;
+ this->subpagesize = mtd->writesize;
+ }
+
+ if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
+ dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
+
+ err = mtd_device_register(mtd, pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
+ if (err) {
+ dev_err(&pdev->dev, "failed to parse partitions and register the MTD device\n");
+ onenand_release(mtd);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, mtd);
+
+ return 0;
+}
+
+static void s3c_onenand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+
+ onenand_release(mtd);
+}
+
+static int s3c_pm_ops_suspend(struct device *dev)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct onenand_chip *this = mtd->priv;
+
+ this->wait(mtd, FL_PM_SUSPENDED);
+ return 0;
+}
+
+static int s3c_pm_ops_resume(struct device *dev)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct onenand_chip *this = mtd->priv;
+
+ this->unlock_all(mtd);
+ return 0;
+}
+
+static const struct dev_pm_ops s3c_pm_ops = {
+ .suspend = s3c_pm_ops_suspend,
+ .resume = s3c_pm_ops_resume,
+};
+
+static const struct platform_device_id s3c_onenand_driver_ids[] = {
+ {
+ .name = "s3c6400-onenand",
+ .driver_data = TYPE_S3C6400,
+ }, {
+ .name = "s3c6410-onenand",
+ .driver_data = TYPE_S3C6410,
+ }, {
+ .name = "s5pc110-onenand",
+ .driver_data = TYPE_S5PC110,
+ }, { },
+};
+MODULE_DEVICE_TABLE(platform, s3c_onenand_driver_ids);
+
+static struct platform_driver s3c_onenand_driver = {
+ .driver = {
+ .name = "samsung-onenand",
+ .pm = &s3c_pm_ops,
+ },
+ .id_table = s3c_onenand_driver_ids,
+ .probe = s3c_onenand_probe,
+ .remove_new = s3c_onenand_remove,
+};
+
+module_platform_driver(s3c_onenand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_DESCRIPTION("Samsung OneNAND controller support");
diff --git a/drivers/mtd/nand/onenand/samsung.h b/drivers/mtd/nand/onenand/samsung.h
new file mode 100644
index 0000000000..892bbb6ca4
--- /dev/null
+++ b/drivers/mtd/nand/onenand/samsung.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2008-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ */
+#ifndef __SAMSUNG_ONENAND_H__
+#define __SAMSUNG_ONENAND_H__
+
+/*
+ * OneNAND Controller
+ */
+#define MEM_CFG_OFFSET 0x0000
+#define BURST_LEN_OFFSET 0x0010
+#define MEM_RESET_OFFSET 0x0020
+#define INT_ERR_STAT_OFFSET 0x0030
+#define INT_ERR_MASK_OFFSET 0x0040
+#define INT_ERR_ACK_OFFSET 0x0050
+#define ECC_ERR_STAT_OFFSET 0x0060
+#define MANUFACT_ID_OFFSET 0x0070
+#define DEVICE_ID_OFFSET 0x0080
+#define DATA_BUF_SIZE_OFFSET 0x0090
+#define BOOT_BUF_SIZE_OFFSET 0x00A0
+#define BUF_AMOUNT_OFFSET 0x00B0
+#define TECH_OFFSET 0x00C0
+#define FBA_WIDTH_OFFSET 0x00D0
+#define FPA_WIDTH_OFFSET 0x00E0
+#define FSA_WIDTH_OFFSET 0x00F0
+#define TRANS_SPARE_OFFSET 0x0140
+#define DBS_DFS_WIDTH_OFFSET 0x0160
+#define INT_PIN_ENABLE_OFFSET 0x01A0
+#define ACC_CLOCK_OFFSET 0x01C0
+#define FLASH_VER_ID_OFFSET 0x01F0
+#define FLASH_AUX_CNTRL_OFFSET 0x0300 /* s3c64xx only */
+
+#define ONENAND_MEM_RESET_HOT 0x3
+#define ONENAND_MEM_RESET_COLD 0x2
+#define ONENAND_MEM_RESET_WARM 0x1
+
+#define CACHE_OP_ERR (1 << 13)
+#define RST_CMP (1 << 12)
+#define RDY_ACT (1 << 11)
+#define INT_ACT (1 << 10)
+#define UNSUP_CMD (1 << 9)
+#define LOCKED_BLK (1 << 8)
+#define BLK_RW_CMP (1 << 7)
+#define ERS_CMP (1 << 6)
+#define PGM_CMP (1 << 5)
+#define LOAD_CMP (1 << 4)
+#define ERS_FAIL (1 << 3)
+#define PGM_FAIL (1 << 2)
+#define INT_TO (1 << 1)
+#define LD_FAIL_ECC_ERR (1 << 0)
+
+#define TSRF (1 << 0)
+
+#endif
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
new file mode 100644
index 0000000000..cbf8ae85e1
--- /dev/null
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -0,0 +1,546 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig MTD_RAW_NAND
+ tristate "Raw/Parallel NAND Device Support"
+ select MTD_NAND_CORE
+ select MTD_NAND_ECC
+ help
+ This enables support for accessing all types of raw/parallel
+ NAND flash devices. For further information see
+ <http://www.linux-mtd.infradead.org/doc/nand.html>.
+
+if MTD_RAW_NAND
+
+comment "Raw/parallel NAND flash controllers"
+
+config MTD_NAND_DENALI
+ tristate
+
+config MTD_NAND_DENALI_PCI
+ tristate "Denali NAND controller on Intel Moorestown"
+ select MTD_NAND_DENALI
+ depends on PCI
+ help
+ Enable the driver for NAND flash on Intel Moorestown, using the
+ Denali NAND controller core.
+
+config MTD_NAND_DENALI_DT
+ tristate "Denali NAND controller as a DT device"
+ select MTD_NAND_DENALI
+ depends on HAS_DMA && HAVE_CLK && OF && HAS_IOMEM
+ help
+ Enable the driver for NAND flash on platforms using a Denali NAND
+ controller as a DT device.
+
+config MTD_NAND_AMS_DELTA
+ tristate "Amstrad E3 NAND controller"
+ depends on MACH_AMS_DELTA || COMPILE_TEST
+ default y
+ help
+ Support for NAND flash on Amstrad E3 (Delta).
+
+config MTD_NAND_OMAP2
+ tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OMAP_GPMC
+ help
+ Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
+ and Keystone platforms.
+
+config MTD_NAND_OMAP_BCH
+ depends on MTD_NAND_OMAP2
+ bool "Support hardware based BCH error correction"
+ default n
+ select BCH
+ help
+ This config enables the ELM hardware engine, which can be used to
+ locate and correct errors when using the BCH ECC scheme. This
+ offloads ECC error searching and correction from the CPU. Some
+ legacy OMAP families, such as OMAP2xxx and OMAP3xxx, do not have an
+ ELM engine, so this is optional for them.
+
+config MTD_NAND_OMAP_BCH_BUILD
+ def_tristate MTD_NAND_OMAP2 && MTD_NAND_OMAP_BCH
+
+config MTD_NAND_AU1550
+ tristate "Au1550/1200 NAND support"
+ depends on MIPS_ALCHEMY
+ help
+ This enables the driver for the NAND flash controller on the
+ AMD/Alchemy 1550 SOC.
+
+config MTD_NAND_NDFC
+ tristate "IBM/MCC 4xx NAND controller"
+ depends on 4xx
+ select MTD_NAND_ECC_SW_HAMMING
+ select MTD_NAND_ECC_SW_HAMMING_SMC
+ help
+ NDFC NAND Flash Controllers are integrated in IBM/AMCC's 4xx SoCs.
+
+config MTD_NAND_S3C2410
+ tristate "Samsung S3C NAND controller"
+ depends on ARCH_S3C64XX
+ help
+ This enables the NAND flash controller on the S3C24xx and S3C64xx
+ SoCs.
+
+ No board-specific support is done by this driver; each board
+ must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_S3C2410_DEBUG
+ bool "Samsung S3C NAND controller debug"
+ depends on MTD_NAND_S3C2410
+ help
+ Enable debugging of the S3C NAND driver
+
+config MTD_NAND_S3C2410_CLKSTOP
+ bool "Samsung S3C NAND IDLE clock stop"
+ depends on MTD_NAND_S3C2410
+ default n
+ help
+ Stop the clock to the NAND controller when there is no chip
+ selected, to save power. This adds a small delay when the NAND
+ chip is selected or released, but saves approximately 5mA of
+ power when nothing is happening.
+
+config MTD_NAND_SHARPSL
+ tristate "Sharp SL Series (C7xx + others) NAND controller"
+ depends on ARCH_PXA || COMPILE_TEST
+ depends on HAS_IOMEM
+
+config MTD_NAND_CAFE
+ tristate "OLPC CAFÉ NAND controller"
+ depends on PCI
+ select REED_SOLOMON
+ select REED_SOLOMON_DEC16
+ help
+ Use NAND flash attached to the CAFÉ chip designed for the OLPC
+ laptop.
+
+config MTD_NAND_CS553X
+ tristate "CS5535/CS5536 (AMD Geode companion) NAND controller"
+ depends on X86_32
+ depends on !UML && HAS_IOMEM
+ help
+ The CS553x companion chips for the AMD Geode processor
+ include NAND flash controllers with built-in hardware ECC
+ capabilities; enabling this option will allow you to use
+ these. The driver will check the MSRs to verify that the
+ controller is enabled for NAND, and currently requires that
+ the controller be in MMIO mode.
+
+ If you say "m", the module will be called cs553x_nand.
+
+config MTD_NAND_ATMEL
+ tristate "Atmel AT91 NAND Flash/SmartMedia NAND controller"
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on HAS_IOMEM
+ select GENERIC_ALLOCATOR
+ select MFD_ATMEL_SMC
+ help
+ Enables support for NAND Flash / Smart Media Card interface
+ on Atmel AT91 processors.
+
+config MTD_NAND_ORION
+ tristate "Marvell Orion NAND controller"
+ depends on PLAT_ORION
+ help
+ This enables the NAND flash controller on Orion machines.
+
+ No board-specific support is done by this driver; each board
+ must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_MARVELL
+ tristate "Marvell EBU NAND controller"
+ depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
+ COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables the NAND flash controller driver for Marvell boards,
+ including:
+ - PXA3xx processors (NFCv1)
+ - 32-bit Armada platforms (XP, 37x, 38x, 39x) (NFCv2)
+ - 64-bit Armada platforms (7k, 8k, ac5) (NFCv2)
+
+config MTD_NAND_SLC_LPC32XX
+ tristate "NXP LPC32xx SLC NAND controller"
+ depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for NXP's LPC32XX SLC (i.e. for Single Level Cell
+ chips) NAND controller. This is the default for the PHYTEC 3250
+ reference board which contains a NAND256R3A2CZA6 chip.
+
+ Please check that the actual NAND chip connected is supported
+ by the SLC NAND controller.
+
+config MTD_NAND_MLC_LPC32XX
+ tristate "NXP LPC32xx MLC NAND controller"
+ depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Uses the LPC32XX MLC (i.e. for Multi Level Cell chips) NAND
+ controller. This is the default for the WORK92105 controller
+ board.
+
+ Please check that the actual NAND chip connected is supported
+ by the MLC NAND controller.
+
+config MTD_NAND_PASEMI
+ tristate "PA Semi PWRficient NAND controller"
+ depends on PPC_PASEMI
+ help
+ Enables support for NAND Flash interface on PA Semi PWRficient
+ based boards.
+
+source "drivers/mtd/nand/raw/brcmnand/Kconfig"
+
+config MTD_NAND_BCM47XXNFLASH
+ tristate "BCM4706 BCMA NAND controller"
+ depends on BCMA_NFLASH
+ depends on BCMA
+ help
+ The BCMA bus can have various flash memories attached; they are
+ registered by bcma as platform devices. This enables the driver
+ for NAND flash memories. For now, only the BCM4706 is supported.
+
+config MTD_NAND_MPC5121_NFC
+ tristate "MPC5121 NAND controller"
+ depends on PPC_MPC512x
+ help
+ This enables the driver for the NAND flash controller on the
+ MPC5121 SoC.
+
+config MTD_NAND_GPMI_NAND
+ tristate "Freescale GPMI NAND controller"
+ depends on MXS_DMA
+ help
+ Enables NAND Flash support for IMX23, IMX28 or IMX6.
+ The GPMI controller is very powerful; with the help of the BCH
+ module it can do hardware ECC. The GPMI supports several
+ NAND flashes at the same time.
+
+config MTD_NAND_FSL_ELBC
+ tristate "Freescale eLBC NAND controller"
+ depends on FSL_SOC
+ select FSL_LBC
+ help
+ Various Freescale chips, including the 8313, include a NAND Flash
+ Controller Module with built-in hardware ECC capabilities.
+ Enabling this option will enable you to use this to control
+ external NAND devices.
+
+config MTD_NAND_FSL_IFC
+ tristate "Freescale IFC NAND controller"
+ depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
+ depends on HAS_IOMEM
+ select FSL_IFC
+ select MEMORY
+ help
+ Various Freescale chips, e.g. the P1010, include a NAND Flash machine
+ with built-in hardware ECC capabilities.
+ Enabling this option will enable you to use this to control
+ external NAND devices.
+
+config MTD_NAND_FSL_UPM
+ tristate "Freescale UPM NAND controller"
+ depends on PPC_83xx || PPC_85xx
+ select FSL_LBC
+ help
+ Enables support for NAND Flash chips wired onto Freescale PowerPC
+ processor localbus with User-Programmable Machine support.
+
+config MTD_NAND_VF610_NFC
+ tristate "Freescale VF610/MPC5125 NAND controller"
+ depends on (SOC_VF610 || COMPILE_TEST)
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND Flash Controller on some Freescale
+ processors like the VF610, MPC5125, MCF54418 or Kinetis K70.
+ The driver supports a maximum 2k page size. With 2k pages and
+ 64 bytes or more of OOB, hardware ECC with up to 32-bit error
+ correction is supported. Hardware ECC is only enabled through
+ device tree.
+
+config MTD_NAND_MXC
+ tristate "Freescale MXC NAND controller"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ help
+ This enables the driver for the NAND flash controller on the
+ MXC processors.
+
+config MTD_NAND_SH_FLCTL
+ tristate "Renesas SuperH FLCTL NAND controller"
+ depends on SUPERH || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Several Renesas SuperH CPUs have an FLCTL. This option enables
+ support for NAND Flash using the FLCTL.
+
+config MTD_NAND_DAVINCI
+ tristate "DaVinci/Keystone NAND controller"
+ depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF) || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enable the driver for NAND flash chips on Texas Instruments
+ DaVinci/Keystone processors.
+
+config MTD_NAND_TXX9NDFMC
+ tristate "TXx9 NAND controller"
+ depends on SOC_TX4938 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables the NAND flash controller on the TXx9 SoCs.
+
+config MTD_NAND_SOCRATES
+ tristate "Socrates NAND controller"
+ depends on SOCRATES
+ help
+ Enables support for NAND Flash chips wired onto Socrates board.
+
+source "drivers/mtd/nand/raw/ingenic/Kconfig"
+
+config MTD_NAND_FSMC
+ tristate "ST Micros FSMC NAND controller"
+ depends on OF && HAS_IOMEM
+ depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || COMPILE_TEST
+ help
+ Enables support for NAND Flash chips on the ST Microelectronics
+ Flexible Static Memory Controller (FSMC).
+
+config MTD_NAND_XWAY
+ bool "Lantiq XWAY NAND controller"
+ depends on LANTIQ && SOC_TYPE_XWAY
+ help
+ Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
+ to the External Bus Unit (EBU).
+
+config MTD_NAND_SUNXI
+ tristate "Allwinner NAND controller"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND Flash chips on Allwinner SoCs.
+
+config MTD_NAND_HISI504
+ tristate "Hisilicon Hip04 NAND controller"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND controller on Hisilicon SoC Hip04.
+
+config MTD_NAND_QCOM
+ tristate "QCOM NAND controller"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND flash chips on SoCs containing the EBI2 NAND
+ controller. This controller is found on IPQ806x SoC.
+
+config MTD_NAND_MTK
+ tristate "MTK NAND controller"
+ depends on MTD_NAND_ECC_MEDIATEK
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND controller on MTK SoCs.
+ This controller is found on mt27xx, mt81xx, mt65xx SoCs.
+
+config MTD_NAND_MXIC
+ tristate "Macronix raw NAND controller"
+ depends on HAS_IOMEM || COMPILE_TEST
+ help
+ This selects the Macronix raw NAND controller driver.
+
+config MTD_NAND_TEGRA
+ tristate "NVIDIA Tegra NAND controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for the NAND flash controller on NVIDIA Tegra SoCs.
+ The driver has been developed and tested on a Tegra 2 SoC. DMA
+ support and both raw and HW-ECC page reads and writes are
+ supported. Extra OOB bytes when using HW ECC are currently not
+ supported.
+
+config MTD_NAND_STM32_FMC2
+ tristate "Support for NAND controller on STM32MP SoCs"
+ depends on ARCH_STM32 || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ Enables support for NAND Flash chips on SoCs containing the FMC2
+ NAND controller. This controller is found on STM32MP SoCs.
+ The controller supports a maximum 8k page size and up to 8-bit
+ error correction per 512-byte sector.
+
+config MTD_NAND_MESON
+ tristate "Support for NAND controller on Amlogic's Meson SoCs"
+ depends on COMMON_CLK && (ARCH_MESON || COMPILE_TEST)
+ select MFD_SYSCON
+ help
+ Enables support for the NAND controller found on Amlogic Meson
+ SoCs.
+
+config MTD_NAND_GPIO
+ tristate "GPIO assisted NAND controller"
+ depends on GPIOLIB || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables a NAND flash driver where control signals are
+ connected to GPIO pins, and commands and data are communicated
+ via a memory mapped interface.
+
+config MTD_NAND_PLATFORM
+ tristate "Generic NAND controller"
+ depends on HAS_IOMEM
+ help
+ This implements a generic NAND driver for on-SOC platform
+ devices. You will need to provide platform-specific functions
+ via platform_data.
+
+config MTD_NAND_CADENCE
+ tristate "Support Cadence NAND (HPNFC) controller"
+ depends on OF && HAS_IOMEM
+ help
+ Enable the driver for NAND flash on platforms using a Cadence NAND
+ controller.
+
+config MTD_NAND_ARASAN
+ tristate "Support for Arasan NAND flash controller"
+ depends on HAS_IOMEM && HAS_DMA
+ select BCH
+ help
+ Enables the driver for the Arasan NAND flash controller on
+ Zynq Ultrascale+ MPSoC.
+
+config MTD_NAND_INTEL_LGM
+ tristate "Support for NAND controller on Intel LGM SoC"
+ depends on OF
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND Flash chips on Intel's LGM SoC.
+ The NAND flash controller is interfaced through the External
+ Bus Unit.
+
+config MTD_NAND_ROCKCHIP
+ tristate "Rockchip NAND controller"
+ depends on ARCH_ROCKCHIP && HAS_IOMEM
+ help
+ Enables support for NAND controller on Rockchip SoCs.
+ There are four different versions of NAND FLASH Controllers,
+ including:
+ NFC v600: RK2928, RK3066, RK3188
+ NFC v622: RK3036, RK3128
+ NFC v800: RK3308, RV1108
+ NFC v900: PX30, RK3326
+
+config MTD_NAND_PL35X
+ tristate "ARM PL35X NAND controller"
+ depends on OF
+ depends on PL353_SMC
+ help
+ Enables support for the PrimeCell SMC PL351 and PL353 NAND
+ controllers found on Zynq7000.
+
+config MTD_NAND_RENESAS
+ tristate "Renesas R-Car Gen3 & RZ/N1 NAND controller"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ Enables support for the NAND controller found on Renesas R-Car
+ Gen3 and RZ/N1 SoC families.
+
+comment "Misc"
+
+config MTD_SM_COMMON
+ tristate
+ default n
+
+config MTD_NAND_NANDSIM
+ tristate "Support for NAND Flash Simulator"
+ help
+ The simulator emulates various NAND flash chips for the
+ MTD NAND layer.
+
+config MTD_NAND_RICOH
+ tristate "Ricoh xD card reader"
+ default n
+ depends on PCI
+ select MTD_SM_COMMON
+ help
+ Enable support for the Ricoh R5C852 xD card reader.
+ You also need to enable either the 'NAND SSFDC (SmartMedia)
+ read only translation layer' or the new, experimental,
+ read-write 'SmartMedia/xD new translation layer'.
+
+config MTD_NAND_DISKONCHIP
+ tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation)"
+ depends on HAS_IOMEM
+ select REED_SOLOMON
+ select REED_SOLOMON_DEC16
+ help
+ This is a reimplementation of M-Systems DiskOnChip 2000,
+ Millennium and Millennium Plus as a standard NAND device driver,
+ as opposed to the earlier self-contained MTD device drivers.
+ This should enable, among other things, proper JFFS2 operation on
+ these devices.
+
+config MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ bool "Advanced detection options for DiskOnChip"
+ depends on MTD_NAND_DISKONCHIP
+ help
+ This option allows you to specify a nonstandard address at which to
+ probe for a DiskOnChip, or to change the detection options. You
+ are unlikely to need any of this unless you are using LinuxBIOS.
+ Say 'N'.
+
+config MTD_NAND_DISKONCHIP_PROBE_ADDRESS
+ hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ depends on MTD_NAND_DISKONCHIP
+ default "0"
+ help
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+ This option allows you to specify a single address at which to probe
+ for the device, which is useful if you have other devices in that
+ range which get upset when they are probed.
+
+ (Note that on PowerPC, the normal probe will only check at
+ 0xE4000000.)
+
+ Normally, you should leave this set to zero, to allow the probe at
+ the normal addresses.
+
+config MTD_NAND_DISKONCHIP_PROBE_HIGH
+ bool "Probe high addresses"
+ depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ help
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+ This option changes the probe to look between 0xFFFC8000 and
+ 0xFFFEE000 instead. Unless you are using LinuxBIOS, this is
+ unlikely to be useful to you. Say 'N'.
+
+config MTD_NAND_DISKONCHIP_BBTWRITE
+ bool "Allow BBT writes on DiskOnChip Millennium and 2000TSOP"
+ depends on MTD_NAND_DISKONCHIP
+ help
+ On DiskOnChip devices shipped with the INFTL filesystem (Millennium
+ and 2000 TSOP/Alon), Linux reserves some space at the end of the
+ device for the Bad Block Table (BBT). If you have existing INFTL
+ data on your device (created by non-Linux tools such as M-Systems'
+ DOS drivers), your data might overlap the area Linux wants to use for
+ the BBT. If this is a concern for you, leave this option disabled and
+ Linux will not write BBT data into this area.
+ The downside of leaving this option disabled is that if bad blocks
+ are detected by Linux, they will not be recorded in the BBT, which
+ could cause future problems.
+ Once you enable this option, new filesystems (INFTL or others, created
+ in Linux or other operating systems) will not use the reserved area.
+ The only reason not to enable this option is to prevent damage to
+ preexisting filesystems.
+ Even if you leave this disabled, you can enable BBT writes at module
+ load time (assuming you build diskonchip as a module) with the module
+ parameter "inftl_bbt_write=1".
+
+endif # MTD_RAW_NAND
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
new file mode 100644
index 0000000000..25120a4afa
--- /dev/null
+++ b/drivers/mtd/nand/raw/Makefile
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_MTD_RAW_NAND) += nand.o
+obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
+
+obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
+obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
+obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
+obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o
+obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
+obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
+obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
+obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
+obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
+obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
+obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
+obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
+obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
+obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
+obj-$(CONFIG_MTD_NAND_ATMEL) += atmel/
+obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
+omap2_nand-objs := omap2.o
+obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o
+obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o
+obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o
+obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
+obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
+obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
+obj-$(CONFIG_MTD_NAND_SLC_LPC32XX) += lpc32xx_slc.o
+obj-$(CONFIG_MTD_NAND_MLC_LPC32XX) += lpc32xx_mlc.o
+obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
+obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
+obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
+obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
+obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
+obj-$(CONFIG_MTD_NAND_VF610_NFC) += vf610_nfc.o
+obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
+obj-y += ingenic/
+obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
+obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
+obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
+obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
+obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
+obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o
+obj-$(CONFIG_MTD_NAND_MXIC) += mxic_nand.o
+obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
+obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o
+obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o
+obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o
+obj-$(CONFIG_MTD_NAND_ARASAN) += arasan-nand-controller.o
+obj-$(CONFIG_MTD_NAND_INTEL_LGM) += intel-nand-controller.o
+obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o
+obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o
+obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o
+
+nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
+nand-objs += nand_onfi.o
+nand-objs += nand_jedec.o
+nand-objs += nand_amd.o
+nand-objs += nand_esmt.o
+nand-objs += nand_hynix.o
+nand-objs += nand_macronix.o
+nand-objs += nand_micron.o
+nand-objs += nand_sandisk.o
+nand-objs += nand_samsung.o
+nand-objs += nand_toshiba.o
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
new file mode 100644
index 0000000000..919816a7ac
--- /dev/null
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
+ *
+ * Derived from drivers/mtd/nand/toto.c (removed in v2.6.28)
+ * Copyright (c) 2003 Texas Instruments
+ * Copyright (c) 2002 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Converted to platform driver by Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ * Partially stolen from plat_nand.c
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ * Amstrad E3 (Delta).
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-gpio.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+
+/*
+ * MTD structure for E3 (Delta)
+ */
+struct gpio_nand {
+ struct nand_controller base;
+ struct nand_chip nand_chip;
+ struct gpio_desc *gpiod_rdy;
+ struct gpio_desc *gpiod_nce;
+ struct gpio_desc *gpiod_nre;
+ struct gpio_desc *gpiod_nwp;
+ struct gpio_desc *gpiod_nwe;
+ struct gpio_desc *gpiod_ale;
+ struct gpio_desc *gpiod_cle;
+ struct gpio_descs *data_gpiods;
+ bool data_in;
+ unsigned int tRP;
+ unsigned int tWP;
+ u8 (*io_read)(struct gpio_nand *this);
+ void (*io_write)(struct gpio_nand *this, u8 byte);
+};
+
+static void gpio_nand_write_commit(struct gpio_nand *priv)
+{
+ gpiod_set_value(priv->gpiod_nwe, 1);
+ ndelay(priv->tWP);
+ gpiod_set_value(priv->gpiod_nwe, 0);
+}
+
+static void gpio_nand_io_write(struct gpio_nand *priv, u8 byte)
+{
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
+
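+ /*
+ * The bitmap initializer puts the byte in bits 0-7, matching the
+ * order of the data GPIO descriptors, so a single raw array write
+ * drives all eight data lines at once.
+ */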
+ gpiod_set_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
+ data_gpiods->info, values);
+
+ gpio_nand_write_commit(priv);
+}
+
+static void gpio_nand_dir_output(struct gpio_nand *priv, u8 byte)
+{
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
+ int i;
+
+ for (i = 0; i < data_gpiods->ndescs; i++)
+ gpiod_direction_output_raw(data_gpiods->desc[i],
+ test_bit(i, values));
+
+ gpio_nand_write_commit(priv);
+
+ priv->data_in = false;
+}
+
+static u8 gpio_nand_io_read(struct gpio_nand *priv)
+{
+ u8 res;
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(res)) = { 0, };
+
+ gpiod_set_value(priv->gpiod_nre, 1);
+ ndelay(priv->tRP);
+
+ gpiod_get_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
+ data_gpiods->info, values);
+
+ gpiod_set_value(priv->gpiod_nre, 0);
+
+ res = values[0];
+ return res;
+}
+
+static void gpio_nand_dir_input(struct gpio_nand *priv)
+{
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ int i;
+
+ for (i = 0; i < data_gpiods->ndescs; i++)
+ gpiod_direction_input(data_gpiods->desc[i]);
+
+ priv->data_in = true;
+}
+
+static void gpio_nand_write_buf(struct gpio_nand *priv, const u8 *buf, int len)
+{
+ int i = 0;
+
+ if (len > 0 && priv->data_in)
+ gpio_nand_dir_output(priv, buf[i++]);
+
+ while (i < len)
+ priv->io_write(priv, buf[i++]);
+}
+
+static void gpio_nand_read_buf(struct gpio_nand *priv, u8 *buf, int len)
+{
+ int i;
+
+ if (priv->data_gpiods && !priv->data_in)
+ gpio_nand_dir_input(priv);
+
+ for (i = 0; i < len; i++)
+ buf[i] = priv->io_read(priv);
+}
+
+static void gpio_nand_ctrl_cs(struct gpio_nand *priv, bool assert)
+{
+ gpiod_set_value(priv->gpiod_nce, assert);
+}
+
+static int gpio_nand_exec_op(struct nand_chip *this,
+ const struct nand_operation *op, bool check_only)
+{
+ struct gpio_nand *priv = nand_get_controller_data(this);
+ const struct nand_op_instr *instr;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ gpio_nand_ctrl_cs(priv, 1);
+
+ for (instr = op->instrs; instr < op->instrs + op->ninstrs; instr++) {
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ gpiod_set_value(priv->gpiod_cle, 1);
+ gpio_nand_write_buf(priv, &instr->ctx.cmd.opcode, 1);
+ gpiod_set_value(priv->gpiod_cle, 0);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ gpiod_set_value(priv->gpiod_ale, 1);
+ gpio_nand_write_buf(priv, instr->ctx.addr.addrs,
+ instr->ctx.addr.naddrs);
+ gpiod_set_value(priv->gpiod_ale, 0);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ gpio_nand_read_buf(priv, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ gpio_nand_write_buf(priv, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = priv->gpiod_rdy ?
+ nand_gpio_waitrdy(this, priv->gpiod_rdy,
+ instr->ctx.waitrdy.timeout_ms) :
+ nand_soft_waitrdy(this,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+
+ if (ret)
+ break;
+ }
+
+ gpio_nand_ctrl_cs(priv, 0);
+
+ return ret;
+}
+
+static int gpio_nand_setup_interface(struct nand_chip *this, int csline,
+ const struct nand_interface_config *cf)
+{
+ struct gpio_nand *priv = nand_get_controller_data(this);
+ const struct nand_sdr_timings *sdr = nand_get_sdr_timings(cf);
+ struct device *dev = &nand_to_mtd(this)->dev;
+
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ if (priv->gpiod_nre) {
+ priv->tRP = DIV_ROUND_UP(sdr->tRP_min, 1000);
+ dev_dbg(dev, "using %u ns read pulse width\n", priv->tRP);
+ }
+
+ priv->tWP = DIV_ROUND_UP(sdr->tWP_min, 1000);
+ dev_dbg(dev, "using %u ns write pulse width\n", priv->tWP);
+
+ return 0;
+}
+
+static int gpio_nand_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops gpio_nand_ops = {
+ .exec_op = gpio_nand_exec_op,
+ .attach_chip = gpio_nand_attach_chip,
+ .setup_interface = gpio_nand_setup_interface,
+};
+
+/*
+ * Main initialization routine
+ */
+static int gpio_nand_probe(struct platform_device *pdev)
+{
+ struct gpio_nand_platdata *pdata = dev_get_platdata(&pdev->dev);
+ const struct mtd_partition *partitions = NULL;
+ int num_partitions = 0;
+ struct gpio_nand *priv;
+ struct nand_chip *this;
+ struct mtd_info *mtd;
+ int (*probe)(struct platform_device *pdev, struct gpio_nand *priv);
+ int err = 0;
+
+ if (pdata) {
+ partitions = pdata->parts;
+ num_partitions = pdata->num_parts;
+ }
+
+ /* Allocate memory for MTD device structure and private data */
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct gpio_nand),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ this = &priv->nand_chip;
+
+ mtd = nand_to_mtd(this);
+ mtd->dev.parent = &pdev->dev;
+
+ nand_set_controller_data(this, priv);
+ nand_set_flash_node(this, pdev->dev.of_node);
+
+ priv->gpiod_rdy = devm_gpiod_get_optional(&pdev->dev, "rdy", GPIOD_IN);
+ if (IS_ERR(priv->gpiod_rdy)) {
+ err = PTR_ERR(priv->gpiod_rdy);
+ dev_warn(&pdev->dev, "RDY GPIO request failed (%d)\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ /* Set chip enabled but write protected */
+ priv->gpiod_nwp = devm_gpiod_get_optional(&pdev->dev, "nwp",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->gpiod_nwp)) {
+ err = PTR_ERR(priv->gpiod_nwp);
+ dev_err(&pdev->dev, "NWP GPIO request failed (%d)\n", err);
+ return err;
+ }
+
+ priv->gpiod_nce = devm_gpiod_get_optional(&pdev->dev, "nce",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(priv->gpiod_nce)) {
+ err = PTR_ERR(priv->gpiod_nce);
+ dev_err(&pdev->dev, "NCE GPIO request failed (%d)\n", err);
+ return err;
+ }
+
+ priv->gpiod_nre = devm_gpiod_get_optional(&pdev->dev, "nre",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(priv->gpiod_nre)) {
+ err = PTR_ERR(priv->gpiod_nre);
+ dev_err(&pdev->dev, "NRE GPIO request failed (%d)\n", err);
+ return err;
+ }
+
+ priv->gpiod_nwe = devm_gpiod_get_optional(&pdev->dev, "nwe",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(priv->gpiod_nwe)) {
+ err = PTR_ERR(priv->gpiod_nwe);
+ dev_err(&pdev->dev, "NWE GPIO request failed (%d)\n", err);
+ return err;
+ }
+
+ priv->gpiod_ale = devm_gpiod_get(&pdev->dev, "ale", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->gpiod_ale)) {
+ err = PTR_ERR(priv->gpiod_ale);
+ dev_err(&pdev->dev, "ALE GPIO request failed (%d)\n", err);
+ return err;
+ }
+
+ priv->gpiod_cle = devm_gpiod_get(&pdev->dev, "cle", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->gpiod_cle)) {
+ err = PTR_ERR(priv->gpiod_cle);
+ dev_err(&pdev->dev, "CLE GPIO request failed (%d)\n", err);
+ return err;
+ }
+
+ /* Request array of data pins, initialize them as input */
+ priv->data_gpiods = devm_gpiod_get_array_optional(&pdev->dev, "data",
+ GPIOD_IN);
+ if (IS_ERR(priv->data_gpiods)) {
+ err = PTR_ERR(priv->data_gpiods);
+ dev_err(&pdev->dev, "data GPIO request failed: %d\n", err);
+ return err;
+ }
+ if (priv->data_gpiods) {
+ if (!priv->gpiod_nwe) {
+ dev_err(&pdev->dev,
+ "mandatory NWE pin not provided by platform\n");
+ return -ENODEV;
+ }
+
+ priv->io_read = gpio_nand_io_read;
+ priv->io_write = gpio_nand_io_write;
+ priv->data_in = true;
+ }
+
+ if (pdev->id_entry)
+ probe = (void *) pdev->id_entry->driver_data;
+ else
+ probe = of_device_get_match_data(&pdev->dev);
+ if (probe)
+ err = probe(pdev, priv);
+ if (err)
+ return err;
+
+ if (!priv->io_read || !priv->io_write) {
+ dev_err(&pdev->dev, "incomplete device configuration\n");
+ return -ENODEV;
+ }
+
+ /* Initialize the NAND controller object embedded in gpio_nand. */
+ priv->base.ops = &gpio_nand_ops;
+ nand_controller_init(&priv->base);
+ this->controller = &priv->base;
+
+ /*
+ * FIXME: We should release write protection only after nand_scan() to
+ * be on the safe side but we can't do that until we have a generic way
+ * to assert/deassert WP from the core. Even if the core shouldn't
+ * write things in the nand_scan() path, it should have control on this
+ * pin just in case we ever need to disable write protection during
+ * chip detection/initialization.
+ */
+ /* Release write protection */
+ gpiod_set_value(priv->gpiod_nwp, 0);
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(this, 1);
+ if (err)
+ return err;
+
+ /* Register the partitions */
+ err = mtd_device_register(mtd, partitions, num_partitions);
+ if (err)
+ goto err_nand_cleanup;
+
+ return 0;
+
+err_nand_cleanup:
+ nand_cleanup(this);
+
+ return err;
+}
+
+/*
+ * Clean up routine
+ */
+static void gpio_nand_remove(struct platform_device *pdev)
+{
+ struct gpio_nand *priv = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = nand_to_mtd(&priv->nand_chip);
+ int ret;
+
+ /* Apply write protection */
+ gpiod_set_value(priv->gpiod_nwp, 1);
+
+ /* Unregister device */
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(mtd_to_nand(mtd));
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_nand_of_id_table[] = {
+ {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_of_id_table);
+#endif
+
+static const struct platform_device_id gpio_nand_plat_id_table[] = {
+ {
+ .name = "ams-delta-nand",
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(platform, gpio_nand_plat_id_table);
+
+static struct platform_driver gpio_nand_driver = {
+ .probe = gpio_nand_probe,
+ .remove_new = gpio_nand_remove,
+ .id_table = gpio_nand_plat_id_table,
+ .driver = {
+ .name = "ams-delta-nand",
+ .of_match_table = of_match_ptr(gpio_nand_of_id_table),
+ },
+};
+
+module_platform_driver(gpio_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
+MODULE_DESCRIPTION("Glue layer for NAND flash on Amstrad E3 (Delta)");
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
new file mode 100644
index 0000000000..a492051c46
--- /dev/null
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -0,0 +1,1512 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arasan NAND Flash Controller Driver
+ *
+ * Copyright (C) 2014 - 2020 Xilinx, Inc.
+ * Author:
+ * Miquel Raynal <miquel.raynal@bootlin.com>
+ * Original work (fully rewritten):
+ * Punnaiah Choudary Kalluri <punnaia@xilinx.com>
+ * Naga Sureshkumar Relli <nagasure@xilinx.com>
+ */
+
+#include <linux/bch.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define PKT_REG 0x00
+#define PKT_SIZE(x) FIELD_PREP(GENMASK(10, 0), (x))
+#define PKT_STEPS(x) FIELD_PREP(GENMASK(23, 12), (x))
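+/*
+ * For example, PKT_SIZE(512) | PKT_STEPS(4) describes a transfer split
+ * into four 512-byte packets.
+ */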
+
+#define MEM_ADDR1_REG 0x04
+
+#define MEM_ADDR2_REG 0x08
+#define ADDR2_STRENGTH(x) FIELD_PREP(GENMASK(27, 25), (x))
+#define ADDR2_CS(x) FIELD_PREP(GENMASK(31, 30), (x))
+
+#define CMD_REG 0x0C
+#define CMD_1(x) FIELD_PREP(GENMASK(7, 0), (x))
+#define CMD_2(x) FIELD_PREP(GENMASK(15, 8), (x))
+#define CMD_PAGE_SIZE(x) FIELD_PREP(GENMASK(25, 23), (x))
+#define CMD_DMA_ENABLE BIT(27)
+#define CMD_NADDRS(x) FIELD_PREP(GENMASK(30, 28), (x))
+#define CMD_ECC_ENABLE BIT(31)
+
+#define PROG_REG 0x10
+#define PROG_PGRD BIT(0)
+#define PROG_ERASE BIT(2)
+#define PROG_STATUS BIT(3)
+#define PROG_PGPROG BIT(4)
+#define PROG_RDID BIT(6)
+#define PROG_RDPARAM BIT(7)
+#define PROG_RST BIT(8)
+#define PROG_GET_FEATURE BIT(9)
+#define PROG_SET_FEATURE BIT(10)
+#define PROG_CHG_RD_COL_ENH BIT(14)
+
+#define INTR_STS_EN_REG 0x14
+#define INTR_SIG_EN_REG 0x18
+#define INTR_STS_REG 0x1C
+#define WRITE_READY BIT(0)
+#define READ_READY BIT(1)
+#define XFER_COMPLETE BIT(2)
+#define DMA_BOUNDARY BIT(6)
+#define EVENT_MASK GENMASK(7, 0)
+
+#define READY_STS_REG 0x20
+
+#define DMA_ADDR0_REG 0x50
+#define DMA_ADDR1_REG 0x24
+
+#define FLASH_STS_REG 0x28
+
+#define TIMING_REG 0x2C
+#define TCCS_TIME_500NS 0
+#define TCCS_TIME_300NS 3
+#define TCCS_TIME_200NS 2
+#define TCCS_TIME_100NS 1
+#define FAST_TCAD BIT(2)
+#define DQS_BUFF_SEL_IN(x) FIELD_PREP(GENMASK(6, 3), (x))
+#define DQS_BUFF_SEL_OUT(x) FIELD_PREP(GENMASK(18, 15), (x))
+
+#define DATA_PORT_REG 0x30
+
+#define ECC_CONF_REG 0x34
+#define ECC_CONF_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
+#define ECC_CONF_LEN(x) FIELD_PREP(GENMASK(26, 16), (x))
+#define ECC_CONF_BCH_EN BIT(27)
+
+#define ECC_ERR_CNT_REG 0x38
+#define GET_PKT_ERR_CNT(x) FIELD_GET(GENMASK(7, 0), (x))
+#define GET_PAGE_ERR_CNT(x) FIELD_GET(GENMASK(16, 8), (x))
+
+#define ECC_SP_REG 0x3C
+#define ECC_SP_CMD1(x) FIELD_PREP(GENMASK(7, 0), (x))
+#define ECC_SP_CMD2(x) FIELD_PREP(GENMASK(15, 8), (x))
+#define ECC_SP_ADDRS(x) FIELD_PREP(GENMASK(30, 28), (x))
+
+#define ECC_1ERR_CNT_REG 0x40
+#define ECC_2ERR_CNT_REG 0x44
+
+#define DATA_INTERFACE_REG 0x6C
+#define DIFACE_SDR_MODE(x) FIELD_PREP(GENMASK(2, 0), (x))
+#define DIFACE_DDR_MODE(x) FIELD_PREP(GENMASK(5, 3), (x))
+#define DIFACE_SDR 0
+#define DIFACE_NVDDR BIT(9)
+
+#define ANFC_MAX_CS 2
+#define ANFC_DFLT_TIMEOUT_US 1000000
+#define ANFC_MAX_CHUNK_SIZE SZ_1M
+#define ANFC_MAX_PARAM_SIZE SZ_4K
+#define ANFC_MAX_STEPS SZ_2K
+#define ANFC_MAX_PKT_SIZE (SZ_2K - 1)
+#define ANFC_MAX_ADDR_CYC 5U
+#define ANFC_RSVD_ECC_BYTES 21
+
+#define ANFC_XLNX_SDR_DFLT_CORE_CLK 100000000
+#define ANFC_XLNX_SDR_HS_CORE_CLK 80000000
+
+static struct gpio_desc *anfc_default_cs_array[2] = {NULL, NULL};
+
+/**
+ * struct anfc_op - Defines how to execute an operation
+ * @pkt_reg: Packet register
+ * @addr1_reg: Memory address 1 register
+ * @addr2_reg: Memory address 2 register
+ * @cmd_reg: Command register
+ * @prog_reg: Program register
+ * @steps: Number of "packets" to read/write
+ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
+ * @len: Data transfer length
+ * @read: Data transfer direction from the controller's point of view
+ * @buf: Data buffer
+ */
+struct anfc_op {
+ u32 pkt_reg;
+ u32 addr1_reg;
+ u32 addr2_reg;
+ u32 cmd_reg;
+ u32 prog_reg;
+ int steps;
+ unsigned int rdy_timeout_ms;
+ unsigned int len;
+ bool read;
+ u8 *buf;
+};
+
+/**
+ * struct anand - Defines the NAND chip related information
+ * @node: Used to store NAND chips into a list
+ * @chip: NAND chip information structure
+ * @rb: Ready-busy line
+ * @page_sz: Register value of the page_sz field to use
+ * @clk: Expected clock frequency to use
+ * @data_iface: Data interface timing mode to use
+ * @timings: NV-DDR specific timings to use
+ * @ecc_conf: Hardware ECC configuration value
+ * @strength: Register value of the ECC strength
+ * @raddr_cycles: Row address cycle information
+ * @caddr_cycles: Column address cycle information
+ * @ecc_bits: Exact number of ECC bits per syndrome
+ * @ecc_total: Total number of ECC bytes
+ * @errloc: Array of errors located with soft BCH
+ * @hw_ecc: Buffer to store syndromes computed by hardware
+ * @bch: BCH structure
+ * @cs_idx: Array of chip-select for this device, values are indexes
+ * of the controller structure @gpio_cs array
+ * @ncs_idx: Size of the @cs_idx array
+ */
+struct anand {
+ struct list_head node;
+ struct nand_chip chip;
+ unsigned int rb;
+ unsigned int page_sz;
+ unsigned long clk;
+ u32 data_iface;
+ u32 timings;
+ u32 ecc_conf;
+ u32 strength;
+ u16 raddr_cycles;
+ u16 caddr_cycles;
+ unsigned int ecc_bits;
+ unsigned int ecc_total;
+ unsigned int *errloc;
+ u8 *hw_ecc;
+ struct bch_control *bch;
+ int *cs_idx;
+ int ncs_idx;
+};
+
+/**
+ * struct arasan_nfc - Defines the Arasan NAND flash controller driver instance
+ * @dev: Pointer to the device structure
+ * @base: Remapped register area
+ * @controller_clk: Pointer to the system clock
+ * @bus_clk: Pointer to the flash clock
+ * @controller: Base controller structure
+ * @chips: List of all NAND chips attached to the controller
+ * @cur_clk: Current clock rate
+ * @cs_array: CS array. Native CS are left empty, the other cells are
+ * populated with their corresponding GPIO descriptor.
+ * @ncs: Size of @cs_array
+ * @cur_cs: Index in @cs_array of the currently in use CS
+ * @native_cs: Currently selected native CS
+ * @spare_cs: Native CS that is not wired (may be selected when a GPIO
+ * CS is in use)
+ */
+struct arasan_nfc {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *controller_clk;
+ struct clk *bus_clk;
+ struct nand_controller controller;
+ struct list_head chips;
+ unsigned int cur_clk;
+ struct gpio_desc **cs_array;
+ unsigned int ncs;
+ int cur_cs;
+ unsigned int native_cs;
+ unsigned int spare_cs;
+};
+
+static struct anand *to_anand(struct nand_chip *nand)
+{
+ return container_of(nand, struct anand, chip);
+}
+
+static struct arasan_nfc *to_anfc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct arasan_nfc, controller);
+}
+
+static int anfc_wait_for_event(struct arasan_nfc *nfc, unsigned int event)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(nfc->base + INTR_STS_REG, val,
+ val & event, 0,
+ ANFC_DFLT_TIMEOUT_US);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout waiting for event 0x%x\n", event);
+ return -ETIMEDOUT;
+ }
+
+ writel_relaxed(event, nfc->base + INTR_STS_REG);
+
+ return 0;
+}
+
+static int anfc_wait_for_rb(struct arasan_nfc *nfc, struct nand_chip *chip,
+ unsigned int timeout_ms)
+{
+ struct anand *anand = to_anand(chip);
+ u32 val;
+ int ret;
+
+ /* There is no R/B interrupt, so we must poll a register */
+ ret = readl_relaxed_poll_timeout(nfc->base + READY_STS_REG, val,
+ val & BIT(anand->rb),
+ 1, timeout_ms * 1000);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout waiting for R/B 0x%x\n",
+ readl_relaxed(nfc->base + READY_STS_REG));
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void anfc_trigger_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
+{
+ writel_relaxed(nfc_op->pkt_reg, nfc->base + PKT_REG);
+ writel_relaxed(nfc_op->addr1_reg, nfc->base + MEM_ADDR1_REG);
+ writel_relaxed(nfc_op->addr2_reg, nfc->base + MEM_ADDR2_REG);
+ writel_relaxed(nfc_op->cmd_reg, nfc->base + CMD_REG);
+ writel_relaxed(nfc_op->prog_reg, nfc->base + PROG_REG);
+}
+
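+/*
+ * Find a power-of-two number of packets (steps) of equal size (pktsize)
+ * that exactly covers @len. For example, a 4096-byte transfer cannot use
+ * 1 x 4096 nor 2 x 2048 (both above ANFC_MAX_PKT_SIZE = 2047), so it is
+ * split into 4 packets of 1024 bytes (*steps = 4, *pktsize = 1024). A
+ * length that cannot be evenly split this way (e.g. 4097) is rejected
+ * with -ENOTSUPP.
+ */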
+static int anfc_pkt_len_config(unsigned int len, unsigned int *steps,
+ unsigned int *pktsize)
+{
+ unsigned int nb, sz;
+
+ for (nb = 1; nb < ANFC_MAX_STEPS; nb *= 2) {
+ sz = len / nb;
+ if (sz <= ANFC_MAX_PKT_SIZE)
+ break;
+ }
+
+ if (sz * nb != len)
+ return -ENOTSUPP;
+
+ if (steps)
+ *steps = nb;
+
+ if (pktsize)
+ *pktsize = sz;
+
+ return 0;
+}
+
+static bool anfc_is_gpio_cs(struct arasan_nfc *nfc, int nfc_cs)
+{
+ return nfc_cs >= 0 && nfc->cs_array[nfc_cs];
+}
+
+static int anfc_relative_to_absolute_cs(struct anand *anand, int num)
+{
+ return anand->cs_idx[num];
+}
+
+static void anfc_assert_cs(struct arasan_nfc *nfc, unsigned int nfc_cs_idx)
+{
+ /* CS did not change: do nothing */
+ if (nfc->cur_cs == nfc_cs_idx)
+ return;
+
+ /* Deassert the previous CS if it was a GPIO */
+ if (anfc_is_gpio_cs(nfc, nfc->cur_cs))
+ gpiod_set_value_cansleep(nfc->cs_array[nfc->cur_cs], 1);
+
+ /* Assert the new one */
+ if (anfc_is_gpio_cs(nfc, nfc_cs_idx)) {
+ nfc->native_cs = nfc->spare_cs;
+ gpiod_set_value_cansleep(nfc->cs_array[nfc_cs_idx], 0);
+ } else {
+ nfc->native_cs = nfc_cs_idx;
+ }
+
+ nfc->cur_cs = nfc_cs_idx;
+}
+
+static int anfc_select_target(struct nand_chip *chip, int target)
+{
+ struct anand *anand = to_anand(chip);
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ unsigned int nfc_cs_idx = anfc_relative_to_absolute_cs(anand, target);
+ int ret;
+
+ anfc_assert_cs(nfc, nfc_cs_idx);
+
+ /* Update the controller timings and the potential ECC configuration */
+ writel_relaxed(anand->data_iface, nfc->base + DATA_INTERFACE_REG);
+ writel_relaxed(anand->timings, nfc->base + TIMING_REG);
+
+ /* Update clock frequency */
+ if (nfc->cur_clk != anand->clk) {
+ clk_disable_unprepare(nfc->bus_clk);
+ ret = clk_set_rate(nfc->bus_clk, anand->clk);
+ if (ret) {
+ dev_err(nfc->dev, "Failed to change clock rate\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(nfc->bus_clk);
+ if (ret) {
+ dev_err(nfc->dev,
+ "Failed to re-enable the bus clock\n");
+ return ret;
+ }
+
+ nfc->cur_clk = anand->clk;
+ }
+
+ return 0;
+}
+
+/*
+ * When using the embedded hardware ECC engine, the controller is in charge of
+ * feeding the engine, first, with the ECC residue present in the data array.
+ * A typical read operation is:
+ * 1/ Assert the read operation by sending the relevant command/address cycles
+ * but targeting the column of the first ECC bytes in the OOB area instead of
+ * the main data directly.
+ * 2/ After having read the relevant number of ECC bytes, the controller uses
+ * the RNDOUT/RNDSTART commands which are set into the "ECC Spare Command
+ * Register" to move the pointer back to the beginning of the main data.
+ * 3/ It then reads the content of the main area for a given size (pktsize)
+ * and feeds the ECC engine with this buffer again.
+ * 4/ The ECC engine derives the ECC bytes for the given data, compares them
+ * with the ones already received, eventually triggers status flags and then
+ * sets the "Buffer Read Ready" flag.
+ * 5/ The corrected data is then available for reading from the data port
+ * register.
+ *
+ * The hardware BCH ECC engine is known to be inconsistent in BCH mode and to
+ * never report uncorrectable errors. Because of this bug, we have to use the
+ * software BCH implementation in the read path.
+ */
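+/*
+ * Address cycle packing example: on a large page device with two column
+ * cycles and three row cycles, reading page 0x054321 yields
+ * addr1_reg = 0x21 << 16 | 0x43 << 24 (the two column-address bytes in
+ * the low half of addr1_reg are left at zero) and the low byte of
+ * addr2_reg carries the remaining row byte, 0x05.
+ */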
+static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct anand *anand = to_anand(chip);
+ unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
+ unsigned int max_bitflips = 0;
+ dma_addr_t dma_addr;
+ int step, ret;
+ struct anfc_op nfc_op = {
+ .pkt_reg =
+ PKT_SIZE(chip->ecc.size) |
+ PKT_STEPS(chip->ecc.steps),
+ .addr1_reg =
+ (page & 0xFF) << (8 * (anand->caddr_cycles)) |
+ (((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
+ .addr2_reg =
+ ((page >> 16) & 0xFF) |
+ ADDR2_STRENGTH(anand->strength) |
+ ADDR2_CS(nfc->native_cs),
+ .cmd_reg =
+ CMD_1(NAND_CMD_READ0) |
+ CMD_2(NAND_CMD_READSTART) |
+ CMD_PAGE_SIZE(anand->page_sz) |
+ CMD_DMA_ENABLE |
+ CMD_NADDRS(anand->caddr_cycles +
+ anand->raddr_cycles),
+ .prog_reg = PROG_PGRD,
+ };
+
+ dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(nfc->dev, dma_addr)) {
+ dev_err(nfc->dev, "Buffer mapping error");
+ return -EIO;
+ }
+
+ writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
+ writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
+
+ anfc_trigger_op(nfc, &nfc_op);
+
+ ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+ dma_unmap_single(nfc->dev, dma_addr, len, DMA_FROM_DEVICE);
+ if (ret) {
+ dev_err(nfc->dev, "Error reading page %d\n", page);
+ return ret;
+ }
+
+ /* Store the raw OOB bytes as well */
+ ret = nand_change_read_column_op(chip, mtd->writesize, chip->oob_poi,
+ mtd->oobsize, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * For each step, compute the BCH syndrome over the raw data in
+ * software, then compare the number of errors found with the
+ * feedback from the hardware engine.
+ */
+ for (step = 0; step < chip->ecc.steps; step++) {
+ u8 *raw_buf = &buf[step * chip->ecc.size];
+ unsigned int bit, byte;
+ int bf, i;
+
+ /* Extract the syndrome, it is not necessarily aligned */
+ memset(anand->hw_ecc, 0, chip->ecc.bytes);
+ nand_extract_bits(anand->hw_ecc, 0,
+ &chip->oob_poi[mtd->oobsize - anand->ecc_total],
+ anand->ecc_bits * step, anand->ecc_bits);
+
+ bf = bch_decode(anand->bch, raw_buf, chip->ecc.size,
+ anand->hw_ecc, NULL, NULL, anand->errloc);
+ if (!bf) {
+ continue;
+ } else if (bf > 0) {
+ for (i = 0; i < bf; i++) {
+ /* Only correct the data, not the syndrome */
+ if (anand->errloc[i] < (chip->ecc.size * 8)) {
+ bit = BIT(anand->errloc[i] & 7);
+ byte = anand->errloc[i] >> 3;
+ raw_buf[byte] ^= bit;
+ }
+ }
+
+ mtd->ecc_stats.corrected += bf;
+ max_bitflips = max_t(unsigned int, max_bitflips, bf);
+
+ continue;
+ }
+
+ bf = nand_check_erased_ecc_chunk(raw_buf, chip->ecc.size,
+ NULL, 0, NULL, 0,
+ chip->ecc.strength);
+ if (bf > 0) {
+ mtd->ecc_stats.corrected += bf;
+ max_bitflips = max_t(unsigned int, max_bitflips, bf);
+ memset(raw_buf, 0xFF, chip->ecc.size);
+ } else if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ }
+ }
+
+ return 0;
+}
+
+static int anfc_sel_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = anfc_select_target(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ return anfc_read_page_hw_ecc(chip, buf, oob_required, page);
+};
+
+static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct anand *anand = to_anand(chip);
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
+ dma_addr_t dma_addr;
+ u8 status;
+ int ret;
+ struct anfc_op nfc_op = {
+ .pkt_reg =
+ PKT_SIZE(chip->ecc.size) |
+ PKT_STEPS(chip->ecc.steps),
+ .addr1_reg =
+ (page & 0xFF) << (8 * (anand->caddr_cycles)) |
+ (((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
+ .addr2_reg =
+ ((page >> 16) & 0xFF) |
+ ADDR2_STRENGTH(anand->strength) |
+ ADDR2_CS(nfc->native_cs),
+ .cmd_reg =
+ CMD_1(NAND_CMD_SEQIN) |
+ CMD_2(NAND_CMD_PAGEPROG) |
+ CMD_PAGE_SIZE(anand->page_sz) |
+ CMD_DMA_ENABLE |
+ CMD_NADDRS(anand->caddr_cycles +
+ anand->raddr_cycles) |
+ CMD_ECC_ENABLE,
+ .prog_reg = PROG_PGPROG,
+ };
+
+ writel_relaxed(anand->ecc_conf, nfc->base + ECC_CONF_REG);
+ writel_relaxed(ECC_SP_CMD1(NAND_CMD_RNDIN) |
+ ECC_SP_ADDRS(anand->caddr_cycles),
+ nfc->base + ECC_SP_REG);
+
+ dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(nfc->dev, dma_addr)) {
+ dev_err(nfc->dev, "Buffer mapping error");
+ return -EIO;
+ }
+
+ writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
+ writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
+
+ anfc_trigger_op(nfc, &nfc_op);
+ ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+ dma_unmap_single(nfc->dev, dma_addr, len, DMA_TO_DEVICE);
+ if (ret) {
+ dev_err(nfc->dev, "Error writing page %d\n", page);
+ return ret;
+ }
+
+ /* Spare data is not protected */
+ if (oob_required) {
+ ret = nand_write_oob_std(chip, page);
+ if (ret)
+ return ret;
+ }
+
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = anfc_select_target(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ return anfc_write_page_hw_ecc(chip, buf, oob_required, page);
+};
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static int anfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct anfc_op *nfc_op)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct anand *anand = to_anand(chip);
+ const struct nand_op_instr *instr = NULL;
+ bool first_cmd = true;
+ unsigned int op_id;
+ int ret, i;
+
+ memset(nfc_op, 0, sizeof(*nfc_op));
+ nfc_op->addr2_reg = ADDR2_CS(nfc->native_cs);
+ nfc_op->cmd_reg = CMD_PAGE_SIZE(anand->page_sz);
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int offset, naddrs, pktsize;
+ const u8 *addrs;
+ u8 *buf;
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (first_cmd)
+ nfc_op->cmd_reg |= CMD_1(instr->ctx.cmd.opcode);
+ else
+ nfc_op->cmd_reg |= CMD_2(instr->ctx.cmd.opcode);
+
+ first_cmd = false;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+ nfc_op->cmd_reg |= CMD_NADDRS(naddrs);
+
+ for (i = 0; i < min(ANFC_MAX_ADDR_CYC, naddrs); i++) {
+ if (i < 4)
+ nfc_op->addr1_reg |= (u32)addrs[i] << i * 8;
+ else
+ nfc_op->addr2_reg |= addrs[i];
+ }
+
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op->read = true;
+ fallthrough;
+ case NAND_OP_DATA_OUT_INSTR:
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ buf = instr->ctx.data.buf.in;
+ nfc_op->buf = &buf[offset];
+ nfc_op->len = nand_subop_get_data_len(subop, op_id);
+ ret = anfc_pkt_len_config(nfc_op->len, &nfc_op->steps,
+ &pktsize);
+ if (ret)
+ return ret;
+
+ /*
+ * The number of DATA cycles must be aligned to 4, which means the
+ * controller might read/write more than requested. This is harmless
+ * most of the time as the extra DATA cycles are discarded in the
+ * write path and the read pointer is adjusted in the read path.
+ *
+ * FIXME: The core should mark operations where
+ * reading/writing more is allowed so the exec_op()
+ * implementation can take the right decision when the
+ * alignment constraint is not met: adjust the number of
+ * DATA cycles when it's allowed, reject the operation
+ * otherwise.
+ */
+ nfc_op->pkt_reg |= PKT_SIZE(round_up(pktsize, 4)) |
+ PKT_STEPS(nfc_op->steps);
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ break;
+ }
+ }
+
+ return 0;
+}
+
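+/*
+ * PIO transfer example: a 6-byte DATA transfer parsed as a single step
+ * gives dwords = 1 and last_len = 2, so the loop below moves one 32-bit
+ * word through the data port, and the remainder handling transfers one
+ * more word of which only the last_len bytes are copied to/from the
+ * buffer.
+ */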
+static int anfc_rw_pio_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
+{
+ unsigned int dwords = (nfc_op->len / 4) / nfc_op->steps;
+ unsigned int last_len = nfc_op->len % 4;
+ unsigned int offset, dir;
+ u8 *buf = nfc_op->buf;
+ int ret, i;
+
+ for (i = 0; i < nfc_op->steps; i++) {
+ dir = nfc_op->read ? READ_READY : WRITE_READY;
+ ret = anfc_wait_for_event(nfc, dir);
+ if (ret) {
+ dev_err(nfc->dev, "PIO %s ready signal not received\n",
+ nfc_op->read ? "Read" : "Write");
+ return ret;
+ }
+
+ offset = i * (dwords * 4);
+ if (nfc_op->read)
+ ioread32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
+ dwords);
+ else
+ iowrite32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
+ dwords);
+ }
+
+ if (last_len) {
+ u32 remainder;
+
+ offset = nfc_op->len - last_len;
+
+ if (nfc_op->read) {
+ remainder = readl_relaxed(nfc->base + DATA_PORT_REG);
+ memcpy(&buf[offset], &remainder, last_len);
+ } else {
+ memcpy(&remainder, &buf[offset], last_len);
+ writel_relaxed(remainder, nfc->base + DATA_PORT_REG);
+ }
+ }
+
+ return anfc_wait_for_event(nfc, XFER_COMPLETE);
+}
+
+static int anfc_misc_data_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ u32 prog_reg)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ int ret;
+
+ ret = anfc_parse_instructions(chip, subop, &nfc_op);
+ if (ret)
+ return ret;
+
+ nfc_op.prog_reg = prog_reg;
+ anfc_trigger_op(nfc, &nfc_op);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ return anfc_rw_pio_op(nfc, &nfc_op);
+}
+
+static int anfc_param_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_data_type_exec(chip, subop, PROG_RDPARAM);
+}
+
+static int anfc_data_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ u32 prog_reg = PROG_PGRD;
+
+ /*
+ * Experience shows that in SDR mode, sending a CHANGE READ COLUMN
+ * command through the READ PAGE "type" always works fine, whereas in
+ * NV-DDR mode the same command simply fails. However, it was also
+ * spotted that any CHANGE READ COLUMN command sent through the CHANGE
+ * READ COLUMN ENHANCED "type" works correctly in both cases (SDR and
+ * NV-DDR). So, for simplicity, let's program the controller with the
+ * CHANGE READ COLUMN ENHANCED "type" whenever we are requested to
+ * perform a CHANGE READ COLUMN operation.
+ */
+ if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_RNDOUT &&
+ subop->instrs[2].ctx.cmd.opcode == NAND_CMD_RNDOUTSTART)
+ prog_reg = PROG_CHG_RD_COL_ENH;
+
+ return anfc_misc_data_type_exec(chip, subop, prog_reg);
+}
+
+static int anfc_param_write_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_data_type_exec(chip, subop, PROG_SET_FEATURE);
+}
+
+static int anfc_data_write_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_data_type_exec(chip, subop, PROG_PGPROG);
+}
+
+static int anfc_misc_zerolen_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ u32 prog_reg)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ int ret;
+
+ ret = anfc_parse_instructions(chip, subop, &nfc_op);
+ if (ret)
+ return ret;
+
+ nfc_op.prog_reg = prog_reg;
+ anfc_trigger_op(nfc, &nfc_op);
+
+ ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+ if (ret)
+ return ret;
+
+ if (nfc_op.rdy_timeout_ms)
+ ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
+
+ return ret;
+}
+
+static int anfc_status_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ u32 tmp;
+ int ret;
+
+ /* See anfc_check_op() for details about this constraint */
+ if (subop->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS)
+ return -ENOTSUPP;
+
+ ret = anfc_misc_zerolen_type_exec(chip, subop, PROG_STATUS);
+ if (ret)
+ return ret;
+
+ tmp = readl_relaxed(nfc->base + FLASH_STS_REG);
+ memcpy(subop->instrs[1].ctx.data.buf.in, &tmp, 1);
+
+ return 0;
+}
+
+static int anfc_reset_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_zerolen_type_exec(chip, subop, PROG_RST);
+}
+
+static int anfc_erase_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_zerolen_type_exec(chip, subop, PROG_ERASE);
+}
+
+static int anfc_wait_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ int ret;
+
+ ret = anfc_parse_instructions(chip, subop, &nfc_op);
+ if (ret)
+ return ret;
+
+ return anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
+}
+
+static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ anfc_param_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_param_write_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_PARAM_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_data_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_data_write_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_reset_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_erase_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_status_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_wait_type_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static int anfc_check_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr;
+ int op_id;
+
+ /*
+ * The controller abstracts all the NAND operations and does not
+ * support data-only operations.
+ *
+ * TODO: The nand_op_parser framework should be extended to
+ * support custom checks on DATA instructions.
+ */
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_ADDR_INSTR:
+ if (instr->ctx.addr.naddrs > ANFC_MAX_ADDR_CYC)
+ return -ENOTSUPP;
+
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ if (instr->ctx.data.len > ANFC_MAX_CHUNK_SIZE)
+ return -ENOTSUPP;
+
+ if (anfc_pkt_len_config(instr->ctx.data.len, NULL, NULL))
+ return -ENOTSUPP;
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ /*
+ * The controller does not allow a CMD+DATA_IN cycle to be performed
+ * manually on the bus by reading data from the data register. Instead,
+ * it abstracts a status read operation with its own status register
+ * after ordering a read status operation. Hence, we cannot support any
+ * CMD+DATA_IN operation other than a READ STATUS.
+ *
+ * TODO: The nand_op_parser() framework should be extended to describe
+ * fixed patterns instead of open-coding this check here.
+ */
+ if (op->ninstrs == 2 &&
+ op->instrs[0].type == NAND_OP_CMD_INSTR &&
+ op->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS &&
+ op->instrs[1].type == NAND_OP_DATA_IN_INSTR)
+ return -ENOTSUPP;
+
+ return nand_op_parser_exec_op(chip, &anfc_op_parser, op, true);
+}
+
+static int anfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ int ret;
+
+ if (check_only)
+ return anfc_check_op(chip, op);
+
+ ret = anfc_select_target(chip, op->cs);
+ if (ret)
+ return ret;
+
+ return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only);
+}
+
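+/*
+ * NV-DDR clock derivation example: ONFI expresses tCK_min in picoseconds,
+ * so the bus clock computed below is NSEC_PER_SEC * 1000 / tCK_min, e.g.
+ * NV-DDR mode 5 (tCK_min = 10000 ps per ONFI) maps to a 100 MHz clock
+ * request and mode 0 (tCK_min = 50000 ps) to 20 MHz.
+ */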
+static int anfc_setup_interface(struct nand_chip *chip, int target,
+ const struct nand_interface_config *conf)
+{
+ struct anand *anand = to_anand(chip);
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct device_node *np = nfc->dev->of_node;
+ const struct nand_sdr_timings *sdr;
+ const struct nand_nvddr_timings *nvddr;
+ unsigned int tccs_min, dqs_mode, fast_tcad;
+
+ if (nand_interface_is_nvddr(conf)) {
+ nvddr = nand_get_nvddr_timings(conf);
+ if (IS_ERR(nvddr))
+ return PTR_ERR(nvddr);
+ } else {
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+ }
+
+ if (target < 0)
+ return 0;
+
+ if (nand_interface_is_sdr(conf)) {
+ anand->data_iface = DIFACE_SDR |
+ DIFACE_SDR_MODE(conf->timings.mode);
+ anand->timings = 0;
+ } else {
+ anand->data_iface = DIFACE_NVDDR |
+ DIFACE_DDR_MODE(conf->timings.mode);
+
+ if (conf->timings.nvddr.tCCS_min <= 100000)
+ tccs_min = TCCS_TIME_100NS;
+ else if (conf->timings.nvddr.tCCS_min <= 200000)
+ tccs_min = TCCS_TIME_200NS;
+ else if (conf->timings.nvddr.tCCS_min <= 300000)
+ tccs_min = TCCS_TIME_300NS;
+ else
+ tccs_min = TCCS_TIME_500NS;
+
+ fast_tcad = 0;
+ if (conf->timings.nvddr.tCAD_min < 45000)
+ fast_tcad = FAST_TCAD;
+
+ switch (conf->timings.mode) {
+ case 5:
+ case 4:
+ dqs_mode = 2;
+ break;
+ case 3:
+ dqs_mode = 3;
+ break;
+ case 2:
+ dqs_mode = 4;
+ break;
+ case 1:
+ dqs_mode = 5;
+ break;
+ case 0:
+ default:
+ dqs_mode = 6;
+ break;
+ }
+
+ anand->timings = tccs_min | fast_tcad |
+ DQS_BUFF_SEL_IN(dqs_mode) |
+ DQS_BUFF_SEL_OUT(dqs_mode);
+ }
+
+ if (nand_interface_is_sdr(conf)) {
+ anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
+ } else {
+ /* ONFI timings are defined in picoseconds */
+ anand->clk = div_u64((u64)NSEC_PER_SEC * 1000,
+ conf->timings.nvddr.tCK_min);
+ }
+
+ /*
+ * Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work
+ * with f > 90MHz (default clock is 100MHz) but signals are unstable
+ * with higher modes. Hence we slightly decrease the clock rate, down
+ * to 80MHz, when using SDR modes 2-5 with this SoC.
+ */
+ if (of_device_is_compatible(np, "xlnx,zynqmp-nand-controller") &&
+ nand_interface_is_sdr(conf) && conf->timings.mode >= 2)
+ anand->clk = ANFC_XLNX_SDR_HS_CORE_CLK;
+
+ return 0;
+}
+
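+/*
+ * ECC byte count example: a 512-byte step uses a GF(2^13) BCH code, so
+ * strength 8 needs 13 * 8 = 104 bits, i.e. DIV_ROUND_UP(104, 8) = 13 ECC
+ * bytes per step; a 1024-byte step at strength 24 needs 14 * 24 = 336
+ * bits, i.e. 42 bytes.
+ */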
+static int anfc_calc_hw_ecc_bytes(int step_size, int strength)
+{
+ unsigned int bch_gf_mag, ecc_bits;
+
+ switch (step_size) {
+ case SZ_512:
+ bch_gf_mag = 13;
+ break;
+ case SZ_1K:
+ bch_gf_mag = 14;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ecc_bits = bch_gf_mag * strength;
+
+ return DIV_ROUND_UP(ecc_bits, 8);
+}
+
+static const int anfc_hw_ecc_512_strengths[] = {4, 8, 12};
+
+static const int anfc_hw_ecc_1024_strengths[] = {24};
+
+static const struct nand_ecc_step_info anfc_hw_ecc_step_infos[] = {
+ {
+ .stepsize = SZ_512,
+ .strengths = anfc_hw_ecc_512_strengths,
+ .nstrengths = ARRAY_SIZE(anfc_hw_ecc_512_strengths),
+ },
+ {
+ .stepsize = SZ_1K,
+ .strengths = anfc_hw_ecc_1024_strengths,
+ .nstrengths = ARRAY_SIZE(anfc_hw_ecc_1024_strengths),
+ },
+};
+
+static const struct nand_ecc_caps anfc_hw_ecc_caps = {
+ .stepinfos = anfc_hw_ecc_step_infos,
+ .nstepinfos = ARRAY_SIZE(anfc_hw_ecc_step_infos),
+ .calc_ecc_bytes = anfc_calc_hw_ecc_bytes,
+};
+
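+/*
+ * Resulting layout illustration (for a hypothetical 4096 + 224 byte
+ * page): with 1 kiB steps at strength 24, ecc_bits = 336 per step,
+ * ecc->bytes = 42, ecc->steps = 4 and ecc_total = 168 bytes, so the
+ * syndromes occupy the last 168 OOB bytes and
+ * ecc_offset = 4096 + 224 - 168 = 4152.
+ */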
+static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc,
+ struct nand_chip *chip)
+{
+ struct anand *anand = to_anand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ unsigned int bch_prim_poly = 0, bch_gf_mag = 0, ecc_offset;
+ int ret;
+
+ switch (mtd->writesize) {
+ case SZ_512:
+ case SZ_2K:
+ case SZ_4K:
+ case SZ_8K:
+ case SZ_16K:
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported page size %d\n", mtd->writesize);
+ return -EINVAL;
+ }
+
+ ret = nand_ecc_choose_conf(chip, &anfc_hw_ecc_caps, mtd->oobsize);
+ if (ret)
+ return ret;
+
+ switch (ecc->strength) {
+ case 12:
+ anand->strength = 0x1;
+ break;
+ case 8:
+ anand->strength = 0x2;
+ break;
+ case 4:
+ anand->strength = 0x3;
+ break;
+ case 24:
+ anand->strength = 0x4;
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported strength %d\n", ecc->strength);
+ return -EINVAL;
+ }
+
+ switch (ecc->size) {
+ case SZ_512:
+ bch_gf_mag = 13;
+ bch_prim_poly = 0x201b;
+ break;
+ case SZ_1K:
+ bch_gf_mag = 14;
+ bch_prim_poly = 0x4443;
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported step size %d\n", ecc->strength);
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+
+ ecc->steps = mtd->writesize / ecc->size;
+ ecc->algo = NAND_ECC_ALGO_BCH;
+ anand->ecc_bits = bch_gf_mag * ecc->strength;
+ ecc->bytes = DIV_ROUND_UP(anand->ecc_bits, 8);
+ anand->ecc_total = DIV_ROUND_UP(anand->ecc_bits * ecc->steps, 8);
+ ecc_offset = mtd->writesize + mtd->oobsize - anand->ecc_total;
+ anand->ecc_conf = ECC_CONF_COL(ecc_offset) |
+ ECC_CONF_LEN(anand->ecc_total) |
+ ECC_CONF_BCH_EN;
+
+ anand->errloc = devm_kmalloc_array(nfc->dev, ecc->strength,
+ sizeof(*anand->errloc), GFP_KERNEL);
+ if (!anand->errloc)
+ return -ENOMEM;
+
+ anand->hw_ecc = devm_kmalloc(nfc->dev, ecc->bytes, GFP_KERNEL);
+ if (!anand->hw_ecc)
+ return -ENOMEM;
+
+ /* Enforce bit swapping to fit the hardware */
+ anand->bch = bch_init(bch_gf_mag, ecc->strength, bch_prim_poly, true);
+ if (!anand->bch)
+ return -EINVAL;
+
+ ecc->read_page = anfc_sel_read_page_hw_ecc;
+ ecc->write_page = anfc_sel_write_page_hw_ecc;
+
+ return 0;
+}
+
+static int anfc_attach_chip(struct nand_chip *chip)
+{
+ struct anand *anand = to_anand(chip);
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret = 0;
+
+ if (mtd->writesize <= SZ_512)
+ anand->caddr_cycles = 1;
+ else
+ anand->caddr_cycles = 2;
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ anand->raddr_cycles = 3;
+ else
+ anand->raddr_cycles = 2;
+
+ switch (mtd->writesize) {
+ case 512:
+ anand->page_sz = 0;
+ break;
+ case 1024:
+ anand->page_sz = 5;
+ break;
+ case 2048:
+ anand->page_sz = 1;
+ break;
+ case 4096:
+ anand->page_sz = 2;
+ break;
+ case 8192:
+ anand->page_sz = 3;
+ break;
+ case 16384:
+ anand->page_sz = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* These hooks are valid for all ECC providers */
+ chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
+ chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ break;
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = anfc_init_hw_ecc_controller(nfc, chip);
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
+ chip->ecc.engine_type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void anfc_detach_chip(struct nand_chip *chip)
+{
+ struct anand *anand = to_anand(chip);
+
+ if (anand->bch)
+ bch_free(anand->bch);
+}
+
+static const struct nand_controller_ops anfc_ops = {
+ .exec_op = anfc_exec_op,
+ .setup_interface = anfc_setup_interface,
+ .attach_chip = anfc_attach_chip,
+ .detach_chip = anfc_detach_chip,
+};
+
+static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np)
+{
+ struct anand *anand;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ int rb, ret, i;
+
+ anand = devm_kzalloc(nfc->dev, sizeof(*anand), GFP_KERNEL);
+ if (!anand)
+ return -ENOMEM;
+
+ /* Chip-select init */
+ anand->ncs_idx = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (anand->ncs_idx <= 0 || anand->ncs_idx > nfc->ncs) {
+ dev_err(nfc->dev, "Invalid reg property\n");
+ return -EINVAL;
+ }
+
+ anand->cs_idx = devm_kcalloc(nfc->dev, anand->ncs_idx,
+ sizeof(*anand->cs_idx), GFP_KERNEL);
+ if (!anand->cs_idx)
+ return -ENOMEM;
+
+ for (i = 0; i < anand->ncs_idx; i++) {
+ ret = of_property_read_u32_index(np, "reg", i,
+ &anand->cs_idx[i]);
+ if (ret) {
+ dev_err(nfc->dev, "invalid CS property: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* Ready-busy init */
+ ret = of_property_read_u32(np, "nand-rb", &rb);
+ if (ret)
+ return ret;
+
+ if (rb >= ANFC_MAX_CS) {
+ dev_err(nfc->dev, "Wrong RB %d\n", rb);
+ return -EINVAL;
+ }
+
+ anand->rb = rb;
+
+ chip = &anand->chip;
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = nfc->dev;
+ chip->controller = &nfc->controller;
+ chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
+ NAND_USES_DMA;
+
+ nand_set_flash_node(chip, np);
+ if (!mtd->name) {
+ dev_err(nfc->dev, "NAND label property is mandatory\n");
+ return -EINVAL;
+ }
+
+ ret = nand_scan(chip, anand->ncs_idx);
+ if (ret) {
+ dev_err(nfc->dev, "Scan operation failed\n");
+ return ret;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&anand->node, &nfc->chips);
+
+ return 0;
+}
+
+static void anfc_chips_cleanup(struct arasan_nfc *nfc)
+{
+ struct anand *anand, *tmp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(anand, tmp, &nfc->chips, node) {
+ chip = &anand->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&anand->node);
+ }
+}
+
+static int anfc_chips_init(struct arasan_nfc *nfc)
+{
+ struct device_node *np = nfc->dev->of_node, *nand_np;
+ int nchips = of_get_child_count(np);
+ int ret;
+
+ if (!nchips) {
+ dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
+ nchips);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = anfc_chip_init(nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ anfc_chips_cleanup(nfc);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void anfc_reset(struct arasan_nfc *nfc)
+{
+ /* Disable interrupt signals */
+ writel_relaxed(0, nfc->base + INTR_SIG_EN_REG);
+
+ /* Enable interrupt status */
+ writel_relaxed(EVENT_MASK, nfc->base + INTR_STS_EN_REG);
+
+ nfc->cur_cs = -1;
+}
+
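+/*
+ * Spare CS example: with three CS described where positions 0 and 2 hold
+ * GPIO descriptors and position 1 is a native CS (NULL entry), spare_cs
+ * becomes 0; whenever a GPIO CS is asserted, the controller is pointed
+ * at its unwired native CS 0 while the GPIO selects the actual chip.
+ */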
+static int anfc_parse_cs(struct arasan_nfc *nfc)
+{
+ int ret;
+
+ /* Check the gpio-cs property */
+ ret = rawnand_dt_parse_gpio_cs(nfc->dev, &nfc->cs_array, &nfc->ncs);
+ if (ret)
+ return ret;
+
+ /*
+ * The two controller native CS lines cannot both be disabled at the
+ * same time. Hence, only one native CS can be used if GPIO CS are
+ * needed, so that the other one is selected when a non-native CS must
+ * be asserted (not wired physically, or configured as a GPIO instead
+ * of a NAND CS). In this case, the CS that was not chosen is assigned
+ * to nfc->spare_cs and selected whenever a GPIO CS must be asserted.
+ */
+ if (nfc->cs_array && nfc->ncs > 2) {
+ if (!nfc->cs_array[0] && !nfc->cs_array[1]) {
+ dev_err(nfc->dev,
+ "Assign a single native CS when using GPIOs\n");
+ return -EINVAL;
+ }
+
+ if (nfc->cs_array[0])
+ nfc->spare_cs = 0;
+ else
+ nfc->spare_cs = 1;
+ }
+
+ if (!nfc->cs_array) {
+ nfc->cs_array = anfc_default_cs_array;
+ nfc->ncs = ANFC_MAX_CS;
+ return 0;
+ }
+
+ return 0;
+}
+
+static int anfc_probe(struct platform_device *pdev)
+{
+ struct arasan_nfc *nfc;
+ int ret;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = &pdev->dev;
+ nand_controller_init(&nfc->controller);
+ nfc->controller.ops = &anfc_ops;
+ INIT_LIST_HEAD(&nfc->chips);
+
+ nfc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nfc->base))
+ return PTR_ERR(nfc->base);
+
+ anfc_reset(nfc);
+
+ nfc->controller_clk = devm_clk_get_enabled(&pdev->dev, "controller");
+ if (IS_ERR(nfc->controller_clk))
+ return PTR_ERR(nfc->controller_clk);
+
+ nfc->bus_clk = devm_clk_get_enabled(&pdev->dev, "bus");
+ if (IS_ERR(nfc->bus_clk))
+ return PTR_ERR(nfc->bus_clk);
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ ret = anfc_parse_cs(nfc);
+ if (ret)
+ return ret;
+
+ ret = anfc_chips_init(nfc);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, nfc);
+
+ return 0;
+}
+
+static void anfc_remove(struct platform_device *pdev)
+{
+ struct arasan_nfc *nfc = platform_get_drvdata(pdev);
+
+ anfc_chips_cleanup(nfc);
+}
+
+static const struct of_device_id anfc_ids[] = {
+ {
+ .compatible = "xlnx,zynqmp-nand-controller",
+ },
+ {
+ .compatible = "arasan,nfc-v3p10",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, anfc_ids);
+
+static struct platform_driver anfc_driver = {
+ .driver = {
+ .name = "arasan-nand-controller",
+ .of_match_table = anfc_ids,
+ },
+ .probe = anfc_probe,
+ .remove_new = anfc_remove,
+};
+module_platform_driver(anfc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Punnaiah Choudary Kalluri <punnaia@xilinx.com>");
+MODULE_AUTHOR("Naga Sureshkumar Relli <nagasure@xilinx.com>");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver");
diff --git a/drivers/mtd/nand/raw/atmel/Makefile b/drivers/mtd/nand/raw/atmel/Makefile
new file mode 100644
index 0000000000..27c2dd50e8
--- /dev/null
+++ b/drivers/mtd/nand/raw/atmel/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MTD_NAND_ATMEL) += atmel-nand-controller.o atmel-pmecc.o
+
+atmel-nand-controller-objs := nand-controller.o
+atmel-pmecc-objs := pmecc.o
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
new file mode 100644
index 0000000000..3f494f7c7e
--- /dev/null
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -0,0 +1,2670 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 ATMEL
+ * Copyright 2017 Free Electrons
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ * Derived from the atmel_nand.c driver which contained the following
+ * copyrights:
+ *
+ * Copyright 2003 Rick Bronson
+ *
+ * Derived from drivers/mtd/nand/autcpu12.c (removed in v3.8)
+ * Copyright 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ * Derived from drivers/mtd/spia.c (removed in v3.8)
+ * Copyright 2000 Steven J. Hill (sjhill@cotw.com)
+ *
+ *
+ * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
+ * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright 2007
+ *
+ * Derived from Das U-Boot source code
+ * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
+ * Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
+ *
+ * Add Programmable Multibit ECC support for various AT91 SoC
+ * Copyright 2012 ATMEL, Hong Xu
+ *
+ * Add Nand Flash Controller support for SAMA5 SoC
+ * Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
+ *
+ * A few words about the naming convention in this file. This convention
+ * applies to structure and function names.
+ *
+ * Prefixes:
+ *
+ * - atmel_nand_: all generic structures/functions
+ * - atmel_smc_nand_: all structures/functions specific to the SMC interface
+ * (at91sam9 and avr32 SoCs)
+ * - atmel_hsmc_nand_: all structures/functions specific to the HSMC interface
+ * (sama5 SoCs and later)
+ * - atmel_nfc_: all structures/functions used to manipulate the NFC sub-block
+ * that is available in the HSMC block
+ * - <soc>_nand_: all SoC specific structures/functions
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/genalloc.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/atmel-matrix.h>
+#include <linux/mfd/syscon/atmel-smc.h>
+#include <linux/module.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <soc/at91/atmel-sfr.h>
+
+#include "pmecc.h"
+
+#define ATMEL_HSMC_NFC_CFG 0x0
+#define ATMEL_HSMC_NFC_CFG_SPARESIZE(x) (((x) / 4) << 24)
+#define ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK GENMASK(30, 24)
+#define ATMEL_HSMC_NFC_CFG_DTO(cyc, mul) (((cyc) << 16) | ((mul) << 20))
+#define ATMEL_HSMC_NFC_CFG_DTO_MAX GENMASK(22, 16)
+#define ATMEL_HSMC_NFC_CFG_RBEDGE BIT(13)
+#define ATMEL_HSMC_NFC_CFG_FALLING_EDGE BIT(12)
+#define ATMEL_HSMC_NFC_CFG_RSPARE BIT(9)
+#define ATMEL_HSMC_NFC_CFG_WSPARE BIT(8)
+#define ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK GENMASK(2, 0)
+#define ATMEL_HSMC_NFC_CFG_PAGESIZE(x) (fls((x) / 512) - 1)
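+/* e.g. PAGESIZE(2048) = fls(4) - 1 = 2 and SPARESIZE(64) = 16 << 24 */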
+
+#define ATMEL_HSMC_NFC_CTRL 0x4
+#define ATMEL_HSMC_NFC_CTRL_EN BIT(0)
+#define ATMEL_HSMC_NFC_CTRL_DIS BIT(1)
+
+#define ATMEL_HSMC_NFC_SR 0x8
+#define ATMEL_HSMC_NFC_IER 0xc
+#define ATMEL_HSMC_NFC_IDR 0x10
+#define ATMEL_HSMC_NFC_IMR 0x14
+#define ATMEL_HSMC_NFC_SR_ENABLED BIT(1)
+#define ATMEL_HSMC_NFC_SR_RB_RISE BIT(4)
+#define ATMEL_HSMC_NFC_SR_RB_FALL BIT(5)
+#define ATMEL_HSMC_NFC_SR_BUSY BIT(8)
+#define ATMEL_HSMC_NFC_SR_WR BIT(11)
+#define ATMEL_HSMC_NFC_SR_CSID GENMASK(14, 12)
+#define ATMEL_HSMC_NFC_SR_XFRDONE BIT(16)
+#define ATMEL_HSMC_NFC_SR_CMDDONE BIT(17)
+#define ATMEL_HSMC_NFC_SR_DTOE BIT(20)
+#define ATMEL_HSMC_NFC_SR_UNDEF BIT(21)
+#define ATMEL_HSMC_NFC_SR_AWB BIT(22)
+#define ATMEL_HSMC_NFC_SR_NFCASE BIT(23)
+#define ATMEL_HSMC_NFC_SR_ERRORS (ATMEL_HSMC_NFC_SR_DTOE | \
+ ATMEL_HSMC_NFC_SR_UNDEF | \
+ ATMEL_HSMC_NFC_SR_AWB | \
+ ATMEL_HSMC_NFC_SR_NFCASE)
+#define ATMEL_HSMC_NFC_SR_RBEDGE(x) BIT((x) + 24)
+
+#define ATMEL_HSMC_NFC_ADDR 0x18
+#define ATMEL_HSMC_NFC_BANK 0x1c
+
+#define ATMEL_NFC_MAX_RB_ID 7
+
+#define ATMEL_NFC_SRAM_SIZE 0x2400
+
+#define ATMEL_NFC_CMD(pos, cmd) ((cmd) << (((pos) * 8) + 2))
+#define ATMEL_NFC_VCMD2 BIT(18)
+#define ATMEL_NFC_ACYCLE(naddrs) ((naddrs) << 19)
+#define ATMEL_NFC_CSID(cs) ((cs) << 22)
+#define ATMEL_NFC_DATAEN BIT(25)
+#define ATMEL_NFC_NFCWR BIT(26)
+
+#define ATMEL_NFC_MAX_ADDR_CYCLES 5
+
+#define ATMEL_NAND_ALE_OFFSET BIT(21)
+#define ATMEL_NAND_CLE_OFFSET BIT(22)
+
+#define DEFAULT_TIMEOUT_MS 1000
+#define MIN_DMA_LEN 128
+
+static bool atmel_nand_avoid_dma __read_mostly;
+
+MODULE_PARM_DESC(avoiddma, "Avoid using DMA");
+module_param_named(avoiddma, atmel_nand_avoid_dma, bool, 0400);
+
+enum atmel_nand_rb_type {
+ ATMEL_NAND_NO_RB,
+ ATMEL_NAND_NATIVE_RB,
+ ATMEL_NAND_GPIO_RB,
+};
+
+struct atmel_nand_rb {
+ enum atmel_nand_rb_type type;
+ union {
+ struct gpio_desc *gpio;
+ int id;
+ };
+};
+
+struct atmel_nand_cs {
+ int id;
+ struct atmel_nand_rb rb;
+ struct gpio_desc *csgpio;
+ struct {
+ void __iomem *virt;
+ dma_addr_t dma;
+ } io;
+
+ struct atmel_smc_cs_conf smcconf;
+};
+
+struct atmel_nand {
+ struct list_head node;
+ struct device *dev;
+ struct nand_chip base;
+ struct atmel_nand_cs *activecs;
+ struct atmel_pmecc_user *pmecc;
+ struct gpio_desc *cdgpio;
+ int numcs;
+ struct atmel_nand_cs cs[];
+};
+
+static inline struct atmel_nand *to_atmel_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct atmel_nand, base);
+}
+
+enum atmel_nfc_data_xfer {
+ ATMEL_NFC_NO_DATA,
+ ATMEL_NFC_READ_DATA,
+ ATMEL_NFC_WRITE_DATA,
+};
+
+struct atmel_nfc_op {
+ u8 cs;
+ u8 ncmds;
+ u8 cmds[2];
+ u8 naddrs;
+ u8 addrs[5];
+ enum atmel_nfc_data_xfer data;
+ u32 wait;
+ u32 errors;
+};
+
+struct atmel_nand_controller;
+struct atmel_nand_controller_caps;
+
+struct atmel_nand_controller_ops {
+ int (*probe)(struct platform_device *pdev,
+ const struct atmel_nand_controller_caps *caps);
+ int (*remove)(struct atmel_nand_controller *nc);
+ void (*nand_init)(struct atmel_nand_controller *nc,
+ struct atmel_nand *nand);
+ int (*ecc_init)(struct nand_chip *chip);
+ int (*setup_interface)(struct atmel_nand *nand, int csline,
+ const struct nand_interface_config *conf);
+ int (*exec_op)(struct atmel_nand *nand,
+ const struct nand_operation *op, bool check_only);
+};
+
+struct atmel_nand_controller_caps {
+ bool has_dma;
+ bool legacy_of_bindings;
+ u32 ale_offs;
+ u32 cle_offs;
+ const char *ebi_csa_regmap_name;
+ const struct atmel_nand_controller_ops *ops;
+};
+
+struct atmel_nand_controller {
+ struct nand_controller base;
+ const struct atmel_nand_controller_caps *caps;
+ struct device *dev;
+ struct regmap *smc;
+ struct dma_chan *dmac;
+ struct atmel_pmecc *pmecc;
+ struct list_head chips;
+ struct clk *mck;
+};
+
+static inline struct atmel_nand_controller *
+to_nand_controller(struct nand_controller *ctl)
+{
+ return container_of(ctl, struct atmel_nand_controller, base);
+}
+
+struct atmel_smc_nand_ebi_csa_cfg {
+ u32 offs;
+ u32 nfd0_on_d16;
+};
+
+struct atmel_smc_nand_controller {
+ struct atmel_nand_controller base;
+ struct regmap *ebi_csa_regmap;
+ struct atmel_smc_nand_ebi_csa_cfg *ebi_csa;
+};
+
+static inline struct atmel_smc_nand_controller *
+to_smc_nand_controller(struct nand_controller *ctl)
+{
+ return container_of(to_nand_controller(ctl),
+ struct atmel_smc_nand_controller, base);
+}
+
+struct atmel_hsmc_nand_controller {
+ struct atmel_nand_controller base;
+ struct {
+ struct gen_pool *pool;
+ void __iomem *virt;
+ dma_addr_t dma;
+ } sram;
+ const struct atmel_hsmc_reg_layout *hsmc_layout;
+ struct regmap *io;
+ struct atmel_nfc_op op;
+ struct completion complete;
+ u32 cfg;
+ int irq;
+
+ /* Only used when instantiating from legacy DT bindings. */
+ struct clk *clk;
+};
+
+static inline struct atmel_hsmc_nand_controller *
+to_hsmc_nand_controller(struct nand_controller *ctl)
+{
+ return container_of(to_nand_controller(ctl),
+ struct atmel_hsmc_nand_controller, base);
+}
+
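+/*
+ * Completion example: a data transfer waits on CMDDONE | XFRDONE. If a
+ * first status read returns only CMDDONE, the XOR below clears that bit
+ * from op->wait and the op is not done yet; a later status carrying
+ * XFRDONE empties op->wait and completes the op. Any bit from
+ * ATMEL_HSMC_NFC_SR_ERRORS also ends the op immediately.
+ */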
+static bool atmel_nfc_op_done(struct atmel_nfc_op *op, u32 status)
+{
+ op->errors |= status & ATMEL_HSMC_NFC_SR_ERRORS;
+ op->wait ^= status & op->wait;
+
+ return !op->wait || op->errors;
+}
+
+static irqreturn_t atmel_nfc_interrupt(int irq, void *data)
+{
+ struct atmel_hsmc_nand_controller *nc = data;
+ u32 sr, rcvd;
+ bool done;
+
+ regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &sr);
+
+ rcvd = sr & (nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
+ done = atmel_nfc_op_done(&nc->op, sr);
+
+ if (rcvd)
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, rcvd);
+
+ if (done)
+ complete(&nc->complete);
+
+ return rcvd ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int atmel_nfc_wait(struct atmel_hsmc_nand_controller *nc, bool poll,
+ unsigned int timeout_ms)
+{
+ int ret;
+
+ if (!timeout_ms)
+ timeout_ms = DEFAULT_TIMEOUT_MS;
+
+ if (poll) {
+ u32 status;
+
+ ret = regmap_read_poll_timeout(nc->base.smc,
+ ATMEL_HSMC_NFC_SR, status,
+ atmel_nfc_op_done(&nc->op,
+ status),
+ 0, timeout_ms * 1000);
+ } else {
+ init_completion(&nc->complete);
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IER,
+ nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
+ ret = wait_for_completion_timeout(&nc->complete,
+ msecs_to_jiffies(timeout_ms));
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
+
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
+ }
+
+ if (nc->op.errors & ATMEL_HSMC_NFC_SR_DTOE) {
+ dev_err(nc->base.dev, "Waiting NAND R/B Timeout\n");
+ ret = -ETIMEDOUT;
+ }
+
+ if (nc->op.errors & ATMEL_HSMC_NFC_SR_UNDEF) {
+ dev_err(nc->base.dev, "Access to an undefined area\n");
+ ret = -EIO;
+ }
+
+ if (nc->op.errors & ATMEL_HSMC_NFC_SR_AWB) {
+ dev_err(nc->base.dev, "Access while busy\n");
+ ret = -EIO;
+ }
+
+ if (nc->op.errors & ATMEL_HSMC_NFC_SR_NFCASE) {
+ dev_err(nc->base.dev, "Wrong access size\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static void atmel_nand_dma_transfer_finished(void *data)
+{
+ struct completion *finished = data;
+
+ complete(finished);
+}
+
+static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
+ void *buf, dma_addr_t dev_dma, size_t len,
+ enum dma_data_direction dir)
+{
+ DECLARE_COMPLETION_ONSTACK(finished);
+ dma_addr_t src_dma, dst_dma, buf_dma;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+
+ buf_dma = dma_map_single(nc->dev, buf, len, dir);
+ if (dma_mapping_error(nc->dev, buf_dma)) {
+ dev_err(nc->dev,
+ "Failed to prepare a buffer for DMA access\n");
+ goto err;
+ }
+
+ if (dir == DMA_FROM_DEVICE) {
+ src_dma = dev_dma;
+ dst_dma = buf_dma;
+ } else {
+ src_dma = buf_dma;
+ dst_dma = dev_dma;
+ }
+
+ tx = dmaengine_prep_dma_memcpy(nc->dmac, dst_dma, src_dma, len,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(nc->dev, "Failed to prepare DMA memcpy\n");
+ goto err_unmap;
+ }
+
+ tx->callback = atmel_nand_dma_transfer_finished;
+ tx->callback_param = &finished;
+
+ cookie = dmaengine_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(nc->dev, "Failed to do DMA tx_submit\n");
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(nc->dmac);
+ wait_for_completion(&finished);
+ dma_unmap_single(nc->dev, buf_dma, len, dir);
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(nc->dev, buf_dma, len, dir);
+
+err:
+ dev_dbg(nc->dev, "Fall back to CPU I/O\n");
+
+ return -EIO;
+}
+
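+/*
+ * Command word example: a READ0/READSTART cycle on CS 3 with five address
+ * cycles and a data phase is encoded as ATMEL_NFC_CMD(0, 0x00) |
+ * ATMEL_NFC_CMD(1, 0x30) | ATMEL_NFC_VCMD2 | ATMEL_NFC_ACYCLE(5) |
+ * ATMEL_NFC_CSID(3) | ATMEL_NFC_DATAEN. One address byte goes to
+ * ATMEL_HSMC_NFC_ADDR and the remaining four are packed into @addr; note
+ * that the command word is used as the register offset of the regmap
+ * write while the packed address bytes form the written value.
+ */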
+static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
+{
+ u8 *addrs = nc->op.addrs;
+ unsigned int op = 0;
+ u32 addr, val;
+ int i, ret;
+
+ nc->op.wait = ATMEL_HSMC_NFC_SR_CMDDONE;
+
+ for (i = 0; i < nc->op.ncmds; i++)
+ op |= ATMEL_NFC_CMD(i, nc->op.cmds[i]);
+
+ if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_ADDR, *addrs++);
+
+ op |= ATMEL_NFC_CSID(nc->op.cs) |
+ ATMEL_NFC_ACYCLE(nc->op.naddrs);
+
+ if (nc->op.ncmds > 1)
+ op |= ATMEL_NFC_VCMD2;
+
+ addr = addrs[0] | (addrs[1] << 8) | (addrs[2] << 16) |
+ (addrs[3] << 24);
+
+ if (nc->op.data != ATMEL_NFC_NO_DATA) {
+ op |= ATMEL_NFC_DATAEN;
+ nc->op.wait |= ATMEL_HSMC_NFC_SR_XFRDONE;
+
+ if (nc->op.data == ATMEL_NFC_WRITE_DATA)
+ op |= ATMEL_NFC_NFCWR;
+ }
+
+ /* Clear all flags. */
+ regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &val);
+
+ /* Send the command. */
+ regmap_write(nc->io, op, addr);
+
+ ret = atmel_nfc_wait(nc, poll, 0);
+ if (ret)
+ dev_err(nc->base.dev,
+ "Failed to send NAND command (err = %d)!",
+ ret);
+
+ /* Reset the op state. */
+ memset(&nc->op, 0, sizeof(nc->op));
+
+ return ret;
+}
+
+static void atmel_nand_data_in(struct atmel_nand *nand, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct atmel_nand_controller *nc;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ /*
+ * If the controller supports DMA, the buffer address is DMA-able,
+ * and len is long enough to make a DMA transfer profitable, let's
+ * trigger one. If it fails, fall back to PIO mode.
+ */
+ if (nc->dmac && virt_addr_valid(buf) &&
+ len >= MIN_DMA_LEN && !force_8bit &&
+ !atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len,
+ DMA_FROM_DEVICE))
+ return;
+
+ if ((nand->base.options & NAND_BUSWIDTH_16) && !force_8bit)
+ ioread16_rep(nand->activecs->io.virt, buf, len / 2);
+ else
+ ioread8_rep(nand->activecs->io.virt, buf, len);
+}
+
+static void atmel_nand_data_out(struct atmel_nand *nand, const void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct atmel_nand_controller *nc;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ /*
+ * If the controller supports DMA, the buffer address is DMA-able,
+ * and len is long enough to make a DMA transfer profitable, let's
+ * trigger one. If it fails, fall back to PIO mode.
+ */
+ if (nc->dmac && virt_addr_valid(buf) &&
+ len >= MIN_DMA_LEN && !force_8bit &&
+ !atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma,
+ len, DMA_TO_DEVICE))
+ return;
+
+ if ((nand->base.options & NAND_BUSWIDTH_16) && !force_8bit)
+ iowrite16_rep(nand->activecs->io.virt, buf, len / 2);
+ else
+ iowrite8_rep(nand->activecs->io.virt, buf, len);
+}
+
+static int atmel_nand_waitrdy(struct atmel_nand *nand, unsigned int timeout_ms)
+{
+ if (nand->activecs->rb.type == ATMEL_NAND_NO_RB)
+ return nand_soft_waitrdy(&nand->base, timeout_ms);
+
+ return nand_gpio_waitrdy(&nand->base, nand->activecs->rb.gpio,
+ timeout_ms);
+}
+
+static int atmel_hsmc_nand_waitrdy(struct atmel_nand *nand,
+ unsigned int timeout_ms)
+{
+ struct atmel_hsmc_nand_controller *nc;
+ u32 status, mask;
+
+ if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
+ return atmel_nand_waitrdy(nand, timeout_ms);
+
+ nc = to_hsmc_nand_controller(nand->base.controller);
+ mask = ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
+ return regmap_read_poll_timeout_atomic(nc->base.smc, ATMEL_HSMC_NFC_SR,
+ status, status & mask,
+ 10, timeout_ms * 1000);
+}
+
+static void atmel_nand_select_target(struct atmel_nand *nand,
+ unsigned int cs)
+{
+ nand->activecs = &nand->cs[cs];
+}
+
+static void atmel_hsmc_nand_select_target(struct atmel_nand *nand,
+ unsigned int cs)
+{
+ struct mtd_info *mtd = nand_to_mtd(&nand->base);
+ struct atmel_hsmc_nand_controller *nc;
+ u32 cfg = ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) |
+ ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) |
+ ATMEL_HSMC_NFC_CFG_RSPARE;
+
+ nand->activecs = &nand->cs[cs];
+ nc = to_hsmc_nand_controller(nand->base.controller);
+ if (nc->cfg == cfg)
+ return;
+
+ regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
+ ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK |
+ ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK |
+ ATMEL_HSMC_NFC_CFG_RSPARE |
+ ATMEL_HSMC_NFC_CFG_WSPARE,
+ cfg);
+ nc->cfg = cfg;
+}
+
+static int atmel_smc_nand_exec_instr(struct atmel_nand *nand,
+ const struct nand_op_instr *instr)
+{
+ struct atmel_nand_controller *nc;
+ unsigned int i;
+
+ nc = to_nand_controller(nand->base.controller);
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb(instr->ctx.cmd.opcode,
+ nand->activecs->io.virt + nc->caps->cle_offs);
+ return 0;
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ writeb(instr->ctx.addr.addrs[i],
+ nand->activecs->io.virt + nc->caps->ale_offs);
+ return 0;
+ case NAND_OP_DATA_IN_INSTR:
+ atmel_nand_data_in(nand, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ return 0;
+ case NAND_OP_DATA_OUT_INSTR:
+ atmel_nand_data_out(nand, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ return 0;
+ case NAND_OP_WAITRDY_INSTR:
+ return atmel_nand_waitrdy(nand,
+ instr->ctx.waitrdy.timeout_ms);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int atmel_smc_nand_exec_op(struct atmel_nand *nand,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ unsigned int i;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ atmel_nand_select_target(nand, op->cs);
+ gpiod_set_value(nand->activecs->csgpio, 0);
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = atmel_smc_nand_exec_instr(nand, &op->instrs[i]);
+ if (ret)
+ break;
+ }
+ gpiod_set_value(nand->activecs->csgpio, 1);
+
+ return ret;
+}
+
+static int atmel_hsmc_exec_cmd_addr(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct atmel_hsmc_nand_controller *nc;
+ unsigned int i, j;
+
+ nc = to_hsmc_nand_controller(chip->controller);
+
+ nc->op.cs = nand->activecs->id;
+ for (i = 0; i < subop->ninstrs; i++) {
+ const struct nand_op_instr *instr = &subop->instrs[i];
+
+ if (instr->type == NAND_OP_CMD_INSTR) {
+ nc->op.cmds[nc->op.ncmds++] = instr->ctx.cmd.opcode;
+ continue;
+ }
+
+ for (j = nand_subop_get_addr_start_off(subop, i);
+ j < nand_subop_get_num_addr_cyc(subop, i); j++) {
+ nc->op.addrs[nc->op.naddrs] = instr->ctx.addr.addrs[j];
+ nc->op.naddrs++;
+ }
+ }
+
+ return atmel_nfc_exec_op(nc, true);
+}
+
+static int atmel_hsmc_exec_rw(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr = subop->instrs;
+ struct atmel_nand *nand = to_atmel_nand(chip);
+
+ if (instr->type == NAND_OP_DATA_IN_INSTR)
+ atmel_nand_data_in(nand, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ else
+ atmel_nand_data_out(nand, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+
+ return 0;
+}
+
+static int atmel_hsmc_exec_waitrdy(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr = subop->instrs;
+ struct atmel_nand *nand = to_atmel_nand(chip);
+
+ return atmel_hsmc_nand_waitrdy(nand, instr->ctx.waitrdy.timeout_ms);
+}
+
+static const struct nand_op_parser atmel_hsmc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_cmd_addr,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
+ NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0)),
+ NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_waitrdy,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+);
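+
+/*
+ * For instance, a large-page read issued by the core as CMD(READ0) +
+ * 5 ADDR cycles + CMD(READSTART) + WAITRDY + DATA_IN is split by this
+ * parser into one atmel_hsmc_exec_cmd_addr() call, one
+ * atmel_hsmc_exec_waitrdy() call and one atmel_hsmc_exec_rw() call, in
+ * that order.
+ */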
+
+static int atmel_hsmc_nand_exec_op(struct atmel_nand *nand,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ int ret;
+
+ if (check_only)
+ return nand_op_parser_exec_op(&nand->base,
+ &atmel_hsmc_op_parser, op, true);
+
+ atmel_hsmc_nand_select_target(nand, op->cs);
+ ret = nand_op_parser_exec_op(&nand->base, &atmel_hsmc_op_parser, op,
+ false);
+
+ return ret;
+}
+
+static void atmel_nfc_copy_to_sram(struct nand_chip *chip, const u8 *buf,
+ bool oob_required)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_hsmc_nand_controller *nc;
+ int ret = -EIO;
+
+ nc = to_hsmc_nand_controller(chip->controller);
+
+ if (nc->base.dmac)
+ ret = atmel_nand_dma_transfer(&nc->base, (void *)buf,
+ nc->sram.dma, mtd->writesize,
+ DMA_TO_DEVICE);
+
+ /* Fall back to a CPU copy if the DMA transfer failed or DMA is unavailable. */
+ if (ret)
+ memcpy_toio(nc->sram.virt, buf, mtd->writesize);
+
+ if (oob_required)
+ memcpy_toio(nc->sram.virt + mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
+}
+
+static void atmel_nfc_copy_from_sram(struct nand_chip *chip, u8 *buf,
+ bool oob_required)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_hsmc_nand_controller *nc;
+ int ret = -EIO;
+
+ nc = to_hsmc_nand_controller(chip->controller);
+
+ if (nc->base.dmac)
+ ret = atmel_nand_dma_transfer(&nc->base, buf, nc->sram.dma,
+ mtd->writesize, DMA_FROM_DEVICE);
+
+ /* Fall back to a CPU copy if the DMA transfer failed or DMA is unavailable. */
+ if (ret)
+ memcpy_fromio(buf, nc->sram.virt, mtd->writesize);
+
+ if (oob_required)
+ memcpy_fromio(chip->oob_poi, nc->sram.virt + mtd->writesize,
+ mtd->oobsize);
+}
+
+static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_hsmc_nand_controller *nc;
+
+ nc = to_hsmc_nand_controller(chip->controller);
+
+ if (column >= 0) {
+ nc->op.addrs[nc->op.naddrs++] = column;
+
+ /*
+ * 2 address cycles for the column offset on large page NANDs.
+ */
+ if (mtd->writesize > 512)
+ nc->op.addrs[nc->op.naddrs++] = column >> 8;
+ }
+
+ if (page >= 0) {
+ nc->op.addrs[nc->op.naddrs++] = page;
+ nc->op.addrs[nc->op.naddrs++] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ nc->op.addrs[nc->op.naddrs++] = page >> 16;
+ }
+}
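+
+/*
+ * For example, on a 2048-byte page device using three row address
+ * cycles, column 0 of page 0x12345 yields the address cycle sequence
+ * { 0x00, 0x00, 0x45, 0x23, 0x01 }.
+ */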
+
+static int atmel_nand_pmecc_enable(struct nand_chip *chip, int op, bool raw)
+{
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct atmel_nand_controller *nc;
+ int ret;
+
+ nc = to_nand_controller(chip->controller);
+
+ if (raw)
+ return 0;
+
+ ret = atmel_pmecc_enable(nand->pmecc, op);
+ if (ret)
+ dev_err(nc->dev,
+ "Failed to enable ECC engine (err = %d)\n", ret);
+
+ return ret;
+}
+
+static void atmel_nand_pmecc_disable(struct nand_chip *chip, bool raw)
+{
+ struct atmel_nand *nand = to_atmel_nand(chip);
+
+ if (!raw)
+ atmel_pmecc_disable(nand->pmecc);
+}
+
+static int atmel_nand_pmecc_generate_eccbytes(struct nand_chip *chip, bool raw)
+{
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_nand_controller *nc;
+ struct mtd_oob_region oobregion;
+ void *eccbuf;
+ int ret, i;
+
+ nc = to_nand_controller(chip->controller);
+
+ if (raw)
+ return 0;
+
+ ret = atmel_pmecc_wait_rdy(nand->pmecc);
+ if (ret) {
+ dev_err(nc->dev,
+ "Failed to transfer NAND page data (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ eccbuf = chip->oob_poi + oobregion.offset;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ atmel_pmecc_get_generated_eccbytes(nand->pmecc, i,
+ eccbuf);
+ eccbuf += chip->ecc.bytes;
+ }
+
+ return 0;
+}
+
+static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
+ bool raw)
+{
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_nand_controller *nc;
+ struct mtd_oob_region oobregion;
+ int ret, i, max_bitflips = 0;
+ void *databuf, *eccbuf;
+
+ nc = to_nand_controller(chip->controller);
+
+ if (raw)
+ return 0;
+
+ ret = atmel_pmecc_wait_rdy(nand->pmecc);
+ if (ret) {
+ dev_err(nc->dev,
+ "Failed to read NAND page data (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ eccbuf = chip->oob_poi + oobregion.offset;
+ databuf = buf;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ ret = atmel_pmecc_correct_sector(nand->pmecc, i, databuf,
+ eccbuf);
+ if (ret < 0 && !atmel_pmecc_correct_erased_chunks(nand->pmecc))
+ ret = nand_check_erased_ecc_chunk(databuf,
+ chip->ecc.size,
+ eccbuf,
+ chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+
+ if (ret >= 0) {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max(ret, max_bitflips);
+ } else {
+ mtd->ecc_stats.failed++;
+ }
+
+ databuf += chip->ecc.size;
+ eccbuf += chip->ecc.bytes;
+ }
+
+ return max_bitflips;
+}
+
+static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
+ bool oob_required, int page, bool raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ int ret;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
+ if (ret)
+ return ret;
+
+ nand_write_data_op(chip, buf, mtd->writesize, false);
+
+ ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
+ if (ret) {
+ atmel_pmecc_disable(nand->pmecc);
+ return ret;
+ }
+
+ atmel_nand_pmecc_disable(chip, raw);
+
+ nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int atmel_nand_pmecc_write_page(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, false);
+}
+
+static int atmel_nand_pmecc_write_page_raw(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, true);
+}
+
+static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
+ bool oob_required, int page, bool raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
+ if (ret)
+ return ret;
+
+ ret = nand_read_data_op(chip, buf, mtd->writesize, false, false);
+ if (ret)
+ goto out_disable;
+
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, false);
+ if (ret)
+ goto out_disable;
+
+ ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
+
+out_disable:
+ atmel_nand_pmecc_disable(chip, raw);
+
+ return ret;
+}
+
+static int atmel_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, false);
+}
+
+static int atmel_nand_pmecc_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, true);
+}
+
+static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
+ const u8 *buf, bool oob_required,
+ int page, bool raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct atmel_hsmc_nand_controller *nc;
+ int ret;
+
+ atmel_hsmc_nand_select_target(nand, chip->cur_cs);
+ nc = to_hsmc_nand_controller(chip->controller);
+
+ atmel_nfc_copy_to_sram(chip, buf, false);
+
+ nc->op.cmds[0] = NAND_CMD_SEQIN;
+ nc->op.ncmds = 1;
+ atmel_nfc_set_op_addr(chip, page, 0x0);
+ nc->op.cs = nand->activecs->id;
+ nc->op.data = ATMEL_NFC_WRITE_DATA;
+
+ ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
+ if (ret)
+ return ret;
+
+ ret = atmel_nfc_exec_op(nc, false);
+ if (ret) {
+ atmel_nand_pmecc_disable(chip, raw);
+ dev_err(nc->base.dev,
+ "Failed to transfer NAND page data (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
+
+ atmel_nand_pmecc_disable(chip, raw);
+
+ if (ret)
+ return ret;
+
+ nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int atmel_hsmc_nand_pmecc_write_page(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
+ false);
+}
+
+static int atmel_hsmc_nand_pmecc_write_page_raw(struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
+ true);
+}
+
+static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
+ bool oob_required, int page,
+ bool raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct atmel_hsmc_nand_controller *nc;
+ int ret;
+
+ atmel_hsmc_nand_select_target(nand, chip->cur_cs);
+ nc = to_hsmc_nand_controller(chip->controller);
+
+ /*
+ * The optimized read page accessor only works when the NAND R/B pin is
+ * connected to a native SoC R/B pin. If that's not the case, fall back
+ * to the non-optimized one.
+ */
+ if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
+ return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
+ raw);
+
+ nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READ0;
+
+ if (mtd->writesize > 512)
+ nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READSTART;
+
+ atmel_nfc_set_op_addr(chip, page, 0x0);
+ nc->op.cs = nand->activecs->id;
+ nc->op.data = ATMEL_NFC_READ_DATA;
+
+ ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
+ if (ret)
+ return ret;
+
+ ret = atmel_nfc_exec_op(nc, false);
+ if (ret) {
+ atmel_nand_pmecc_disable(chip, raw);
+ dev_err(nc->base.dev,
+ "Failed to load NAND page data (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ atmel_nfc_copy_from_sram(chip, buf, true);
+
+ ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
+
+ atmel_nand_pmecc_disable(chip, raw);
+
+ return ret;
+}
+
+static int atmel_hsmc_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
+ false);
+}
+
+static int atmel_hsmc_nand_pmecc_read_page_raw(struct nand_chip *chip,
+ u8 *buf, int oob_required,
+ int page)
+{
+ return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
+ true);
+}
+
+static int atmel_nand_pmecc_init(struct nand_chip *chip)
+{
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_device *nanddev = mtd_to_nanddev(mtd);
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct atmel_nand_controller *nc;
+ struct atmel_pmecc_user_req req;
+
+ nc = to_nand_controller(chip->controller);
+
+ if (!nc->pmecc) {
+ dev_err(nc->dev, "HW ECC not supported\n");
+ return -ENOTSUPP;
+ }
+
+ if (nc->caps->legacy_of_bindings) {
+ u32 val;
+
+ if (!of_property_read_u32(nc->dev->of_node, "atmel,pmecc-cap",
+ &val))
+ chip->ecc.strength = val;
+
+ if (!of_property_read_u32(nc->dev->of_node,
+ "atmel,pmecc-sector-size",
+ &val))
+ chip->ecc.size = val;
+ }
+
+ if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
+ req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
+ else if (chip->ecc.strength)
+ req.ecc.strength = chip->ecc.strength;
+ else if (requirements->strength)
+ req.ecc.strength = requirements->strength;
+ else
+ req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
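+ /*
+ * E.g. with no DT override and no "maximize strength" flag, a chip
+ * whose ECC requirements advertise 4 bits per step ends up with
+ * req.ecc.strength = 4 here.
+ */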
+
+ if (chip->ecc.size)
+ req.ecc.sectorsize = chip->ecc.size;
+ else if (requirements->step_size)
+ req.ecc.sectorsize = requirements->step_size;
+ else
+ req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;
+
+ req.pagesize = mtd->writesize;
+ req.oobsize = mtd->oobsize;
+
+ if (mtd->writesize <= 512) {
+ req.ecc.bytes = 4;
+ req.ecc.ooboffset = 0;
+ } else {
+ req.ecc.bytes = mtd->oobsize - 2;
+ req.ecc.ooboffset = ATMEL_PMECC_OOBOFFSET_AUTO;
+ }
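+ /*
+ * E.g. a 2048+64 page yields req.ecc.bytes = 62 here (the maximum
+ * ECC budget); the two remaining OOB bytes are typically left for
+ * the bad block marker.
+ */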
+
+ nand->pmecc = atmel_pmecc_create_user(nc->pmecc, &req);
+ if (IS_ERR(nand->pmecc))
+ return PTR_ERR(nand->pmecc);
+
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
+ chip->ecc.size = req.ecc.sectorsize;
+ chip->ecc.bytes = req.ecc.bytes / req.ecc.nsectors;
+ chip->ecc.strength = req.ecc.strength;
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+
+ return 0;
+}
+
+static int atmel_nand_ecc_init(struct nand_chip *chip)
+{
+ struct atmel_nand_controller *nc;
+ int ret;
+
+ nc = to_nand_controller(chip->controller);
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ /*
+ * Nothing to do, the core will initialize everything for us.
+ */
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = atmel_nand_pmecc_init(chip);
+ if (ret)
+ return ret;
+
+ chip->ecc.read_page = atmel_nand_pmecc_read_page;
+ chip->ecc.write_page = atmel_nand_pmecc_write_page;
+ chip->ecc.read_page_raw = atmel_nand_pmecc_read_page_raw;
+ chip->ecc.write_page_raw = atmel_nand_pmecc_write_page_raw;
+ break;
+
+ default:
+ /* Other modes are not supported. */
+ dev_err(nc->dev, "Unsupported ECC mode: %d\n",
+ chip->ecc.engine_type);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int atmel_hsmc_nand_ecc_init(struct nand_chip *chip)
+{
+ int ret;
+
+ ret = atmel_nand_ecc_init(chip);
+ if (ret)
+ return ret;
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ /* Adjust the ECC operations for the HSMC IP. */
+ chip->ecc.read_page = atmel_hsmc_nand_pmecc_read_page;
+ chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
+ chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
+ chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
+
+ return 0;
+}
+
+static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
+ const struct nand_interface_config *conf,
+ struct atmel_smc_cs_conf *smcconf)
+{
+ u32 ncycles, totalcycles, timeps, mckperiodps;
+ struct atmel_nand_controller *nc;
+ int ret;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ /* DDR interface not supported. */
+ if (!nand_interface_is_sdr(conf))
+ return -ENOTSUPP;
+
+ /*
+ * tRC < 30 ns implies EDO mode, which this controller does not
+ * support.
+ */
+ if (conf->timings.sdr.tRC_min < 30000)
+ return -ENOTSUPP;
+
+ atmel_smc_cs_conf_init(smcconf);
+
+ mckperiodps = NSEC_PER_SEC / clk_get_rate(nc->mck);
+ mckperiodps *= 1000;
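+ /*
+ * E.g. a 100 MHz MCK gives mckperiodps = (1000000000 / 100000000) *
+ * 1000 = 10000 ps; every timing below is converted into a number of
+ * MCK cycles of this period with DIV_ROUND_UP().
+ */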
+
+ /*
+ * Set write pulse timing. This one is easy to extract:
+ *
+ * NWE_PULSE = tWP
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tWP_min, mckperiodps);
+ totalcycles = ncycles;
+ ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NWE_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+ * The write setup timing depends on the operation done on the NAND.
+ * All operations go through the same data bus, but the operation
+ * type depends on the address we are writing to (ALE/CLE address
+ * lines).
+ * Since we have no way to distinguish the operations at the SMC
+ * level, we must consider the worst case (the longest setup time
+ * among all operation types):
+ *
+ * NWE_SETUP = max(tCLS, tCS, tALS, tDS) - NWE_PULSE
+ */
+ timeps = max3(conf->timings.sdr.tCLS_min, conf->timings.sdr.tCS_min,
+ conf->timings.sdr.tALS_min);
+ timeps = max(timeps, conf->timings.sdr.tDS_min);
+ ncycles = DIV_ROUND_UP(timeps, mckperiodps);
+ ncycles = ncycles > totalcycles ? ncycles - totalcycles : 0;
+ totalcycles += ncycles;
+ ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NWE_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
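+ /*
+ * Example with a 10 ns MCK cycle: tWP = 15 ns gives NWE_PULSE = 2
+ * cycles, and max(tCLS, tCS, tALS, tDS) = 25 ns then gives
+ * NWE_SETUP = 3 - 2 = 1 cycle.
+ */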
+
+ /*
+ * As for the write setup timing, the write hold timing depends on the
+ * operation done on the NAND:
+ *
+ * NWE_HOLD = max(tCLH, tCH, tALH, tDH, tWH)
+ */
+ timeps = max3(conf->timings.sdr.tCLH_min, conf->timings.sdr.tCH_min,
+ conf->timings.sdr.tALH_min);
+ timeps = max3(timeps, conf->timings.sdr.tDH_min,
+ conf->timings.sdr.tWH_min);
+ ncycles = DIV_ROUND_UP(timeps, mckperiodps);
+ totalcycles += ncycles;
+
+ /*
+ * The write cycle timing directly matches tWC, but also depends on the
+ * setup and hold timings we calculated earlier, which gives:
+ *
+ * NWE_CYCLE = max(tWC, NWE_SETUP + NWE_PULSE + NWE_HOLD)
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tWC_min, mckperiodps);
+ ncycles = max(totalcycles, ncycles);
+ ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NWE_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+ * We don't want the CS line to be toggled between each byte/word
+ * transfer to the NAND. The only way to guarantee that is to have the
+ * NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
+ *
+ * NCS_WR_PULSE = NWE_CYCLE
+ */
+ ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_WR_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+ * As for the write setup timing, the read hold timing depends on the
+ * operation done on the NAND:
+ *
+ * NRD_HOLD = max(tREH, tRHOH)
+ */
+ timeps = max(conf->timings.sdr.tREH_min, conf->timings.sdr.tRHOH_min);
+ ncycles = DIV_ROUND_UP(timeps, mckperiodps);
+ totalcycles = ncycles;
+
+ /*
+ * TDF = tRHZ - NRD_HOLD
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tRHZ_max, mckperiodps);
+ ncycles -= totalcycles;
+
+ /*
+ * In the ONFI 4.0 spec, tRHZ was increased to support EDO NANDs, and
+ * we might end up with a config that does not fit in the TDF field.
+ * Just take the max value in this case and hope that the NAND is more
+ * tolerant than advertised.
+ */
+ if (ncycles > ATMEL_SMC_MODE_TDF_MAX)
+ ncycles = ATMEL_SMC_MODE_TDF_MAX;
+ else if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
+ ncycles = ATMEL_SMC_MODE_TDF_MIN;
+
+ smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles) |
+ ATMEL_SMC_MODE_TDFMODE_OPTIMIZED;
+
+ /*
+ * Read pulse timing directly matches tRP:
+ *
+ * NRD_PULSE = tRP
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tRP_min, mckperiodps);
+ totalcycles += ncycles;
+ ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+ * The read cycle timing directly matches tRC, but also depends on the
+ * setup and hold timings we calculated earlier, which gives:
+ *
+ * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
+ *
+ * NRD_SETUP is always 0.
+ */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
+ ncycles = max(totalcycles, ncycles);
+ ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NRD_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /*
+ * We don't want the CS line to be toggled between each byte/word
+ * transfer from the NAND. The only way to guarantee that is to have
+ * the NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
+ *
+ * NCS_RD_PULSE = NRD_CYCLE
+ */
+ ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_RD_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /* The Txxx timings directly match the corresponding tXXX ones. */
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tCLR_min, mckperiodps);
+ ret = atmel_smc_cs_conf_set_timing(smcconf,
+ ATMEL_HSMC_TIMINGS_TCLR_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tADL_min, mckperiodps);
+ ret = atmel_smc_cs_conf_set_timing(smcconf,
+ ATMEL_HSMC_TIMINGS_TADL_SHIFT,
+ ncycles);
+ /*
+ * Version 4 of the ONFI spec mandates that tADL be at least 400
+ * nanoseconds, but, depending on the master clock rate, 400 ns may not
+ * fit in the tADL field of the SMC reg. We need to relax the check and
+ * accept the -ERANGE return code.
+ *
+ * Note that previous versions of the ONFI spec had a lower tADL_min
+ * (100 or 200 ns). It's not clear why this timing constraint was
+ * increased, but most NANDs seem fine with values lower than 400 ns,
+ * so we should be safe.
+ */
+ if (ret && ret != -ERANGE)
+ return ret;
+
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);
+ ret = atmel_smc_cs_conf_set_timing(smcconf,
+ ATMEL_HSMC_TIMINGS_TAR_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tRR_min, mckperiodps);
+ ret = atmel_smc_cs_conf_set_timing(smcconf,
+ ATMEL_HSMC_TIMINGS_TRR_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ ncycles = DIV_ROUND_UP(conf->timings.sdr.tWB_max, mckperiodps);
+ ret = atmel_smc_cs_conf_set_timing(smcconf,
+ ATMEL_HSMC_TIMINGS_TWB_SHIFT,
+ ncycles);
+ if (ret)
+ return ret;
+
+ /* Attach the CS line to the NFC logic. */
+ smcconf->timings |= ATMEL_HSMC_TIMINGS_NFSEL;
+
+ /* Set the appropriate data bus width. */
+ if (nand->base.options & NAND_BUSWIDTH_16)
+ smcconf->mode |= ATMEL_SMC_MODE_DBW_16;
+
+ /* Operate in NRD/NWE READ/WRITEMODE. */
+ smcconf->mode |= ATMEL_SMC_MODE_READMODE_NRD |
+ ATMEL_SMC_MODE_WRITEMODE_NWE;
+
+ return 0;
+}
+
+static int atmel_smc_nand_setup_interface(struct atmel_nand *nand,
+ int csline,
+ const struct nand_interface_config *conf)
+{
+ struct atmel_nand_controller *nc;
+ struct atmel_smc_cs_conf smcconf;
+ struct atmel_nand_cs *cs;
+ int ret;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
+ if (ret)
+ return ret;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ cs = &nand->cs[csline];
+ cs->smcconf = smcconf;
+ atmel_smc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf);
+
+ return 0;
+}
+
+static int atmel_hsmc_nand_setup_interface(struct atmel_nand *nand,
+ int csline,
+ const struct nand_interface_config *conf)
+{
+ struct atmel_hsmc_nand_controller *nc;
+ struct atmel_smc_cs_conf smcconf;
+ struct atmel_nand_cs *cs;
+ int ret;
+
+ nc = to_hsmc_nand_controller(nand->base.controller);
+
+ ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
+ if (ret)
+ return ret;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ cs = &nand->cs[csline];
+ cs->smcconf = smcconf;
+
+ if (cs->rb.type == ATMEL_NAND_NATIVE_RB)
+ cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id);
+
+ atmel_hsmc_cs_conf_apply(nc->base.smc, nc->hsmc_layout, cs->id,
+ &cs->smcconf);
+
+ return 0;
+}
+
+static int atmel_nand_setup_interface(struct nand_chip *chip, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ const struct nand_sdr_timings *sdr;
+ struct atmel_nand_controller *nc;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ nc = to_nand_controller(nand->base.controller);
+
+ if (csline >= nand->numcs ||
+ (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY))
+ return -EINVAL;
+
+ return nc->caps->ops->setup_interface(nand, csline, conf);
+}
+
+static int atmel_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct atmel_nand_controller *nc;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ return nc->caps->ops->exec_op(nand, op, check_only);
+}
+
+static void atmel_nand_init(struct atmel_nand_controller *nc,
+ struct atmel_nand *nand)
+{
+ struct nand_chip *chip = &nand->base;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ mtd->dev.parent = nc->dev;
+ nand->base.controller = &nc->base;
+
+ if (!nc->mck || !nc->caps->ops->setup_interface)
+ chip->options |= NAND_KEEP_TIMINGS;
+
+ /*
+ * Use a bounce buffer when the buffer passed by the MTD user is not
+ * suitable for DMA.
+ */
+ if (nc->dmac)
+ chip->options |= NAND_USES_DMA;
+
+ /* Default to HW ECC if pmecc is available. */
+ if (nc->pmecc)
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+}
+
+static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
+ struct atmel_nand *nand)
+{
+ struct nand_chip *chip = &nand->base;
+ struct atmel_smc_nand_controller *smc_nc;
+ int i;
+
+ atmel_nand_init(nc, nand);
+
+ smc_nc = to_smc_nand_controller(chip->controller);
+ if (!smc_nc->ebi_csa_regmap)
+ return;
+
+ /* Attach the CS to the NAND Flash logic. */
+ for (i = 0; i < nand->numcs; i++)
+ regmap_update_bits(smc_nc->ebi_csa_regmap,
+ smc_nc->ebi_csa->offs,
+ BIT(nand->cs[i].id), BIT(nand->cs[i].id));
+
+ if (smc_nc->ebi_csa->nfd0_on_d16)
+ regmap_update_bits(smc_nc->ebi_csa_regmap,
+ smc_nc->ebi_csa->offs,
+ smc_nc->ebi_csa->nfd0_on_d16,
+ smc_nc->ebi_csa->nfd0_on_d16);
+}
+
+static int atmel_nand_controller_remove_nand(struct atmel_nand *nand)
+{
+ struct nand_chip *chip = &nand->base;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = mtd_device_unregister(mtd);
+ if (ret)
+ return ret;
+
+ nand_cleanup(chip);
+ list_del(&nand->node);
+
+ return 0;
+}
+
+static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
+ struct device_node *np,
+ int reg_cells)
+{
+ struct atmel_nand *nand;
+ struct gpio_desc *gpio;
+ int numcs, ret, i;
+
+ numcs = of_property_count_elems_of_size(np, "reg",
+ reg_cells * sizeof(u32));
+ if (numcs < 1) {
+ dev_err(nc->dev, "Missing or invalid reg property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ nand = devm_kzalloc(nc->dev, struct_size(nand, cs, numcs), GFP_KERNEL);
+ if (!nand)
+ return ERR_PTR(-ENOMEM);
+
+ nand->numcs = numcs;
+
+ gpio = devm_fwnode_gpiod_get(nc->dev, of_fwnode_handle(np),
+ "det", GPIOD_IN, "nand-det");
+ if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
+ dev_err(nc->dev,
+ "Failed to get detect gpio (err = %ld)\n",
+ PTR_ERR(gpio));
+ return ERR_CAST(gpio);
+ }
+
+ if (!IS_ERR(gpio))
+ nand->cdgpio = gpio;
+
+ for (i = 0; i < numcs; i++) {
+ struct resource res;
+ u32 val;
+
+ ret = of_address_to_resource(np, i, &res);
+ if (ret) {
+ dev_err(nc->dev, "Invalid reg property (err = %d)\n",
+ ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = of_property_read_u32_index(np, "reg", i * reg_cells,
+ &val);
+ if (ret) {
+ dev_err(nc->dev, "Invalid reg property (err = %d)\n",
+ ret);
+ return ERR_PTR(ret);
+ }
+
+ nand->cs[i].id = val;
+
+ nand->cs[i].io.dma = res.start;
+ nand->cs[i].io.virt = devm_ioremap_resource(nc->dev, &res);
+ if (IS_ERR(nand->cs[i].io.virt))
+ return ERR_CAST(nand->cs[i].io.virt);
+
+ if (!of_property_read_u32(np, "atmel,rb", &val)) {
+ if (val > ATMEL_NFC_MAX_RB_ID)
+ return ERR_PTR(-EINVAL);
+
+ nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
+ nand->cs[i].rb.id = val;
+ } else {
+ gpio = devm_fwnode_gpiod_get_index(nc->dev,
+ of_fwnode_handle(np),
+ "rb", i, GPIOD_IN,
+ "nand-rb");
+ if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
+ dev_err(nc->dev,
+ "Failed to get R/B gpio (err = %ld)\n",
+ PTR_ERR(gpio));
+ return ERR_CAST(gpio);
+ }
+
+ if (!IS_ERR(gpio)) {
+ nand->cs[i].rb.type = ATMEL_NAND_GPIO_RB;
+ nand->cs[i].rb.gpio = gpio;
+ }
+ }
+
+ gpio = devm_fwnode_gpiod_get_index(nc->dev,
+ of_fwnode_handle(np),
+ "cs", i, GPIOD_OUT_HIGH,
+ "nand-cs");
+ if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
+ dev_err(nc->dev,
+ "Failed to get CS gpio (err = %ld)\n",
+ PTR_ERR(gpio));
+ return ERR_CAST(gpio);
+ }
+
+ if (!IS_ERR(gpio))
+ nand->cs[i].csgpio = gpio;
+ }
+
+ nand_set_flash_node(&nand->base, np);
+
+ return nand;
+}
+
+static int
+atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
+ struct atmel_nand *nand)
+{
+ struct nand_chip *chip = &nand->base;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /* No card inserted, skip this NAND. */
+ if (nand->cdgpio && gpiod_get_value(nand->cdgpio)) {
+ dev_info(nc->dev, "No SmartMedia card inserted.\n");
+ return 0;
+ }
+
+ nc->caps->ops->nand_init(nc, nand);
+
+ ret = nand_scan(chip, nand->numcs);
+ if (ret) {
+ dev_err(nc->dev, "NAND scan failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&nand->node, &nc->chips);
+
+ return 0;
+}
+
+static int
+atmel_nand_controller_remove_nands(struct atmel_nand_controller *nc)
+{
+ struct atmel_nand *nand, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(nand, tmp, &nc->chips, node) {
+ ret = atmel_nand_controller_remove_nand(nand);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
+{
+ struct device *dev = nc->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct atmel_nand *nand;
+ struct gpio_desc *gpio;
+ struct resource *res;
+
+ /*
+ * Legacy bindings only allow connecting a single NAND with a unique CS
+ * line to the controller.
+ */
+ nand = devm_kzalloc(nc->dev, sizeof(*nand) + sizeof(*nand->cs),
+ GFP_KERNEL);
+ if (!nand)
+ return -ENOMEM;
+
+ nand->numcs = 1;
+
+ nand->cs[0].io.virt = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(nand->cs[0].io.virt))
+ return PTR_ERR(nand->cs[0].io.virt);
+
+ nand->cs[0].io.dma = res->start;
+
+ /*
+ * The old driver hardcoded the CS id to 3 for all sama5
+ * controllers. Since this id is only meaningful on sama5, we can
+ * safely assign it to 3 no matter the controller.
+ * Anyone who wants to connect a NAND to a different CS line has to
+ * use the new bindings.
+ */
+ nand->cs[0].id = 3;
+
+ /* R/B GPIO. */
+ gpio = devm_gpiod_get_index_optional(dev, NULL, 0, GPIOD_IN);
+ if (IS_ERR(gpio)) {
+ dev_err(dev, "Failed to get R/B gpio (err = %ld)\n",
+ PTR_ERR(gpio));
+ return PTR_ERR(gpio);
+ }
+
+ if (gpio) {
+ nand->cs[0].rb.type = ATMEL_NAND_GPIO_RB;
+ nand->cs[0].rb.gpio = gpio;
+ }
+
+ /* CS GPIO. */
+ gpio = devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio)) {
+ dev_err(dev, "Failed to get CS gpio (err = %ld)\n",
+ PTR_ERR(gpio));
+ return PTR_ERR(gpio);
+ }
+
+ nand->cs[0].csgpio = gpio;
+
+ /* Card detect GPIO. */
+ gpio = devm_gpiod_get_index_optional(nc->dev, NULL, 2, GPIOD_IN);
+ if (IS_ERR(gpio)) {
+ dev_err(dev,
+ "Failed to get detect gpio (err = %ld)\n",
+ PTR_ERR(gpio));
+ return PTR_ERR(gpio);
+ }
+
+ nand->cdgpio = gpio;
+
+ nand_set_flash_node(&nand->base, nc->dev->of_node);
+
+ return atmel_nand_controller_add_nand(nc, nand);
+}
+
+static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
+{
+ struct device_node *np, *nand_np;
+ struct device *dev = nc->dev;
+ int ret, reg_cells;
+ u32 val;
+
+ /* Legacy bindings declare a single NAND directly on the controller node. */
+ if (nc->caps->legacy_of_bindings)
+ return atmel_nand_controller_legacy_add_nands(nc);
+
+ np = dev->of_node;
+
+ ret = of_property_read_u32(np, "#address-cells", &val);
+ if (ret) {
+ dev_err(dev, "missing #address-cells property\n");
+ return ret;
+ }
+
+ reg_cells = val;
+
+ ret = of_property_read_u32(np, "#size-cells", &val);
+ if (ret) {
+ dev_err(dev, "missing #size-cells property\n");
+ return ret;
+ }
+
+ reg_cells += val;
+
+ for_each_child_of_node(np, nand_np) {
+ struct atmel_nand *nand;
+
+ nand = atmel_nand_create(nc, nand_np, reg_cells);
+ if (IS_ERR(nand)) {
+ ret = PTR_ERR(nand);
+ goto err;
+ }
+
+ ret = atmel_nand_controller_add_nand(nc, nand);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ atmel_nand_controller_remove_nands(nc);
+
+ return ret;
+}
+
+static void atmel_nand_controller_cleanup(struct atmel_nand_controller *nc)
+{
+ if (nc->dmac)
+ dma_release_channel(nc->dmac);
+
+ clk_put(nc->mck);
+}
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9260_ebi_csa = {
+ .offs = AT91SAM9260_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9261_ebi_csa = {
+ .offs = AT91SAM9261_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9263_ebi_csa = {
+ .offs = AT91SAM9263_MATRIX_EBI0CSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9rl_ebi_csa = {
+ .offs = AT91SAM9RL_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9g45_ebi_csa = {
+ .offs = AT91SAM9G45_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9n12_ebi_csa = {
+ .offs = AT91SAM9N12_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg at91sam9x5_ebi_csa = {
+ .offs = AT91SAM9X5_MATRIX_EBICSA,
+};
+
+static const struct atmel_smc_nand_ebi_csa_cfg sam9x60_ebi_csa = {
+ .offs = AT91_SFR_CCFG_EBICSA,
+ .nfd0_on_d16 = AT91_SFR_CCFG_NFD0_ON_D16,
+};
+
+static const struct of_device_id __maybe_unused atmel_ebi_csa_regmap_of_ids[] = {
+ {
+ .compatible = "atmel,at91sam9260-matrix",
+ .data = &at91sam9260_ebi_csa,
+ },
+ {
+ .compatible = "atmel,at91sam9261-matrix",
+ .data = &at91sam9261_ebi_csa,
+ },
+ {
+ .compatible = "atmel,at91sam9263-matrix",
+ .data = &at91sam9263_ebi_csa,
+ },
+ {
+ .compatible = "atmel,at91sam9rl-matrix",
+ .data = &at91sam9rl_ebi_csa,
+ },
+ {
+ .compatible = "atmel,at91sam9g45-matrix",
+ .data = &at91sam9g45_ebi_csa,
+ },
+ {
+ .compatible = "atmel,at91sam9n12-matrix",
+ .data = &at91sam9n12_ebi_csa,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-matrix",
+ .data = &at91sam9x5_ebi_csa,
+ },
+ {
+ .compatible = "microchip,sam9x60-sfr",
+ .data = &sam9x60_ebi_csa,
+ },
+ { /* sentinel */ },
+};
+
+static int atmel_nand_attach_chip(struct nand_chip *chip)
+{
+ struct atmel_nand_controller *nc = to_nand_controller(chip->controller);
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nc->caps->ops->ecc_init(chip);
+ if (ret)
+ return ret;
+
+ if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
+ /*
+ * We keep the MTD name unchanged to avoid breaking platforms
+ * where the MTD cmdline parser is used and the bootloader
+ * has not been updated to use the new naming scheme.
+ */
+ mtd->name = "atmel_nand";
+ } else if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+ * updated to pass a new mtdparts parameter on the cmdline, you
+ * should define the following property in your nand node:
+ *
+ * label = "atmel_nand";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
+ "%s:nand.%d", dev_name(nc->dev),
+ nand->cs[0].id);
+ if (!mtd->name) {
+ dev_err(nc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops atmel_nand_controller_ops = {
+ .attach_chip = atmel_nand_attach_chip,
+ .setup_interface = atmel_nand_setup_interface,
+ .exec_op = atmel_nand_exec_op,
+};
+
+static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
+ struct platform_device *pdev,
+ const struct atmel_nand_controller_caps *caps)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ nand_controller_init(&nc->base);
+ nc->base.ops = &atmel_nand_controller_ops;
+ INIT_LIST_HEAD(&nc->chips);
+ nc->dev = dev;
+ nc->caps = caps;
+
+ platform_set_drvdata(pdev, nc);
+
+ nc->pmecc = devm_atmel_pmecc_get(dev);
+ if (IS_ERR(nc->pmecc))
+ return dev_err_probe(dev, PTR_ERR(nc->pmecc),
+ "Could not get PMECC object\n");
+
+ if (nc->caps->has_dma && !atmel_nand_avoid_dma) {
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ nc->dmac = dma_request_channel(mask, NULL, NULL);
+ if (!nc->dmac)
+ dev_err(nc->dev, "Failed to request DMA channel\n");
+ }
+
+ /* We do not retrieve the SMC syscon when parsing old DTs. */
+ if (nc->caps->legacy_of_bindings)
+ return 0;
+
+ nc->mck = of_clk_get(dev->parent->of_node, 0);
+ if (IS_ERR(nc->mck)) {
+ dev_err(dev, "Failed to retrieve MCK clk\n");
+ ret = PTR_ERR(nc->mck);
+ goto out_release_dma;
+ }
+
+ np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
+ if (!np) {
+ dev_err(dev, "Missing or invalid atmel,smc property\n");
+ ret = -EINVAL;
+ goto out_release_dma;
+ }
+
+ nc->smc = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(nc->smc)) {
+ ret = PTR_ERR(nc->smc);
+ dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret);
+ goto out_release_dma;
+ }
+
+ return 0;
+
+out_release_dma:
+ if (nc->dmac)
+ dma_release_channel(nc->dmac);
+
+ return ret;
+}
+
+static int
+atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
+{
+ struct device *dev = nc->base.dev;
+ const struct of_device_id *match;
+ struct device_node *np;
+ int ret;
+
+ /* We do not retrieve the EBICSA regmap when parsing old DTs. */
+ if (nc->base.caps->legacy_of_bindings)
+ return 0;
+
+ np = of_parse_phandle(dev->parent->of_node,
+ nc->base.caps->ebi_csa_regmap_name, 0);
+ if (!np)
+ return 0;
+
+ match = of_match_node(atmel_ebi_csa_regmap_of_ids, np);
+ if (!match) {
+ of_node_put(np);
+ return 0;
+ }
+
+ nc->ebi_csa_regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(nc->ebi_csa_regmap)) {
+ ret = PTR_ERR(nc->ebi_csa_regmap);
+ dev_err(dev, "Could not get EBICSA regmap (err = %d)\n", ret);
+ return ret;
+ }
+
+ nc->ebi_csa = (struct atmel_smc_nand_ebi_csa_cfg *)match->data;
+
+ /*
+ * The at91sam9263 has 2 EBIs; if the NAND controller is connected
+ * to EBI1, add 4 to ->ebi_csa->offs.
+ */
+ if (of_device_is_compatible(dev->parent->of_node,
+ "atmel,at91sam9263-ebi1"))
+ nc->ebi_csa->offs += 4;
+
+ return 0;
+}
+
+static int
+atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
+{
+ struct regmap_config regmap_conf = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ struct device *dev = nc->base.dev;
+ struct device_node *nand_np, *nfc_np;
+ void __iomem *iomem;
+ struct resource res;
+ int ret;
+
+ nand_np = dev->of_node;
+ nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
+ if (!nfc_np) {
+ dev_err(dev, "Could not find device node for sama5d3-nfc\n");
+ return -ENODEV;
+ }
+
+ nc->clk = of_clk_get(nfc_np, 0);
+ if (IS_ERR(nc->clk)) {
+ ret = PTR_ERR(nc->clk);
+ dev_err(dev, "Failed to retrieve HSMC clock (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ ret = clk_prepare_enable(nc->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable the HSMC clock (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ nc->irq = of_irq_get(nand_np, 0);
+ if (nc->irq <= 0) {
+ ret = nc->irq ?: -ENXIO;
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get IRQ number (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ ret = of_address_to_resource(nfc_np, 0, &res);
+ if (ret) {
+ dev_err(dev, "Invalid or missing NFC IO resource (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ iomem = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(iomem)) {
+ ret = PTR_ERR(iomem);
+ goto out;
+ }
+
+ regmap_conf.name = "nfc-io";
+ regmap_conf.max_register = resource_size(&res) - 4;
+ nc->io = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
+ if (IS_ERR(nc->io)) {
+ ret = PTR_ERR(nc->io);
+ dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ ret = of_address_to_resource(nfc_np, 1, &res);
+ if (ret) {
+ dev_err(dev, "Invalid or missing HSMC resource (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ iomem = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(iomem)) {
+ ret = PTR_ERR(iomem);
+ goto out;
+ }
+
+ regmap_conf.name = "smc";
+ regmap_conf.max_register = resource_size(&res) - 4;
+ nc->base.smc = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
+ if (IS_ERR(nc->base.smc)) {
+ ret = PTR_ERR(nc->base.smc);
+ dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ ret = of_address_to_resource(nfc_np, 2, &res);
+ if (ret) {
+ dev_err(dev, "Invalid or missing SRAM resource (err = %d)\n",
+ ret);
+ goto out;
+ }
+
+ nc->sram.virt = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(nc->sram.virt)) {
+ ret = PTR_ERR(nc->sram.virt);
+ goto out;
+ }
+
+ nc->sram.dma = res.start;
+
+out:
+ of_node_put(nfc_np);
+
+ return ret;
+}
+
+static int
+atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc)
+{
+ struct device *dev = nc->base.dev;
+ struct device_node *np;
+ int ret;
+
+ np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
+ if (!np) {
+ dev_err(dev, "Missing or invalid atmel,smc property\n");
+ return -EINVAL;
+ }
+
+ nc->hsmc_layout = atmel_hsmc_get_reg_layout(np);
+
+ nc->irq = of_irq_get(np, 0);
+ of_node_put(np);
+ if (nc->irq <= 0) {
+ ret = nc->irq ?: -ENXIO;
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get IRQ number (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ np = of_parse_phandle(dev->of_node, "atmel,nfc-io", 0);
+ if (!np) {
+ dev_err(dev, "Missing or invalid atmel,nfc-io property\n");
+ return -EINVAL;
+ }
+
+ nc->io = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(nc->io)) {
+ ret = PTR_ERR(nc->io);
+ dev_err(dev, "Could not get NFC IO regmap (err = %d)\n", ret);
+ return ret;
+ }
+
+ nc->sram.pool = of_gen_pool_get(nc->base.dev->of_node,
+ "atmel,nfc-sram", 0);
+ if (!nc->sram.pool) {
+ dev_err(nc->base.dev, "Missing SRAM\n");
+ return -ENOMEM;
+ }
+
+ nc->sram.virt = (void __iomem *)gen_pool_dma_alloc(nc->sram.pool,
+ ATMEL_NFC_SRAM_SIZE,
+ &nc->sram.dma);
+ if (!nc->sram.virt) {
+ dev_err(nc->base.dev,
+ "Could not allocate memory from the NFC SRAM pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+atmel_hsmc_nand_controller_remove(struct atmel_nand_controller *nc)
+{
+ struct atmel_hsmc_nand_controller *hsmc_nc;
+ int ret;
+
+ ret = atmel_nand_controller_remove_nands(nc);
+ if (ret)
+ return ret;
+
+ hsmc_nc = container_of(nc, struct atmel_hsmc_nand_controller, base);
+ regmap_write(hsmc_nc->base.smc, ATMEL_HSMC_NFC_CTRL,
+ ATMEL_HSMC_NFC_CTRL_DIS);
+
+ if (hsmc_nc->sram.pool)
+ gen_pool_free(hsmc_nc->sram.pool,
+ (unsigned long)hsmc_nc->sram.virt,
+ ATMEL_NFC_SRAM_SIZE);
+
+ if (hsmc_nc->clk) {
+ clk_disable_unprepare(hsmc_nc->clk);
+ clk_put(hsmc_nc->clk);
+ }
+
+ atmel_nand_controller_cleanup(nc);
+
+ return 0;
+}
+
+static int atmel_hsmc_nand_controller_probe(struct platform_device *pdev,
+ const struct atmel_nand_controller_caps *caps)
+{
+ struct device *dev = &pdev->dev;
+ struct atmel_hsmc_nand_controller *nc;
+ int ret;
+
+ nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
+ if (!nc)
+ return -ENOMEM;
+
+ ret = atmel_nand_controller_init(&nc->base, pdev, caps);
+ if (ret)
+ return ret;
+
+ if (caps->legacy_of_bindings)
+ ret = atmel_hsmc_nand_controller_legacy_init(nc);
+ else
+ ret = atmel_hsmc_nand_controller_init(nc);
+
+ if (ret)
+ return ret;
+
+ /* Make sure all irqs are masked before registering our IRQ handler. */
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
+ ret = devm_request_irq(dev, nc->irq, atmel_nfc_interrupt,
+ IRQF_SHARED, "nfc", nc);
+ if (ret) {
+ dev_err(dev,
+ "Could not get register NFC interrupt handler (err = %d)\n",
+ ret);
+ goto err;
+ }
+
+ /* Initial NFC configuration. */
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CFG,
+ ATMEL_HSMC_NFC_CFG_DTO_MAX);
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
+ ATMEL_HSMC_NFC_CTRL_EN);
+
+ ret = atmel_nand_controller_add_nands(&nc->base);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ atmel_hsmc_nand_controller_remove(&nc->base);
+
+ return ret;
+}
+
+static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = {
+ .probe = atmel_hsmc_nand_controller_probe,
+ .remove = atmel_hsmc_nand_controller_remove,
+ .ecc_init = atmel_hsmc_nand_ecc_init,
+ .nand_init = atmel_nand_init,
+ .setup_interface = atmel_hsmc_nand_setup_interface,
+ .exec_op = atmel_hsmc_nand_exec_op,
+};
+
+static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = {
+ .has_dma = true,
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ops = &atmel_hsmc_nc_ops,
+};
+
+/* Only used to parse old bindings. */
+static const struct atmel_nand_controller_caps atmel_sama5_nand_caps = {
+ .has_dma = true,
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ops = &atmel_hsmc_nc_ops,
+ .legacy_of_bindings = true,
+};
+
+static int atmel_smc_nand_controller_probe(struct platform_device *pdev,
+ const struct atmel_nand_controller_caps *caps)
+{
+ struct device *dev = &pdev->dev;
+ struct atmel_smc_nand_controller *nc;
+ int ret;
+
+ nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
+ if (!nc)
+ return -ENOMEM;
+
+ ret = atmel_nand_controller_init(&nc->base, pdev, caps);
+ if (ret)
+ return ret;
+
+ ret = atmel_smc_nand_controller_init(nc);
+ if (ret)
+ return ret;
+
+ return atmel_nand_controller_add_nands(&nc->base);
+}
+
+static int
+atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc)
+{
+ int ret;
+
+ ret = atmel_nand_controller_remove_nands(nc);
+ if (ret)
+ return ret;
+
+ atmel_nand_controller_cleanup(nc);
+
+ return 0;
+}
+
+/*
+ * The SMC reg layout of the at91rm9200 is completely different, which
+ * prevents us from reusing atmel_smc_nand_setup_interface() for the
+ * ->setup_interface() hook.
+ * At this point, there's no support for the at91rm9200 SMC IP, so we leave
+ * ->setup_interface() unassigned.
+ */
+static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
+ .probe = atmel_smc_nand_controller_probe,
+ .remove = atmel_smc_nand_controller_remove,
+ .ecc_init = atmel_nand_ecc_init,
+ .nand_init = atmel_smc_nand_init,
+ .exec_op = atmel_smc_nand_exec_op,
+};
+
+static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "atmel,matrix",
+ .ops = &at91rm9200_nc_ops,
+};
+
+static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
+ .probe = atmel_smc_nand_controller_probe,
+ .remove = atmel_smc_nand_controller_remove,
+ .ecc_init = atmel_nand_ecc_init,
+ .nand_init = atmel_smc_nand_init,
+ .setup_interface = atmel_smc_nand_setup_interface,
+ .exec_op = atmel_smc_nand_exec_op,
+};
+
+static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "atmel,matrix",
+ .ops = &atmel_smc_nc_ops,
+};
+
+static const struct atmel_nand_controller_caps atmel_sam9261_nc_caps = {
+ .ale_offs = BIT(22),
+ .cle_offs = BIT(21),
+ .ebi_csa_regmap_name = "atmel,matrix",
+ .ops = &atmel_smc_nc_ops,
+};
+
+static const struct atmel_nand_controller_caps atmel_sam9g45_nc_caps = {
+ .has_dma = true,
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "atmel,matrix",
+ .ops = &atmel_smc_nc_ops,
+};
+
+static const struct atmel_nand_controller_caps microchip_sam9x60_nc_caps = {
+ .has_dma = true,
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ebi_csa_regmap_name = "microchip,sfr",
+ .ops = &atmel_smc_nc_ops,
+};
+
+/* Only used to parse old bindings. */
+static const struct atmel_nand_controller_caps atmel_rm9200_nand_caps = {
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ops = &atmel_smc_nc_ops,
+ .legacy_of_bindings = true,
+};
+
+static const struct atmel_nand_controller_caps atmel_sam9261_nand_caps = {
+ .ale_offs = BIT(22),
+ .cle_offs = BIT(21),
+ .ops = &atmel_smc_nc_ops,
+ .legacy_of_bindings = true,
+};
+
+static const struct atmel_nand_controller_caps atmel_sam9g45_nand_caps = {
+ .has_dma = true,
+ .ale_offs = BIT(21),
+ .cle_offs = BIT(22),
+ .ops = &atmel_smc_nc_ops,
+ .legacy_of_bindings = true,
+};
+
+static const struct of_device_id atmel_nand_controller_of_ids[] = {
+ {
+ .compatible = "atmel,at91rm9200-nand-controller",
+ .data = &atmel_rm9200_nc_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9260-nand-controller",
+ .data = &atmel_sam9260_nc_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9261-nand-controller",
+ .data = &atmel_sam9261_nc_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9g45-nand-controller",
+ .data = &atmel_sam9g45_nc_caps,
+ },
+ {
+ .compatible = "atmel,sama5d3-nand-controller",
+ .data = &atmel_sama5_nc_caps,
+ },
+ {
+ .compatible = "microchip,sam9x60-nand-controller",
+ .data = &microchip_sam9x60_nc_caps,
+ },
+ /* Support for old/deprecated bindings: */
+ {
+ .compatible = "atmel,at91rm9200-nand",
+ .data = &atmel_rm9200_nand_caps,
+ },
+ {
+ .compatible = "atmel,sama5d4-nand",
+ .data = &atmel_rm9200_nand_caps,
+ },
+ {
+ .compatible = "atmel,sama5d2-nand",
+ .data = &atmel_rm9200_nand_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, atmel_nand_controller_of_ids);
+
+static int atmel_nand_controller_probe(struct platform_device *pdev)
+{
+ const struct atmel_nand_controller_caps *caps;
+
+ if (pdev->id_entry)
+ caps = (void *)pdev->id_entry->driver_data;
+ else
+ caps = of_device_get_match_data(&pdev->dev);
+
+ if (!caps) {
+ dev_err(&pdev->dev, "Could not retrieve NFC caps\n");
+ return -EINVAL;
+ }
+
+ if (caps->legacy_of_bindings) {
+ struct device_node *nfc_node;
+ u32 ale_offs = 21;
+
+ /*
+ * If we are parsing legacy DT props and the DT contains a
+ * valid NFC node, forward the request to the sama5 logic.
+ */
+ nfc_node = of_get_compatible_child(pdev->dev.of_node,
+ "atmel,sama5d3-nfc");
+ if (nfc_node) {
+ caps = &atmel_sama5_nand_caps;
+ of_node_put(nfc_node);
+ }
+
+ /*
+ * Even if the compatible says we are dealing with an
+ * at91rm9200 controller, the atmel,nand-has-dma property states that
+ * this controller supports DMA, which means we are in fact
+ * dealing with an at91sam9g45+ controller.
+ */
+ if (!caps->has_dma &&
+ of_property_read_bool(pdev->dev.of_node,
+ "atmel,nand-has-dma"))
+ caps = &atmel_sam9g45_nand_caps;
+
+ /*
+ * All SoCs except the at91sam9261 assign ALE to A21 and
+ * CLE to A22. If atmel,nand-addr-offset != 21, this means we're
+ * actually dealing with an at91sam9261 controller.
+ */
+ of_property_read_u32(pdev->dev.of_node,
+ "atmel,nand-addr-offset", &ale_offs);
+ if (ale_offs != 21)
+ caps = &atmel_sam9261_nand_caps;
+ }
+
+ return caps->ops->probe(pdev, caps);
+}
+
+static void atmel_nand_controller_remove(struct platform_device *pdev)
+{
+ struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
+
+ WARN_ON(nc->caps->ops->remove(nc));
+}
+
+static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
+{
+ struct atmel_nand_controller *nc = dev_get_drvdata(dev);
+ struct atmel_nand *nand;
+
+ if (nc->pmecc)
+ atmel_pmecc_reset(nc->pmecc);
+
+ list_for_each_entry(nand, &nc->chips, node) {
+ int i;
+
+ for (i = 0; i < nand->numcs; i++)
+ nand_reset(&nand->base, i);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(atmel_nand_controller_pm_ops, NULL,
+ atmel_nand_controller_resume);
+
+static struct platform_driver atmel_nand_controller_driver = {
+ .driver = {
+ .name = "atmel-nand-controller",
+ .of_match_table = atmel_nand_controller_of_ids,
+ .pm = &atmel_nand_controller_pm_ops,
+ },
+ .probe = atmel_nand_controller_probe,
+ .remove_new = atmel_nand_controller_remove,
+};
+module_platform_driver(atmel_nand_controller_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("NAND Flash Controller driver for Atmel SoCs");
+MODULE_ALIAS("platform:atmel-nand-controller");
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c
new file mode 100644
index 0000000000..4d7dc8a9c3
--- /dev/null
+++ b/drivers/mtd/nand/raw/atmel/pmecc.c
@@ -0,0 +1,1015 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 ATMEL
+ * Copyright 2017 Free Electrons
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ * Derived from the atmel_nand.c driver which contained the following
+ * copyrights:
+ *
+ * Copyright 2003 Rick Bronson
+ *
+ * Derived from drivers/mtd/nand/autcpu12.c (removed in v3.8)
+ * Copyright 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ * Derived from drivers/mtd/spia.c (removed in v3.8)
+ * Copyright 2000 Steven J. Hill (sjhill@cotw.com)
+ *
+ * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
+ * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright 2007
+ *
+ * Derived from Das U-Boot source code
+ * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
+ * Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
+ *
+ * Add Programmable Multibit ECC support for various AT91 SoC
+ * Copyright 2012 ATMEL, Hong Xu
+ *
+ * Add Nand Flash Controller support for SAMA5 SoC
+ * Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
+ *
+ * The PMECC is a hardware-assisted BCH engine, which means part of the
+ * ECC algorithm is left to the software. The hardware/software split
+ * is explained in the "PMECC Controller Functional Description" chapter in
+ * Atmel datasheets, and some of the functions in this file are directly
+ * implementing the algorithms described in the "Software Implementation"
+ * sub-section.
+ *
+ * TODO: the software BCH implementation in lib/bch.c already provides
+ * some of the logic we are implementing here. It would be smart to
+ * expose the needed lib/bch.c helpers/functions and reuse them here.
+ */
+
+#include <linux/genalloc.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "pmecc.h"
+
+/* Galois field dimension */
+#define PMECC_GF_DIMENSION_13 13
+#define PMECC_GF_DIMENSION_14 14
+
+/* Primitive Polynomial used by PMECC */
+#define PMECC_GF_13_PRIMITIVE_POLY 0x201b
+#define PMECC_GF_14_PRIMITIVE_POLY 0x4443
+
+#define PMECC_LOOKUP_TABLE_SIZE_512 0x2000
+#define PMECC_LOOKUP_TABLE_SIZE_1024 0x4000
+
+/* Timeout value for reading the PMECC status register */
+#define PMECC_MAX_TIMEOUT_MS 100
+
+/* PMECC Register Definitions */
+#define ATMEL_PMECC_CFG 0x0
+#define PMECC_CFG_BCH_STRENGTH(x) (x)
+#define PMECC_CFG_BCH_STRENGTH_MASK GENMASK(2, 0)
+#define PMECC_CFG_SECTOR512 (0 << 4)
+#define PMECC_CFG_SECTOR1024 (1 << 4)
+#define PMECC_CFG_NSECTORS(x) ((fls(x) - 1) << 8)
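+/* e.g. PMECC_CFG_NSECTORS(8) = (fls(8) - 1) << 8 = 3 << 8 */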
+#define PMECC_CFG_READ_OP (0 << 12)
+#define PMECC_CFG_WRITE_OP (1 << 12)
+#define PMECC_CFG_SPARE_ENABLE BIT(16)
+#define PMECC_CFG_AUTO_ENABLE BIT(20)
+
+#define ATMEL_PMECC_SAREA 0x4
+#define ATMEL_PMECC_SADDR 0x8
+#define ATMEL_PMECC_EADDR 0xc
+
+#define ATMEL_PMECC_CLK 0x10
+#define PMECC_CLK_133MHZ (2 << 0)
+
+#define ATMEL_PMECC_CTRL 0x14
+#define PMECC_CTRL_RST BIT(0)
+#define PMECC_CTRL_DATA BIT(1)
+#define PMECC_CTRL_USER BIT(2)
+#define PMECC_CTRL_ENABLE BIT(4)
+#define PMECC_CTRL_DISABLE BIT(5)
+
+#define ATMEL_PMECC_SR 0x18
+#define PMECC_SR_BUSY BIT(0)
+#define PMECC_SR_ENABLE BIT(4)
+
+#define ATMEL_PMECC_IER 0x1c
+#define ATMEL_PMECC_IDR 0x20
+#define ATMEL_PMECC_IMR 0x24
+#define ATMEL_PMECC_ISR 0x28
+#define PMECC_ERROR_INT BIT(0)
+
+#define ATMEL_PMECC_ECC(sector, n) \
+ ((((sector) + 1) * 0x40) + (n))
+
+#define ATMEL_PMECC_REM(sector, n) \
+ ((((sector) + 1) * 0x40) + ((n) * 4) + 0x200)
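+/* E.g. ATMEL_PMECC_ECC(0, 0) = 0x40 and ATMEL_PMECC_REM(0, 0) = 0x240. */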
+
+/* PMERRLOC Register Definitions */
+#define ATMEL_PMERRLOC_ELCFG 0x0
+#define PMERRLOC_ELCFG_SECTOR_512 (0 << 0)
+#define PMERRLOC_ELCFG_SECTOR_1024 (1 << 0)
+#define PMERRLOC_ELCFG_NUM_ERRORS(n) ((n) << 16)
+
+#define ATMEL_PMERRLOC_ELPRIM 0x4
+#define ATMEL_PMERRLOC_ELEN 0x8
+#define ATMEL_PMERRLOC_ELDIS 0xc
+#define PMERRLOC_DISABLE BIT(0)
+
+#define ATMEL_PMERRLOC_ELSR 0x10
+#define PMERRLOC_ELSR_BUSY BIT(0)
+
+#define ATMEL_PMERRLOC_ELIER 0x14
+#define ATMEL_PMERRLOC_ELIDR 0x18
+#define ATMEL_PMERRLOC_ELIMR 0x1c
+#define ATMEL_PMERRLOC_ELISR 0x20
+#define PMERRLOC_ERR_NUM_MASK GENMASK(12, 8)
+#define PMERRLOC_CALC_DONE BIT(0)
+
+#define ATMEL_PMERRLOC_SIGMA(x) (((x) * 0x4) + 0x28)
+
+#define ATMEL_PMERRLOC_EL(offs, x) (((x) * 0x4) + (offs))
+
+struct atmel_pmecc_gf_tables {
+ u16 *alpha_to;
+ u16 *index_of;
+};
+
+struct atmel_pmecc_caps {
+ const int *strengths;
+ int nstrengths;
+ int el_offset;
+ bool correct_erased_chunks;
+};
+
+struct atmel_pmecc {
+ struct device *dev;
+ const struct atmel_pmecc_caps *caps;
+
+ struct {
+ void __iomem *base;
+ void __iomem *errloc;
+ } regs;
+
+ struct mutex lock;
+};
+
+struct atmel_pmecc_user_conf_cache {
+ u32 cfg;
+ u32 sarea;
+ u32 saddr;
+ u32 eaddr;
+};
+
+struct atmel_pmecc_user {
+ struct atmel_pmecc_user_conf_cache cache;
+ struct atmel_pmecc *pmecc;
+ const struct atmel_pmecc_gf_tables *gf_tables;
+ int eccbytes;
+ s16 *partial_syn;
+ s16 *si;
+ s16 *lmu;
+ s16 *smu;
+ s32 *mu;
+ s32 *dmu;
+ s32 *delta;
+ u32 isr;
+};
+
+static DEFINE_MUTEX(pmecc_gf_tables_lock);
+static const struct atmel_pmecc_gf_tables *pmecc_gf_tables_512;
+static const struct atmel_pmecc_gf_tables *pmecc_gf_tables_1024;
+
+static inline int deg(unsigned int poly)
+{
+ /* polynomial degree is the most-significant bit index */
+ return fls(poly) - 1;
+}
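+
+/* E.g. deg(0x201b) = fls(0x201b) - 1 = 13: bit 13 is the highest bit set. */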
+
+static int atmel_pmecc_build_gf_tables(int mm, unsigned int poly,
+ struct atmel_pmecc_gf_tables *gf_tables)
+{
+ unsigned int i, x = 1;
+ const unsigned int k = BIT(deg(poly));
+ unsigned int nn = BIT(mm) - 1;
+
+ /* the primitive polynomial must be of degree mm */
+ if (k != (1u << mm))
+ return -EINVAL;
+
+ for (i = 0; i < nn; i++) {
+ gf_tables->alpha_to[i] = x;
+ gf_tables->index_of[x] = i;
+ if (i && (x == 1))
+ /* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */
+ return -EINVAL;
+ x <<= 1;
+ if (x & k)
+ x ^= poly;
+ }
+ gf_tables->alpha_to[nn] = 1;
+ gf_tables->index_of[0] = 0;
+
+ return 0;
+}
+
+static const struct atmel_pmecc_gf_tables *
+atmel_pmecc_create_gf_tables(const struct atmel_pmecc_user_req *req)
+{
+ struct atmel_pmecc_gf_tables *gf_tables;
+ unsigned int poly, degree, table_size;
+ int ret;
+
+ if (req->ecc.sectorsize == 512) {
+ degree = PMECC_GF_DIMENSION_13;
+ poly = PMECC_GF_13_PRIMITIVE_POLY;
+ table_size = PMECC_LOOKUP_TABLE_SIZE_512;
+ } else {
+ degree = PMECC_GF_DIMENSION_14;
+ poly = PMECC_GF_14_PRIMITIVE_POLY;
+ table_size = PMECC_LOOKUP_TABLE_SIZE_1024;
+ }
+
+ gf_tables = kzalloc(sizeof(*gf_tables) +
+ (2 * table_size * sizeof(u16)),
+ GFP_KERNEL);
+ if (!gf_tables)
+ return ERR_PTR(-ENOMEM);
+
+ gf_tables->alpha_to = (void *)(gf_tables + 1);
+ gf_tables->index_of = gf_tables->alpha_to + table_size;
+
+ ret = atmel_pmecc_build_gf_tables(degree, poly, gf_tables);
+ if (ret) {
+ kfree(gf_tables);
+ return ERR_PTR(ret);
+ }
+
+ return gf_tables;
+}
+
+static const struct atmel_pmecc_gf_tables *
+atmel_pmecc_get_gf_tables(const struct atmel_pmecc_user_req *req)
+{
+ const struct atmel_pmecc_gf_tables **gf_tables, *ret;
+
+ mutex_lock(&pmecc_gf_tables_lock);
+ if (req->ecc.sectorsize == 512)
+ gf_tables = &pmecc_gf_tables_512;
+ else
+ gf_tables = &pmecc_gf_tables_1024;
+
+ ret = *gf_tables;
+
+ if (!ret) {
+ ret = atmel_pmecc_create_gf_tables(req);
+ if (!IS_ERR(ret))
+ *gf_tables = ret;
+ }
+ mutex_unlock(&pmecc_gf_tables_lock);
+
+ return ret;
+}
+
+static int atmel_pmecc_prepare_user_req(struct atmel_pmecc *pmecc,
+ struct atmel_pmecc_user_req *req)
+{
+ int i, max_eccbytes, eccbytes = 0, eccstrength = 0;
+
+ if (req->pagesize <= 0 || req->oobsize <= 0 || req->ecc.bytes <= 0)
+ return -EINVAL;
+
+ if (req->ecc.ooboffset >= 0 &&
+ req->ecc.ooboffset + req->ecc.bytes > req->oobsize)
+ return -EINVAL;
+
+ if (req->ecc.sectorsize == ATMEL_PMECC_SECTOR_SIZE_AUTO) {
+ if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH)
+ return -EINVAL;
+
+ if (req->pagesize > 512)
+ req->ecc.sectorsize = 1024;
+ else
+ req->ecc.sectorsize = 512;
+ }
+
+ if (req->ecc.sectorsize != 512 && req->ecc.sectorsize != 1024)
+ return -EINVAL;
+
+ if (req->pagesize % req->ecc.sectorsize)
+ return -EINVAL;
+
+ req->ecc.nsectors = req->pagesize / req->ecc.sectorsize;
+
+ max_eccbytes = req->ecc.bytes;
+
+ for (i = 0; i < pmecc->caps->nstrengths; i++) {
+ int nbytes, strength = pmecc->caps->strengths[i];
+
+ if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH &&
+ strength < req->ecc.strength)
+ continue;
+
+ nbytes = DIV_ROUND_UP(strength * fls(8 * req->ecc.sectorsize),
+ 8);
+ nbytes *= req->ecc.nsectors;
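+
+ /*
+ * Worked example: a 4096-byte page split into eight 512-byte
+ * sectors with strength 4 needs DIV_ROUND_UP(4 * fls(4096), 8) =
+ * 7 bytes per sector, i.e. 56 ECC bytes for the whole page.
+ */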
+
+ if (nbytes > max_eccbytes)
+ break;
+
+ eccstrength = strength;
+ eccbytes = nbytes;
+
+ if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH)
+ break;
+ }
+
+ if (!eccstrength)
+ return -EINVAL;
+
+ req->ecc.bytes = eccbytes;
+ req->ecc.strength = eccstrength;
+
+ if (req->ecc.ooboffset < 0)
+ req->ecc.ooboffset = req->oobsize - eccbytes;
+
+ return 0;
+}
+
+struct atmel_pmecc_user *
+atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
+ struct atmel_pmecc_user_req *req)
+{
+ struct atmel_pmecc_user *user;
+ const struct atmel_pmecc_gf_tables *gf_tables;
+ int strength, size, ret;
+
+ ret = atmel_pmecc_prepare_user_req(pmecc, req);
+ if (ret)
+ return ERR_PTR(ret);
+
+ size = sizeof(*user);
+ size = ALIGN(size, sizeof(u16));
+ /* Reserve space for partial_syn, si and smu */
+ size += ((2 * req->ecc.strength) + 1) * sizeof(u16) *
+ (2 + req->ecc.strength + 2);
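+ /*
+ * The (2 + req->ecc.strength + 2) factor above accounts for one
+ * row of (2 * strength) + 1 entries each for partial_syn and si,
+ * plus the (strength + 2) rows of the smu table.
+ */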
+ /* Reserve space for lmu. */
+ size += (req->ecc.strength + 1) * sizeof(u16);
+ /* Reserve space for mu, dmu and delta. */
+ size = ALIGN(size, sizeof(s32));
+ size += (req->ecc.strength + 1) * sizeof(s32) * 3;
+
+ user = kzalloc(size, GFP_KERNEL);
+ if (!user)
+ return ERR_PTR(-ENOMEM);
+
+ user->pmecc = pmecc;
+
+ user->partial_syn = (s16 *)PTR_ALIGN(user + 1, sizeof(u16));
+ user->si = user->partial_syn + ((2 * req->ecc.strength) + 1);
+ user->lmu = user->si + ((2 * req->ecc.strength) + 1);
+ user->smu = user->lmu + (req->ecc.strength + 1);
+ user->mu = (s32 *)PTR_ALIGN(user->smu +
+ (((2 * req->ecc.strength) + 1) *
+ (req->ecc.strength + 2)),
+ sizeof(s32));
+ user->dmu = user->mu + req->ecc.strength + 1;
+ user->delta = user->dmu + req->ecc.strength + 1;
+
+ gf_tables = atmel_pmecc_get_gf_tables(req);
+ if (IS_ERR(gf_tables)) {
+ kfree(user);
+ return ERR_CAST(gf_tables);
+ }
+
+ user->gf_tables = gf_tables;
+
+ user->eccbytes = req->ecc.bytes / req->ecc.nsectors;
+
+ for (strength = 0; strength < pmecc->caps->nstrengths; strength++) {
+ if (pmecc->caps->strengths[strength] == req->ecc.strength)
+ break;
+ }
+
+ user->cache.cfg = PMECC_CFG_BCH_STRENGTH(strength) |
+ PMECC_CFG_NSECTORS(req->ecc.nsectors);
+
+ if (req->ecc.sectorsize == 1024)
+ user->cache.cfg |= PMECC_CFG_SECTOR1024;
+
+ user->cache.sarea = req->oobsize - 1;
+ user->cache.saddr = req->ecc.ooboffset;
+ user->cache.eaddr = req->ecc.ooboffset + req->ecc.bytes - 1;
+
+ return user;
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_create_user);
+
+void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user)
+{
+ kfree(user);
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_destroy_user);
+
+static int get_strength(struct atmel_pmecc_user *user)
+{
+ const int *strengths = user->pmecc->caps->strengths;
+
+ return strengths[user->cache.cfg & PMECC_CFG_BCH_STRENGTH_MASK];
+}
+
+static int get_sectorsize(struct atmel_pmecc_user *user)
+{
+ return user->cache.cfg & PMECC_CFG_SECTOR1024 ? 1024 : 512;
+}
+
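+/*
+ * Read back the partial syndromes (BCH remainders) computed by the
+ * hardware. Each 32-bit REM register packs two 16-bit remainders; only
+ * the odd syndromes are read here, the even ones are derived from them
+ * in atmel_pmecc_substitute().
+ */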
+static void atmel_pmecc_gen_syndrome(struct atmel_pmecc_user *user, int sector)
+{
+ int strength = get_strength(user);
+ u32 value;
+ int i;
+
+ /* Fill odd syndromes */
+ for (i = 0; i < strength; i++) {
+ value = readl_relaxed(user->pmecc->regs.base +
+ ATMEL_PMECC_REM(sector, i / 2));
+ if (i & 1)
+ value >>= 16;
+
+ user->partial_syn[(2 * i) + 1] = value;
+ }
+}
+
+static void atmel_pmecc_substitute(struct atmel_pmecc_user *user)
+{
+ int degree = get_sectorsize(user) == 512 ? 13 : 14;
+ int cw_len = BIT(degree) - 1;
+ int strength = get_strength(user);
+ s16 *alpha_to = user->gf_tables->alpha_to;
+ s16 *index_of = user->gf_tables->index_of;
+ s16 *partial_syn = user->partial_syn;
+ s16 *si;
+ int i, j;
+
+ /*
+ * si[] is a table that holds the current syndrome values;
+ * each element of that table belongs to the field.
+ */
+ si = user->si;
+
+ memset(&si[1], 0, sizeof(s16) * ((2 * strength) - 1));
+
+ /* Compute the 2t syndromes based on S(x) */
+ /* Odd syndromes */
+ for (i = 1; i < 2 * strength; i += 2) {
+ for (j = 0; j < degree; j++) {
+ if (partial_syn[i] & BIT(j))
+ si[i] = alpha_to[i * j] ^ si[i];
+ }
+ }
+ /* Even syndrome = (Odd syndrome) ** 2 */
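+ /*
+ * Squaring in GF(2^m): for a non-zero element alpha^p the square
+ * is alpha^((2 * p) % (2^m - 1)), hence the index doubling modulo
+ * cw_len below.
+ */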
+ for (i = 2, j = 1; j <= strength; i = ++j << 1) {
+ if (si[j] == 0) {
+ si[i] = 0;
+ } else {
+ s16 tmp;
+
+ tmp = index_of[si[j]];
+ tmp = (tmp * 2) % cw_len;
+ si[i] = alpha_to[tmp];
+ }
+ }
+}
+
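+/*
+ * Iterative Berlekamp algorithm: build the error locator polynomial
+ * sigma(x) (stored row by row in smu[]) from the syndromes, tracking the
+ * discrepancy dmu[] and the polynomial order lmu[] at each step.
+ */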
+static void atmel_pmecc_get_sigma(struct atmel_pmecc_user *user)
+{
+ s16 *lmu = user->lmu;
+ s16 *si = user->si;
+ s32 *mu = user->mu;
+ s32 *dmu = user->dmu;
+ s32 *delta = user->delta;
+ int degree = get_sectorsize(user) == 512 ? 13 : 14;
+ int cw_len = BIT(degree) - 1;
+ int strength = get_strength(user);
+ int num = 2 * strength + 1;
+ s16 *index_of = user->gf_tables->index_of;
+ s16 *alpha_to = user->gf_tables->alpha_to;
+ int i, j, k;
+ u32 dmu_0_count, tmp;
+ s16 *smu = user->smu;
+
+ /* index of largest delta */
+ int ro;
+ int largest;
+ int diff;
+
+ dmu_0_count = 0;
+
+ /* First Row */
+
+ /* Mu */
+ mu[0] = -1;
+
+ memset(smu, 0, sizeof(s16) * num);
+ smu[0] = 1;
+
+ /* discrepancy set to 1 */
+ dmu[0] = 1;
+ /* polynomial order set to 0 */
+ lmu[0] = 0;
+ delta[0] = (mu[0] * 2 - lmu[0]) >> 1;
+
+ /* Second Row */
+
+ /* Mu */
+ mu[1] = 0;
+ /* Sigma(x) set to 1 */
+ memset(&smu[num], 0, sizeof(s16) * num);
+ smu[num] = 1;
+
+ /* discrepancy set to S1 */
+ dmu[1] = si[1];
+
+ /* polynomial order set to 0 */
+ lmu[1] = 0;
+
+ delta[1] = (mu[1] * 2 - lmu[1]) >> 1;
+
+ /* Init the Sigma(x) last row */
+ memset(&smu[(strength + 1) * num], 0, sizeof(s16) * num);
+
+ for (i = 1; i <= strength; i++) {
+ mu[i + 1] = i << 1;
+ /* Begin Computing Sigma (Mu+1) and L(mu) */
+ /* check if discrepancy is set to 0 */
+ if (dmu[i] == 0) {
+ dmu_0_count++;
+
+ tmp = ((strength - (lmu[i] >> 1) - 1) / 2);
+ if ((strength - (lmu[i] >> 1) - 1) & 0x1)
+ tmp += 2;
+ else
+ tmp += 1;
+
+ if (dmu_0_count == tmp) {
+ for (j = 0; j <= (lmu[i] >> 1) + 1; j++)
+ smu[(strength + 1) * num + j] =
+ smu[i * num + j];
+
+ lmu[strength + 1] = lmu[i];
+ return;
+ }
+
+ /* copy polynomial */
+ for (j = 0; j <= lmu[i] >> 1; j++)
+ smu[(i + 1) * num + j] = smu[i * num + j];
+
+ /* copy previous polynomial order to the next */
+ lmu[i + 1] = lmu[i];
+ } else {
+ ro = 0;
+ largest = -1;
+ /* find largest delta with dmu != 0 */
+ for (j = 0; j < i; j++) {
+ if ((dmu[j]) && (delta[j] > largest)) {
+ largest = delta[j];
+ ro = j;
+ }
+ }
+
+ /* compute difference */
+ diff = (mu[i] - mu[ro]);
+
+ /* Compute degree of the new smu polynomial */
+ if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff))
+ lmu[i + 1] = lmu[i];
+ else
+ lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2;
+
+ /* Init smu[i+1] with 0 */
+ for (k = 0; k < num; k++)
+ smu[(i + 1) * num + k] = 0;
+
+ /* Compute smu[i+1] */
+ for (k = 0; k <= lmu[ro] >> 1; k++) {
+ s16 a, b, c;
+
+ if (!(smu[ro * num + k] && dmu[i]))
+ continue;
+
+ a = index_of[dmu[i]];
+ b = index_of[dmu[ro]];
+ c = index_of[smu[ro * num + k]];
+ tmp = a + (cw_len - b) + c;
+ a = alpha_to[tmp % cw_len];
+ smu[(i + 1) * num + (k + diff)] = a;
+ }
+
+ for (k = 0; k <= lmu[i] >> 1; k++)
+ smu[(i + 1) * num + k] ^= smu[i * num + k];
+ }
+
+ /* End Computing Sigma (Mu+1) and L(mu) */
+ /* In either case compute delta */
+ delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1;
+
+ /* Do not compute discrepancy for the last iteration */
+ if (i >= strength)
+ continue;
+
+ for (k = 0; k <= (lmu[i + 1] >> 1); k++) {
+ tmp = 2 * (i - 1);
+ if (k == 0) {
+ dmu[i + 1] = si[tmp + 3];
+ } else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) {
+ s16 a, b, c;
+
+ a = index_of[smu[(i + 1) * num + k]];
+ b = si[2 * (i - 1) + 3 - k];
+ c = index_of[b];
+ tmp = a + c;
+ tmp %= cw_len;
+ dmu[i + 1] = alpha_to[tmp] ^ dmu[i + 1];
+ }
+ }
+ }
+}
+
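+/*
+ * Load sigma(x) into the PMERRLOC engine and let it search for the roots
+ * (the error locations); the datasheets describe this step as a Chien
+ * search. Returns the number of located errors, or -EBADMSG when the
+ * root count does not match the degree of sigma.
+ */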
+static int atmel_pmecc_err_location(struct atmel_pmecc_user *user)
+{
+ int sector_size = get_sectorsize(user);
+ int degree = sector_size == 512 ? 13 : 14;
+ struct atmel_pmecc *pmecc = user->pmecc;
+ int strength = get_strength(user);
+ int ret, roots_nbr, i, err_nbr = 0;
+ int num = (2 * strength) + 1;
+ s16 *smu = user->smu;
+ u32 val;
+
+ writel(PMERRLOC_DISABLE, pmecc->regs.errloc + ATMEL_PMERRLOC_ELDIS);
+
+ for (i = 0; i <= user->lmu[strength + 1] >> 1; i++) {
+ writel_relaxed(smu[(strength + 1) * num + i],
+ pmecc->regs.errloc + ATMEL_PMERRLOC_SIGMA(i));
+ err_nbr++;
+ }
+
+ val = PMERRLOC_ELCFG_NUM_ERRORS(err_nbr - 1);
+ if (sector_size == 1024)
+ val |= PMERRLOC_ELCFG_SECTOR_1024;
+
+ writel(val, pmecc->regs.errloc + ATMEL_PMERRLOC_ELCFG);
+ writel((sector_size * 8) + (degree * strength),
+ pmecc->regs.errloc + ATMEL_PMERRLOC_ELEN);
+
+ ret = readl_relaxed_poll_timeout(pmecc->regs.errloc +
+ ATMEL_PMERRLOC_ELISR,
+ val, val & PMERRLOC_CALC_DONE, 0,
+ PMECC_MAX_TIMEOUT_MS * 1000);
+ if (ret) {
+ dev_err(pmecc->dev,
+ "PMECC: timed out while calculating the error location\n");
+ return ret;
+ }
+
+ roots_nbr = (val & PMERRLOC_ERR_NUM_MASK) >> 8;
+ /* Number of roots == degree of smu hence <= cap */
+ if (roots_nbr == user->lmu[strength + 1] >> 1)
+ return err_nbr - 1;
+
+ /*
+ * The number of roots does not match the degree of smu:
+ * unable to correct the error.
+ */
+ return -EBADMSG;
+}
+
+int atmel_pmecc_correct_sector(struct atmel_pmecc_user *user, int sector,
+ void *data, void *ecc)
+{
+ struct atmel_pmecc *pmecc = user->pmecc;
+ int sectorsize = get_sectorsize(user);
+ int eccbytes = user->eccbytes;
+ int i, nerrors;
+
+ if (!(user->isr & BIT(sector)))
+ return 0;
+
+ atmel_pmecc_gen_syndrome(user, sector);
+ atmel_pmecc_substitute(user);
+ atmel_pmecc_get_sigma(user);
+
+ nerrors = atmel_pmecc_err_location(user);
+ if (nerrors < 0)
+ return nerrors;
+
+ for (i = 0; i < nerrors; i++) {
+ const char *area;
+ int byte, bit;
+ u32 errpos;
+ u8 *ptr;
+
+ errpos = readl_relaxed(pmecc->regs.errloc +
+ ATMEL_PMERRLOC_EL(pmecc->caps->el_offset, i));
+ errpos--;
+
+ byte = errpos / 8;
+ bit = errpos % 8;
+
+ if (byte < sectorsize) {
+ ptr = data + byte;
+ area = "data";
+ } else if (byte < sectorsize + eccbytes) {
+ ptr = ecc + byte - sectorsize;
+ area = "ECC";
+ } else {
+ dev_dbg(pmecc->dev,
+ "Invalid errpos value (%d, max is %d)\n",
+ errpos, (sectorsize + eccbytes) * 8);
+ return -EINVAL;
+ }
+
+ dev_dbg(pmecc->dev,
+ "Bit flip in %s area, byte %d: 0x%02x -> 0x%02x\n",
+ area, byte, *ptr, (unsigned int)(*ptr ^ BIT(bit)));
+
+ *ptr ^= BIT(bit);
+ }
+
+ return nerrors;
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_correct_sector);
+
+bool atmel_pmecc_correct_erased_chunks(struct atmel_pmecc_user *user)
+{
+ return user->pmecc->caps->correct_erased_chunks;
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_correct_erased_chunks);
+
+void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
+ int sector, void *ecc)
+{
+ struct atmel_pmecc *pmecc = user->pmecc;
+ u8 *ptr = ecc;
+ int i;
+
+ for (i = 0; i < user->eccbytes; i++)
+ ptr[i] = readb_relaxed(pmecc->regs.base +
+ ATMEL_PMECC_ECC(sector, i));
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes);
+
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc)
+{
+ writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_reset);
+
+int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op)
+{
+ struct atmel_pmecc *pmecc = user->pmecc;
+ u32 cfg;
+
+ if (op != NAND_ECC_READ && op != NAND_ECC_WRITE) {
+ dev_err(pmecc->dev, "Bad ECC operation!");
+ return -EINVAL;
+ }
+
+ mutex_lock(&user->pmecc->lock);
+
+ cfg = user->cache.cfg;
+ if (op == NAND_ECC_WRITE)
+ cfg |= PMECC_CFG_WRITE_OP;
+ else
+ cfg |= PMECC_CFG_AUTO_ENABLE;
+
+ writel(cfg, pmecc->regs.base + ATMEL_PMECC_CFG);
+ writel(user->cache.sarea, pmecc->regs.base + ATMEL_PMECC_SAREA);
+ writel(user->cache.saddr, pmecc->regs.base + ATMEL_PMECC_SADDR);
+ writel(user->cache.eaddr, pmecc->regs.base + ATMEL_PMECC_EADDR);
+
+ writel(PMECC_CTRL_ENABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ writel(PMECC_CTRL_DATA, pmecc->regs.base + ATMEL_PMECC_CTRL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_enable);
+
+void atmel_pmecc_disable(struct atmel_pmecc_user *user)
+{
+ atmel_pmecc_reset(user->pmecc);
+ mutex_unlock(&user->pmecc->lock);
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_disable);
+
+int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user)
+{
+ struct atmel_pmecc *pmecc = user->pmecc;
+ u32 status;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(pmecc->regs.base +
+ ATMEL_PMECC_SR,
+ status, !(status & PMECC_SR_BUSY), 0,
+ PMECC_MAX_TIMEOUT_MS * 1000);
+ if (ret) {
+ dev_err(pmecc->dev,
+ "Timeout while waiting for PMECC ready.\n");
+ return ret;
+ }
+
+ user->isr = readl_relaxed(pmecc->regs.base + ATMEL_PMECC_ISR);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_wait_rdy);
+
+static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
+ const struct atmel_pmecc_caps *caps,
+ int pmecc_res_idx, int errloc_res_idx)
+{
+ struct device *dev = &pdev->dev;
+ struct atmel_pmecc *pmecc;
+
+ pmecc = devm_kzalloc(dev, sizeof(*pmecc), GFP_KERNEL);
+ if (!pmecc)
+ return ERR_PTR(-ENOMEM);
+
+ pmecc->caps = caps;
+ pmecc->dev = dev;
+ mutex_init(&pmecc->lock);
+
+ pmecc->regs.base = devm_platform_ioremap_resource(pdev, pmecc_res_idx);
+ if (IS_ERR(pmecc->regs.base))
+ return ERR_CAST(pmecc->regs.base);
+
+ pmecc->regs.errloc = devm_platform_ioremap_resource(pdev, errloc_res_idx);
+ if (IS_ERR(pmecc->regs.errloc))
+ return ERR_CAST(pmecc->regs.errloc);
+
+ /* Disable all interrupts before registering the PMECC handler. */
+ writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);
+ atmel_pmecc_reset(pmecc);
+
+ return pmecc;
+}
+
+static void devm_atmel_pmecc_put(struct device *dev, void *res)
+{
+ struct atmel_pmecc **pmecc = res;
+
+ put_device((*pmecc)->dev);
+}
+
+static struct atmel_pmecc *atmel_pmecc_get_by_node(struct device *userdev,
+ struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct atmel_pmecc *pmecc, **ptr;
+ int ret;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+ pmecc = platform_get_drvdata(pdev);
+ if (!pmecc) {
+ ret = -EPROBE_DEFER;
+ goto err_put_device;
+ }
+
+ ptr = devres_alloc(devm_atmel_pmecc_put, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr) {
+ ret = -ENOMEM;
+ goto err_put_device;
+ }
+
+ *ptr = pmecc;
+
+ devres_add(userdev, ptr);
+
+ return pmecc;
+
+err_put_device:
+ put_device(&pdev->dev);
+ return ERR_PTR(ret);
+}
+
+static const int atmel_pmecc_strengths[] = { 2, 4, 8, 12, 24, 32 };
+
+static struct atmel_pmecc_caps at91sam9g45_caps = {
+ .strengths = atmel_pmecc_strengths,
+ .nstrengths = 5,
+ .el_offset = 0x8c,
+};
+
+static struct atmel_pmecc_caps sama5d4_caps = {
+ .strengths = atmel_pmecc_strengths,
+ .nstrengths = 5,
+ .el_offset = 0x8c,
+ .correct_erased_chunks = true,
+};
+
+static struct atmel_pmecc_caps sama5d2_caps = {
+ .strengths = atmel_pmecc_strengths,
+ .nstrengths = 6,
+ .el_offset = 0xac,
+ .correct_erased_chunks = true,
+};
+
+static const struct of_device_id __maybe_unused atmel_pmecc_legacy_match[] = {
+ { .compatible = "atmel,sama5d4-nand", &sama5d4_caps },
+ { .compatible = "atmel,sama5d2-nand", &sama5d2_caps },
+ { /* sentinel */ }
+};
+
+struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
+{
+ struct atmel_pmecc *pmecc;
+ struct device_node *np;
+
+ if (!userdev)
+ return ERR_PTR(-EINVAL);
+
+ if (!userdev->of_node)
+ return NULL;
+
+ np = of_parse_phandle(userdev->of_node, "ecc-engine", 0);
+ if (np) {
+ pmecc = atmel_pmecc_get_by_node(userdev, np);
+ of_node_put(np);
+ } else {
+ /*
+ * Support old DT bindings: in this case the PMECC iomem
+ * resources are directly defined in the user pdev at position
+ * 1 and 2. Extract all relevant information from there.
+ */
+ struct platform_device *pdev = to_platform_device(userdev);
+ const struct atmel_pmecc_caps *caps;
+ const struct of_device_id *match;
+
+ /* No PMECC engine available. */
+ if (!of_property_read_bool(userdev->of_node,
+ "atmel,has-pmecc"))
+ return NULL;
+
+ caps = &at91sam9g45_caps;
+
+ /* Find the caps associated to the NAND dev node. */
+ match = of_match_node(atmel_pmecc_legacy_match,
+ userdev->of_node);
+ if (match && match->data)
+ caps = match->data;
+
+ pmecc = atmel_pmecc_create(pdev, caps, 1, 2);
+ }
+
+ return pmecc;
+}
+EXPORT_SYMBOL(devm_atmel_pmecc_get);
+
+static const struct of_device_id atmel_pmecc_match[] = {
+ { .compatible = "atmel,at91sam9g45-pmecc", &at91sam9g45_caps },
+ { .compatible = "atmel,sama5d4-pmecc", &sama5d4_caps },
+ { .compatible = "atmel,sama5d2-pmecc", &sama5d2_caps },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_pmecc_match);
+
+static int atmel_pmecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct atmel_pmecc_caps *caps;
+ struct atmel_pmecc *pmecc;
+
+ caps = of_device_get_match_data(dev);
+ if (!caps) {
+ dev_err(dev, "Invalid caps\n");
+ return -EINVAL;
+ }
+
+ pmecc = atmel_pmecc_create(pdev, caps, 0, 1);
+ if (IS_ERR(pmecc))
+ return PTR_ERR(pmecc);
+
+ platform_set_drvdata(pdev, pmecc);
+
+ return 0;
+}
+
+static struct platform_driver atmel_pmecc_driver = {
+ .driver = {
+ .name = "atmel-pmecc",
+ .of_match_table = atmel_pmecc_match,
+ },
+ .probe = atmel_pmecc_probe,
+};
+module_platform_driver(atmel_pmecc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("PMECC engine driver");
+MODULE_ALIAS("platform:atmel_pmecc");
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.h b/drivers/mtd/nand/raw/atmel/pmecc.h
new file mode 100644
index 0000000000..7851c05126
--- /dev/null
+++ b/drivers/mtd/nand/raw/atmel/pmecc.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * © Copyright 2016 ATMEL
+ * © Copyright 2016 Free Electrons
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ * Derived from the atmel_nand.c driver which contained the following
+ * copyrights:
+ *
+ * Copyright © 2003 Rick Bronson
+ *
+ * Derived from drivers/mtd/nand/autcpu12.c (removed in v3.8)
+ * Copyright © 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ * Derived from drivers/mtd/spia.c (removed in v3.8)
+ * Copyright © 2000 Steven J. Hill (sjhill@cotw.com)
+ *
+ *
+ * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
+ * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright © 2007
+ *
+ * Derived from Das U-Boot source code
+ * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
+ * © Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
+ *
+ * Add Programmable Multibit ECC support for various AT91 SoC
+ * © Copyright 2012 ATMEL, Hong Xu
+ *
+ * Add Nand Flash Controller support for SAMA5 SoC
+ * © Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
+ */
+
+#ifndef ATMEL_PMECC_H
+#define ATMEL_PMECC_H
+
+#define ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH 0
+#define ATMEL_PMECC_SECTOR_SIZE_AUTO 0
+#define ATMEL_PMECC_OOBOFFSET_AUTO -1
+
+struct atmel_pmecc_user_req {
+ int pagesize;
+ int oobsize;
+ struct {
+ int strength;
+ int bytes;
+ int sectorsize;
+ int nsectors;
+ int ooboffset;
+ } ecc;
+};
+
+struct atmel_pmecc *devm_atmel_pmecc_get(struct device *dev);
+
+struct atmel_pmecc_user *
+atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
+ struct atmel_pmecc_user_req *req);
+void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user);
+
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc);
+int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op);
+void atmel_pmecc_disable(struct atmel_pmecc_user *user);
+int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user);
+int atmel_pmecc_correct_sector(struct atmel_pmecc_user *user, int sector,
+ void *data, void *ecc);
+bool atmel_pmecc_correct_erased_chunks(struct atmel_pmecc_user *user);
+void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
+ int sector, void *ecc);
+
+#endif /* ATMEL_PMECC_H */
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
new file mode 100644
index 0000000000..063a5e0b8d
--- /dev/null
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 Embedded Edge, LLC
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <asm/io.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1550nd.h>
+
+
+struct au1550nd_ctx {
+ struct nand_controller controller;
+ struct nand_chip chip;
+
+ int cs;
+ void __iomem *base;
+};
+
+static struct au1550nd_ctx *chip_to_au_ctx(struct nand_chip *this)
+{
+ return container_of(this, struct au1550nd_ctx, chip);
+}
+
+/**
+ * au_write_buf - write buffer to chip
+ * @this: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * write function for 8bit buswidth
+ */
+static void au_write_buf(struct nand_chip *this, const void *buf,
+ unsigned int len)
+{
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ const u8 *p = buf;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ writeb(p[i], ctx->base + MEM_STNAND_DATA);
+ wmb(); /* drain writebuffer */
+ }
+}
+
+/**
+ * au_read_buf - read chip data into buffer
+ * @this: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * read function for 8bit buswidth
+ */
+static void au_read_buf(struct nand_chip *this, void *buf,
+ unsigned int len)
+{
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ u8 *p = buf;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ p[i] = readb(ctx->base + MEM_STNAND_DATA);
+ wmb(); /* drain writebuffer */
+ }
+}
+
+/**
+ * au_write_buf16 - write buffer to chip
+ * @this: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * write function for 16bit buswidth
+ */
+static void au_write_buf16(struct nand_chip *this, const void *buf,
+ unsigned int len)
+{
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ const u16 *p = buf;
+ unsigned int i;
+
+ len >>= 1;
+ for (i = 0; i < len; i++) {
+ writew(p[i], ctx->base + MEM_STNAND_DATA);
+ wmb(); /* drain writebuffer */
+ }
+}
+
+/**
+ * au_read_buf16 - read chip data into buffer
+ * @this: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * read function for 16bit buswidth
+ */
+static void au_read_buf16(struct nand_chip *this, void *buf, unsigned int len)
+{
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ unsigned int i;
+ u16 *p = buf;
+
+ len >>= 1;
+ for (i = 0; i < len; i++) {
+ p[i] = readw(ctx->base + MEM_STNAND_DATA);
+ wmb(); /* drain writebuffer */
+ }
+}
+
+static int find_nand_cs(unsigned long nand_base)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
+ unsigned long addr, staddr, start, mask, end;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ addr = 0x1000 + (i * 0x10); /* CSx */
+ staddr = __raw_readl(base + addr + 0x08); /* STADDRx */
+ /* figure out the decoded range of this CS */
+ start = (staddr << 4) & 0xfffc0000;
+ mask = (staddr << 18) & 0xfffc0000;
+ end = (start | (start - 1)) & ~(start ^ mask);
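+ /*
+ * STADDRx encodes both the chipselect base address and the
+ * address mask; "end" reconstructs the last address decoded by
+ * this CS so that nand_base can be range-checked below.
+ */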
+ if ((nand_base >= start) && (nand_base < end))
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static int au1550nd_waitrdy(struct nand_chip *this, unsigned int timeout_ms)
+{
+ unsigned long timeout_jiffies = jiffies;
+
+ timeout_jiffies += msecs_to_jiffies(timeout_ms) + 1;
+ do {
+ if (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1)
+ return 0;
+
+ usleep_range(10, 100);
+ } while (time_before(jiffies, timeout_jiffies));
+
+ return -ETIMEDOUT;
+}
+
+static int au1550nd_exec_instr(struct nand_chip *this,
+ const struct nand_op_instr *instr)
+{
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ unsigned int i;
+ int ret = 0;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb(instr->ctx.cmd.opcode,
+ ctx->base + MEM_STNAND_CMD);
+ /* Drain the writebuffer */
+ wmb();
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ writeb(instr->ctx.addr.addrs[i],
+ ctx->base + MEM_STNAND_ADDR);
+ /* Drain the writebuffer */
+ wmb();
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ if ((this->options & NAND_BUSWIDTH_16) &&
+ !instr->ctx.data.force_8bit)
+ au_read_buf16(this, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ else
+ au_read_buf(this, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ if ((this->options & NAND_BUSWIDTH_16) &&
+ !instr->ctx.data.force_8bit)
+ au_write_buf16(this, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ else
+ au_write_buf(this, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = au1550nd_waitrdy(this, instr->ctx.waitrdy.timeout_ms);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+
+ return ret;
+}
+
+static int au1550nd_exec_op(struct nand_chip *this,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ unsigned int i;
+ int ret;
+
+ if (check_only)
+ return 0;
+
+ /* assert (force assert) chip enable */
+ alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL);
+ /* Drain the writebuffer */
+ wmb();
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = au1550nd_exec_instr(this, &op->instrs[i]);
+ if (ret)
+ break;
+ }
+
+ /* deassert chip enable */
+ alchemy_wrsmem(0, AU1000_MEM_STNDCTL);
+ /* Drain the writebuffer */
+ wmb();
+
+ return ret;
+}
+
+static int au1550nd_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops au1550nd_ops = {
+ .exec_op = au1550nd_exec_op,
+ .attach_chip = au1550nd_attach_chip,
+};
+
+static int au1550nd_probe(struct platform_device *pdev)
+{
+ struct au1550nd_platdata *pd;
+ struct au1550nd_ctx *ctx;
+ struct nand_chip *this;
+ struct mtd_info *mtd;
+ struct resource *r;
+ int ret, cs;
+
+ pd = dev_get_platdata(&pdev->dev);
+ if (!pd) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no NAND memory resource\n");
+ ret = -ENODEV;
+ goto out1;
+ }
+ if (!request_mem_region(r->start, resource_size(r), "au1550-nand")) {
+ dev_err(&pdev->dev, "cannot claim NAND memory area\n");
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ ctx->base = ioremap(r->start, 0x1000);
+ if (!ctx->base) {
+ dev_err(&pdev->dev, "cannot remap NAND memory area\n");
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ this = &ctx->chip;
+ mtd = nand_to_mtd(this);
+ mtd->dev.parent = &pdev->dev;
+
+ /* figure out which CS# r->start belongs to */
+ cs = find_nand_cs(r->start);
+ if (cs < 0) {
+ dev_err(&pdev->dev, "cannot detect NAND chipselect\n");
+ ret = -ENODEV;
+ goto out3;
+ }
+ ctx->cs = cs;
+
+ nand_controller_init(&ctx->controller);
+ ctx->controller.ops = &au1550nd_ops;
+ this->controller = &ctx->controller;
+
+ if (pd->devwidth)
+ this->options |= NAND_BUSWIDTH_16;
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ ret = nand_scan(this, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
+ goto out3;
+ }
+
+ ret = mtd_device_register(mtd, pd->parts, pd->num_parts);
+ if (ret) {
+ nand_cleanup(this);
+ goto out3;
+ }
+
+ platform_set_drvdata(pdev, ctx);
+
+ return 0;
+
+out3:
+ iounmap(ctx->base);
+out2:
+ release_mem_region(r->start, resource_size(r));
+out1:
+ kfree(ctx);
+ return ret;
+}
+
+static void au1550nd_remove(struct platform_device *pdev)
+{
+ struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
+ struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct nand_chip *chip = &ctx->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ iounmap(ctx->base);
+ release_mem_region(r->start, resource_size(r));
+ kfree(ctx);
+}
+
+static struct platform_driver au1550nd_driver = {
+ .driver = {
+ .name = "au1550-nand",
+ },
+ .probe = au1550nd_probe,
+ .remove_new = au1550nd_remove,
+};
+
+module_platform_driver(au1550nd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Embedded Edge, LLC");
+MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on Pb1550 board");
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/Makefile b/drivers/mtd/nand/raw/bcm47xxnflash/Makefile
new file mode 100644
index 0000000000..b531a630c9
--- /dev/null
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+bcm47xxnflash-y += main.o
+bcm47xxnflash-y += ops_bcm4706.o
+
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash.o
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/raw/bcm47xxnflash/bcm47xxnflash.h
new file mode 100644
index 0000000000..201b9baa52
--- /dev/null
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/bcm47xxnflash.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BCM47XXNFLASH_H
+#define __BCM47XXNFLASH_H
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+
+struct bcm47xxnflash {
+ struct bcma_drv_cc *cc;
+
+ struct nand_chip nand_chip;
+
+ unsigned curr_command;
+ int curr_page_addr;
+ int curr_column;
+
+ u8 id_data[8];
+};
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n);
+
+#endif /* __BCM47XXNFLASH_H */
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/main.c b/drivers/mtd/nand/raw/bcm47xxnflash/main.c
new file mode 100644
index 0000000000..ebcf508e06
--- /dev/null
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/main.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ */
+
+#include "bcm47xxnflash.h"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bcma/bcma.h>
+
+MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rafał Miłecki");
+
+static const char *probes[] = { "bcm47xxpart", NULL };
+
+static int bcm47xxnflash_probe(struct platform_device *pdev)
+{
+ struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxnflash *b47n;
+ struct mtd_info *mtd;
+ int err = 0;
+
+ b47n = devm_kzalloc(&pdev->dev, sizeof(*b47n), GFP_KERNEL);
+ if (!b47n)
+ return -ENOMEM;
+
+ nand_set_controller_data(&b47n->nand_chip, b47n);
+ mtd = nand_to_mtd(&b47n->nand_chip);
+ mtd->dev.parent = &pdev->dev;
+ b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash);
+
+ if (b47n->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+ err = bcm47xxnflash_ops_bcm4706_init(b47n);
+ } else {
+ pr_err("Device not supported\n");
+ err = -ENOTSUPP;
+ }
+ if (err) {
+ pr_err("Initialization failed: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, b47n);
+
+ err = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
+ if (err) {
+ pr_err("Failed to register MTD device: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void bcm47xxnflash_remove(struct platform_device *pdev)
+{
+ struct bcm47xxnflash *nflash = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &nflash->nand_chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+}
+
+static struct platform_driver bcm47xxnflash_driver = {
+ .probe = bcm47xxnflash_probe,
+ .remove_new = bcm47xxnflash_remove,
+ .driver = {
+ .name = "bcma_nflash",
+ },
+};
+
+module_platform_driver(bcm47xxnflash_driver);
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
new file mode 100644
index 0000000000..6487dfc642
--- /dev/null
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ */
+
+#include "bcm47xxnflash.h"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/bcma/bcma.h>
+
+/* Broadcom uses 1'000'000 but that seems to be too many. Tests on a WNDR4500
+ * have shown ~1000 retries as the maximum needed. */
+#define NFLASH_READY_RETRIES 10000
+
+#define NFLASH_SECTOR_SIZE 512
+
+#define NCTL_CMD0 0x00010000
+#define NCTL_COL 0x00020000 /* Update column with value from BCMA_CC_NFLASH_COL_ADDR */
+#define NCTL_ROW 0x00040000 /* Update row (page) with value from BCMA_CC_NFLASH_ROW_ADDR */
+#define NCTL_CMD1W 0x00080000
+#define NCTL_READ 0x00100000
+#define NCTL_WRITE 0x00200000
+#define NCTL_SPECADDR 0x01000000
+#define NCTL_READY 0x04000000
+#define NCTL_ERR 0x08000000
+#define NCTL_CSA 0x40000000
+#define NCTL_START 0x80000000
+
+/**************************************************
+ * Various helpers
+ **************************************************/
+
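+/*
+ * Convert a delay in ns to memory-clock cycles (clock in MHz), rounding
+ * up by one cycle as a safety margin. For example, with a 100 MHz clock,
+ * 15 ns maps to ((15 * 1000 * 100) / 1000000) + 1 = 2 cycles.
+ */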
+static inline u8 bcm47xxnflash_ops_bcm4706_ns_to_cycle(u16 ns, u16 clock)
+{
+ return ((ns * 1000 * clock) / 1000000) + 1;
+}
+
+static int bcm47xxnflash_ops_bcm4706_ctl_cmd(struct bcma_drv_cc *cc, u32 code)
+{
+ int i = 0;
+
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_CTL, NCTL_START | code);
+ for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+ if (!(bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_START)) {
+ i = 0;
+ break;
+ }
+ }
+ if (i) {
+ pr_err("NFLASH control command not ready!\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int bcm47xxnflash_ops_bcm4706_poll(struct bcma_drv_cc *cc)
+{
+ int i;
+
+ for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+ if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_READY) {
+ if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_ERR) {
+ pr_err("Error on polling\n");
+ return -EBUSY;
+ }
+ return 0;
+ }
+ }
+
+ pr_err("Polling timeout!\n");
+ return -EBUSY;
+}
+
+/**************************************************
+ * R/W
+ **************************************************/
+
+static void bcm47xxnflash_ops_bcm4706_read(struct mtd_info *mtd, uint8_t *buf,
+ int len)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+
+ u32 ctlcode;
+ u32 *dest = (u32 *)buf;
+ int i;
+ int toread;
+
+ BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+ /* Don't validate the column using nand_chip->page_shift; it may be
+ * bigger when accessing the OOB area */
+
+ while (len) {
+ /* We can read maximum of 0x200 bytes at once */
+ toread = min(len, 0x200);
+
+ /* Set page and column */
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_COL_ADDR,
+ b47n->curr_column);
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+
+ /* Prepare to read */
+ ctlcode = NCTL_CSA | NCTL_CMD1W | NCTL_ROW | NCTL_COL |
+ NCTL_CMD0;
+ ctlcode |= NAND_CMD_READSTART << 8;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode))
+ return;
+ if (bcm47xxnflash_ops_bcm4706_poll(b47n->cc))
+ return;
+
+ /* Eventually read some data :) */
+ for (i = 0; i < toread; i += 4, dest++) {
+ ctlcode = NCTL_CSA | 0x30000000 | NCTL_READ;
+ if (i == toread - 4) /* Last read goes without NCTL_CSA */
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+ ctlcode))
+ return;
+ *dest = bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA);
+ }
+
+ b47n->curr_column += toread;
+ len -= toread;
+ }
+}
+
+static void bcm47xxnflash_ops_bcm4706_write(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+ struct bcma_drv_cc *cc = b47n->cc;
+
+ u32 ctlcode;
+ const u32 *data = (u32 *)buf;
+ int i;
+
+ BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+ /* Don't validate column using nand_chip->page_shift, it may be bigger
+ * when accessing OOB */
+
+ for (i = 0; i < len; i += 4, data++) {
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_DATA, *data);
+
+ ctlcode = NCTL_CSA | 0x30000000 | NCTL_WRITE;
+ if (i == len - 4) /* Last write goes without NCTL_CSA */
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) {
+ pr_err("%s ctl_cmd didn't work!\n", __func__);
+ return;
+ }
+ }
+
+ b47n->curr_column += len;
+}
+
+/**************************************************
+ * NAND chip ops
+ **************************************************/
+
+static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip,
+ int cmd, unsigned int ctrl)
+{
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+ u32 code = 0;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (cmd & NAND_CTRL_CLE)
+ code = cmd | NCTL_CMD0;
+
+ /* nCS is not needed for reset command */
+ if (cmd != NAND_CMD_RESET)
+ code |= NCTL_CSA;
+
+ bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
+}
+
+/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
+static void bcm47xxnflash_ops_bcm4706_select_chip(struct nand_chip *chip,
+ int cs)
+{
+}
+
+static int bcm47xxnflash_ops_bcm4706_dev_ready(struct nand_chip *nand_chip)
+{
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+
+ return !!(bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_CTL) & NCTL_READY);
+}
+
+/*
+ * The default nand_command and nand_command_lp don't match the BCM4706
+ * hardware layout. For example, reading the chip ID is performed in a
+ * non-standard way. Setting the column and page is also handled differently:
+ * we use special registers of the ChipCommon core. Hacking cmd_ctrl to
+ * understand and convert standard commands would be much more complicated.
+ */
+static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct nand_chip *nand_chip,
+ unsigned command, int column,
+ int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+ struct bcma_drv_cc *cc = b47n->cc;
+ u32 ctlcode;
+ int i;
+
+ if (column != -1)
+ b47n->curr_column = column;
+ if (page_addr != -1)
+ b47n->curr_page_addr = page_addr;
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ nand_chip->legacy.cmd_ctrl(nand_chip, command, NAND_CTRL_CLE);
+
+ ndelay(100);
+ nand_wait_ready(nand_chip);
+ break;
+ case NAND_CMD_READID:
+ ctlcode = NCTL_CSA | 0x01000000 | NCTL_CMD1W | NCTL_CMD0;
+ ctlcode |= NAND_CMD_READID;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode)) {
+ pr_err("READID error\n");
+ break;
+ }
+
+ /*
+ * ID reads are special: the last one has to go without the
+ * NCTL_CSA bit. We don't know how many reads the NAND
+ * subsystem is going to perform, so cache everything.
+ */
+ for (i = 0; i < ARRAY_SIZE(b47n->id_data); i++) {
+ ctlcode = NCTL_CSA | NCTL_READ;
+ if (i == ARRAY_SIZE(b47n->id_data) - 1)
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+ ctlcode)) {
+ pr_err("READID error\n");
+ break;
+ }
+ b47n->id_data[i] =
+ bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA)
+ & 0xFF;
+ }
+
+ break;
+ case NAND_CMD_STATUS:
+ ctlcode = NCTL_CSA | NCTL_CMD0 | NAND_CMD_STATUS;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("STATUS command error\n");
+ break;
+ case NAND_CMD_READ0:
+ break;
+ case NAND_CMD_READOOB:
+ if (page_addr != -1)
+ b47n->curr_column += mtd->writesize;
+ break;
+ case NAND_CMD_ERASE1:
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+ ctlcode = NCTL_ROW | NCTL_CMD1W | NCTL_CMD0 |
+ NAND_CMD_ERASE1 | (NAND_CMD_ERASE2 << 8);
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("ERASE1 failed\n");
+ break;
+ case NAND_CMD_ERASE2:
+ break;
+ case NAND_CMD_SEQIN:
+ /* Set page and column */
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_COL_ADDR,
+ b47n->curr_column);
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+
+ /* Prepare to write */
+ ctlcode = 0x40000000 | NCTL_ROW | NCTL_COL | NCTL_CMD0;
+ ctlcode |= NAND_CMD_SEQIN;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("SEQIN failed\n");
+ break;
+ case NAND_CMD_PAGEPROG:
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_CMD0 |
+ NAND_CMD_PAGEPROG))
+ pr_err("PAGEPROG failed\n");
+ if (bcm47xxnflash_ops_bcm4706_poll(cc))
+ pr_err("PAGEPROG not ready\n");
+ break;
+ default:
+ pr_err("Command 0x%X unsupported\n", command);
+ break;
+ }
+ b47n->curr_command = command;
+}
+
+static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct nand_chip *nand_chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+ struct bcma_drv_cc *cc = b47n->cc;
+ u32 tmp = 0;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_READID:
+ if (b47n->curr_column >= ARRAY_SIZE(b47n->id_data)) {
+ pr_err("Requested invalid id_data: %d\n",
+ b47n->curr_column);
+ return 0;
+ }
+ return b47n->id_data[b47n->curr_column++];
+ case NAND_CMD_STATUS:
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_READ))
+ return 0;
+ return bcma_cc_read32(cc, BCMA_CC_NFLASH_DATA) & 0xff;
+ case NAND_CMD_READOOB:
+ bcm47xxnflash_ops_bcm4706_read(mtd, (u8 *)&tmp, 4);
+ return tmp & 0xFF;
+ }
+
+ pr_err("Invalid command for byte read: 0x%X\n", b47n->curr_command);
+ return 0;
+}
+
+static void bcm47xxnflash_ops_bcm4706_read_buf(struct nand_chip *nand_chip,
+ uint8_t *buf, int len)
+{
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ bcm47xxnflash_ops_bcm4706_read(nand_to_mtd(nand_chip), buf,
+ len);
+ return;
+ }
+
+ pr_err("Invalid command for buf read: 0x%X\n", b47n->curr_command);
+}
+
+static void bcm47xxnflash_ops_bcm4706_write_buf(struct nand_chip *nand_chip,
+ const uint8_t *buf, int len)
+{
+ struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_SEQIN:
+ bcm47xxnflash_ops_bcm4706_write(nand_to_mtd(nand_chip), buf,
+ len);
+ return;
+ }
+
+ pr_err("Invalid command for buf write: 0x%X\n", b47n->curr_command);
+}
+
+/**************************************************
+ * Init
+ **************************************************/
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)&b47n->nand_chip;
+ int err;
+ u32 freq;
+ u16 clock;
+ u8 w0, w1, w2, w3, w4;
+
+ unsigned long chipsize; /* MiB */
+ u8 tbits, col_bits, col_size, row_bits, row_bsize;
+ u32 val;
+
+ nand_chip->legacy.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
+ nand_chip->legacy.cmd_ctrl = bcm47xxnflash_ops_bcm4706_cmd_ctrl;
+ nand_chip->legacy.dev_ready = bcm47xxnflash_ops_bcm4706_dev_ready;
+ b47n->nand_chip.legacy.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
+ b47n->nand_chip.legacy.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
+ b47n->nand_chip.legacy.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
+ b47n->nand_chip.legacy.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
+ b47n->nand_chip.legacy.set_features = nand_get_set_features_notsupp;
+ b47n->nand_chip.legacy.get_features = nand_get_set_features_notsupp;
+
+ nand_chip->legacy.chip_delay = 50;
+ b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
+ /* TODO: implement ECC */
+ b47n->nand_chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_NONE;
+
+ /* Enable NAND flash access */
+ bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+ BCMA_CC_4706_FLASHSCFG_NF1);
+
+ /* Configure wait counters */
+ if (b47n->cc->status & BCMA_CC_CHIPST_4706_PKG_OPTION) {
+ /* 400 MHz */
+ freq = 400000000 / 4;
+ } else {
+ freq = bcma_chipco_pll_read(b47n->cc, 4);
+ freq = (freq & 0xFFF) >> 3;
+ /* Fixed reference clock 25 MHz and m = 2 */
+ freq = (freq * 25000000 / 2) / 4;
+ }
+ clock = freq / 1000000;
+ w0 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(15, clock);
+ w1 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(20, clock);
+ w2 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+ w3 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+ w4 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(100, clock);
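+ /* w0..w3 occupy 6-bit fields at bits 0/6/12/18, w4 the byte at bit 24 */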
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_WAITCNT0,
+ (w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));
+
+ /* Scan NAND */
+ err = nand_scan(&b47n->nand_chip, 1);
+ if (err) {
+ pr_err("Could not scan NAND flash: %d\n", err);
+ goto exit;
+ }
+
+ /* Configure FLASH */
+ chipsize = nanddev_target_size(&b47n->nand_chip.base) >> 20;
+ tbits = ffs(chipsize); /* find first bit set */
+ if (!tbits || tbits != fls(chipsize)) {
+ pr_err("Invalid flash size: 0x%lX\n", chipsize);
+ err = -ENOTSUPP;
+ goto exit;
+ }
+ tbits += 19; /* Broadcom increases *index* by 20, we increase *pos* */
+
+ col_bits = b47n->nand_chip.page_shift + 1;
+ col_size = (col_bits + 7) / 8;
+
+ row_bits = tbits - col_bits + 1;
+ row_bsize = (row_bits + 7) / 8;
+
+ val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2;
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_CONF, val);
+
+exit:
+ if (err)
+ bcma_cc_mask32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+ ~BCMA_CC_4706_FLASHSCFG_NF1);
+ return err;
+}
diff --git a/drivers/mtd/nand/raw/brcmnand/Kconfig b/drivers/mtd/nand/raw/brcmnand/Kconfig
new file mode 100644
index 0000000000..4bc51bf60a
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/Kconfig
@@ -0,0 +1,49 @@
+config MTD_NAND_BRCMNAND
+ tristate "Broadcom STB NAND controller"
+ depends on ARM || ARM64 || MIPS || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables the Broadcom NAND controller driver. The controller was
+ originally designed for set-top boxes but is used on various BCM7xxx,
+ BCM3xxx, BCM63xxx, iProc/Cygnus and other SoCs.
+
+if MTD_NAND_BRCMNAND
+
+config MTD_NAND_BRCMNAND_BCM63XX
+ tristate "Broadcom BCM63xx NAND controller glue"
+ default BCM63XX
+ help
+ Enables the BRCMNAND glue driver to register the NAND controller
+ on Broadcom BCM63xx MIPS-based DSL platforms.
+
+config MTD_NAND_BRCMNAND_BCMA
+ tristate "Broadcom BCMA NAND controller"
+ depends on BCMA_NFLASH
+ depends on BCMA
+ help
+ Enables the BRCMNAND controller over BCMA on BCM47186/BCM5358 SoCs.
+ The glue driver will take care of performing the low-level I/O
+ operations to interface the BRCMNAND controller over the BCMA bus.
+
+config MTD_NAND_BRCMNAND_BCMBCA
+ tristate "Broadcom BCMBCA NAND controller glue"
+ default ARCH_BCMBCA
+ help
+ Enables the BRCMNAND glue driver to register the NAND controller
+ on Broadcom BCA platforms.
+
+config MTD_NAND_BRCMNAND_BRCMSTB
+ tristate "Broadcom STB Nand controller glue"
+ default ARCH_BRCMSTB
+ help
+ Enables the BRCMNAND glue driver to register the NAND controller
+ on Broadcom STB platforms.
+
+config MTD_NAND_BRCMNAND_IPROC
+ tristate "Broadcom iProc NAND controller glue"
+ default ARCH_BCM_IPROC
+ help
+ Enables the BRCMNAND controller glue driver to register the NAND
+ controller on Broadcom iProc platforms.
+
+endif # MTD_NAND_BRCMNAND
diff --git a/drivers/mtd/nand/raw/brcmnand/Makefile b/drivers/mtd/nand/raw/brcmnand/Makefile
new file mode 100644
index 0000000000..9907e3ec4b
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+# link order matters; don't link the more generic brcmstb_nand.o before the
+# more specific iproc_nand.o, for instance
+obj-$(CONFIG_MTD_NAND_BRCMNAND_IPROC) += iproc_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND_BCMBCA) += bcm63138_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND_BCM63XX) += bcm6368_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND_BRCMSTB) += brcmstb_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand.o
+
+obj-$(CONFIG_MTD_NAND_BRCMNAND_BCMA) += bcma_nand.o
diff --git a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
new file mode 100644
index 0000000000..9596629000
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2015 Broadcom Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "brcmnand.h"
+
+struct bcm63138_nand_soc {
+ struct brcmnand_soc soc;
+ void __iomem *base;
+};
+
+#define BCM63138_NAND_INT_STATUS 0x00
+#define BCM63138_NAND_INT_EN 0x04
+
+enum {
+ BCM63138_CTLRDY = BIT(4),
+};
+
+static bool bcm63138_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct bcm63138_nand_soc *priv =
+ container_of(soc, struct bcm63138_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM63138_NAND_INT_STATUS;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & BCM63138_CTLRDY) {
+ brcmnand_writel(val & ~BCM63138_CTLRDY, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void bcm63138_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct bcm63138_nand_soc *priv =
+ container_of(soc, struct bcm63138_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM63138_NAND_INT_EN;
+ u32 val = brcmnand_readl(mmio);
+
+ if (en)
+ val |= BCM63138_CTLRDY;
+ else
+ val &= ~BCM63138_CTLRDY;
+
+ brcmnand_writel(val, mmio);
+}
+
+static int bcm63138_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm63138_nand_soc *priv;
+ struct brcmnand_soc *soc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ soc = &priv->soc;
+
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "nand-int-base");
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ soc->ctlrdy_ack = bcm63138_nand_intc_ack;
+ soc->ctlrdy_set_enabled = bcm63138_nand_intc_set;
+
+ return brcmnand_probe(pdev, soc);
+}
+
+static const struct of_device_id bcm63138_nand_of_match[] = {
+ { .compatible = "brcm,nand-bcm63138" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm63138_nand_of_match);
+
+static struct platform_driver bcm63138_nand_driver = {
+ .probe = bcm63138_nand_probe,
+ .remove = brcmnand_remove,
+ .driver = {
+ .name = "bcm63138_nand",
+ .pm = &brcmnand_pm_ops,
+ .of_match_table = bcm63138_nand_of_match,
+ }
+};
+module_platform_driver(bcm63138_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for BCM63138");
diff --git a/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c
new file mode 100644
index 0000000000..a06cd87f83
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2015 Simon Arlott
+ *
+ * Derived from bcm63138_nand.c:
+ * Copyright © 2015 Broadcom Corporation
+ *
+ * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/include/bcm963xx/63268_map_part.h:
+ * Copyright 2000-2010 Broadcom Corporation
+ *
+ * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/flash/nandflash.c:
+ * Copyright 2000-2010 Broadcom Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "brcmnand.h"
+
+struct bcm6368_nand_soc {
+ struct brcmnand_soc soc;
+ void __iomem *base;
+};
+
+#define BCM6368_NAND_INT 0x00
+#define BCM6368_NAND_STATUS_SHIFT 0
+#define BCM6368_NAND_STATUS_MASK (0xfff << BCM6368_NAND_STATUS_SHIFT)
+#define BCM6368_NAND_ENABLE_SHIFT 16
+#define BCM6368_NAND_ENABLE_MASK (0xffff << BCM6368_NAND_ENABLE_SHIFT)
+#define BCM6368_NAND_BASE_ADDR0 0x04
+#define BCM6368_NAND_BASE_ADDR1 0x0c
+
+enum {
+ BCM6368_NP_READ = BIT(0),
+ BCM6368_BLOCK_ERASE = BIT(1),
+ BCM6368_COPY_BACK = BIT(2),
+ BCM6368_PAGE_PGM = BIT(3),
+ BCM6368_CTRL_READY = BIT(4),
+ BCM6368_DEV_RBPIN = BIT(5),
+ BCM6368_ECC_ERR_UNC = BIT(6),
+ BCM6368_ECC_ERR_CORR = BIT(7),
+};
+
+static bool bcm6368_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct bcm6368_nand_soc *priv =
+ container_of(soc, struct bcm6368_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM6368_NAND_INT;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & (BCM6368_CTRL_READY << BCM6368_NAND_STATUS_SHIFT)) {
+ /* Ack interrupt */
+ val &= ~BCM6368_NAND_STATUS_MASK;
+ val |= BCM6368_CTRL_READY << BCM6368_NAND_STATUS_SHIFT;
+ brcmnand_writel(val, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void bcm6368_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct bcm6368_nand_soc *priv =
+ container_of(soc, struct bcm6368_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM6368_NAND_INT;
+ u32 val = brcmnand_readl(mmio);
+
+ /* Don't ack any interrupts */
+ val &= ~BCM6368_NAND_STATUS_MASK;
+
+ if (en)
+ val |= BCM6368_CTRL_READY << BCM6368_NAND_ENABLE_SHIFT;
+ else
+ val &= ~(BCM6368_CTRL_READY << BCM6368_NAND_ENABLE_SHIFT);
+
+ brcmnand_writel(val, mmio);
+}
+
+static int bcm6368_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm6368_nand_soc *priv;
+ struct brcmnand_soc *soc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ soc = &priv->soc;
+
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "nand-int-base");
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ soc->ctlrdy_ack = bcm6368_nand_intc_ack;
+ soc->ctlrdy_set_enabled = bcm6368_nand_intc_set;
+
+ /* Disable and ack all interrupts */
+ brcmnand_writel(0, priv->base + BCM6368_NAND_INT);
+ brcmnand_writel(BCM6368_NAND_STATUS_MASK,
+ priv->base + BCM6368_NAND_INT);
+
+ return brcmnand_probe(pdev, soc);
+}
+
+static const struct of_device_id bcm6368_nand_of_match[] = {
+ { .compatible = "brcm,nand-bcm6368" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm6368_nand_of_match);
+
+static struct platform_driver bcm6368_nand_driver = {
+ .probe = bcm6368_nand_probe,
+ .remove = brcmnand_remove,
+ .driver = {
+ .name = "bcm6368_nand",
+ .pm = &brcmnand_pm_ops,
+ .of_match_table = bcm6368_nand_of_match,
+ }
+};
+module_platform_driver(bcm6368_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Simon Arlott");
+MODULE_DESCRIPTION("NAND driver for BCM6368");
diff --git a/drivers/mtd/nand/raw/brcmnand/bcma_nand.c b/drivers/mtd/nand/raw/brcmnand/bcma_nand.c
new file mode 100644
index 0000000000..dd27977919
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/bcma_nand.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2021 Broadcom
+ */
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_driver_chipcommon.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "brcmnand.h"
+
+struct brcmnand_bcma_soc {
+ struct brcmnand_soc soc;
+ struct bcma_drv_cc *cc;
+};
+
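+/*
+ * ChipCommon NAND registers that carry raw flash byte streams (spare-area
+ * bytes and device IDs) rather than CPU-ordered words; the register
+ * accessors below byte-swap accesses to these to preserve byte order.
+ */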
+static inline bool brcmnand_bcma_needs_swapping(u32 offset)
+{
+ switch (offset) {
+ case BCMA_CC_NAND_SPARE_RD0:
+ case BCMA_CC_NAND_SPARE_RD4:
+ case BCMA_CC_NAND_SPARE_RD8:
+ case BCMA_CC_NAND_SPARE_RD12:
+ case BCMA_CC_NAND_SPARE_WR0:
+ case BCMA_CC_NAND_SPARE_WR4:
+ case BCMA_CC_NAND_SPARE_WR8:
+ case BCMA_CC_NAND_SPARE_WR12:
+ case BCMA_CC_NAND_DEVID:
+ case BCMA_CC_NAND_DEVID_X:
+ case BCMA_CC_NAND_SPARE_RD16:
+ case BCMA_CC_NAND_SPARE_RD20:
+ case BCMA_CC_NAND_SPARE_RD24:
+ case BCMA_CC_NAND_SPARE_RD28:
+ return true;
+ }
+
+ return false;
+}
+
+static inline struct brcmnand_bcma_soc *to_bcma_soc(struct brcmnand_soc *soc)
+{
+ return container_of(soc, struct brcmnand_bcma_soc, soc);
+}
+
+static u32 brcmnand_bcma_read_reg(struct brcmnand_soc *soc, u32 offset)
+{
+ struct brcmnand_bcma_soc *sc = to_bcma_soc(soc);
+ u32 val;
+
+ /* Offset into the NAND block; the flash cache is handled separately */
+ if (offset == BRCMNAND_NON_MMIO_FC_ADDR)
+ offset = BCMA_CC_NAND_CACHE_DATA;
+ else
+ offset += BCMA_CC_NAND_REVISION;
+
+ val = bcma_cc_read32(sc->cc, offset);
+
+ /* Swap if necessary */
+ if (brcmnand_bcma_needs_swapping(offset))
+ val = be32_to_cpu((__force __be32)val);
+ return val;
+}
+
+static void brcmnand_bcma_write_reg(struct brcmnand_soc *soc, u32 val,
+ u32 offset)
+{
+ struct brcmnand_bcma_soc *sc = to_bcma_soc(soc);
+
+ /* Offset into the NAND block; the flash cache is handled separately */
+ if (offset == BRCMNAND_NON_MMIO_FC_ADDR)
+ offset = BCMA_CC_NAND_CACHE_DATA;
+ else
+ offset += BCMA_CC_NAND_REVISION;
+
+ /* Swap if necessary */
+ if (brcmnand_bcma_needs_swapping(offset))
+ val = (__force u32)cpu_to_be32(val);
+
+ bcma_cc_write32(sc->cc, offset, val);
+}
+
+static struct brcmnand_io_ops brcmnand_bcma_io_ops = {
+ .read_reg = brcmnand_bcma_read_reg,
+ .write_reg = brcmnand_bcma_write_reg,
+};
+
+static void brcmnand_bcma_prepare_data_bus(struct brcmnand_soc *soc, bool prepare,
+ bool is_param)
+{
+ struct brcmnand_bcma_soc *sc = to_bcma_soc(soc);
+
+ /* Reset the cache address to ensure we are already accessing the
+ * beginning of a sub-page.
+ */
+ bcma_cc_write32(sc->cc, BCMA_CC_NAND_CACHE_ADDR, 0);
+}
+
+static int brcmnand_bcma_nand_probe(struct platform_device *pdev)
+{
+ struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
+ struct brcmnand_bcma_soc *soc;
+
+ soc = devm_kzalloc(&pdev->dev, sizeof(*soc), GFP_KERNEL);
+ if (!soc)
+ return -ENOMEM;
+
+ soc->cc = container_of(nflash, struct bcma_drv_cc, nflash);
+ soc->soc.prepare_data_bus = brcmnand_bcma_prepare_data_bus;
+ soc->soc.ops = &brcmnand_bcma_io_ops;
+
+ if (soc->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+ dev_err(&pdev->dev, "Use bcm47xxnflash for 4706!\n");
+ return -ENODEV;
+ }
+
+ return brcmnand_probe(pdev, &soc->soc);
+}
+
+static struct platform_driver brcmnand_bcma_nand_driver = {
+ .probe = brcmnand_bcma_nand_probe,
+ .remove = brcmnand_remove,
+ .driver = {
+ .name = "bcma_brcmnand",
+ .pm = &brcmnand_pm_ops,
+ }
+};
+module_platform_driver(brcmnand_bcma_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("NAND controller driver glue for BCMA chips");
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
new file mode 100644
index 0000000000..440bef4779
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -0,0 +1,3328 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2010-2015 Broadcom Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/brcmnand.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/ioport.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/static_key.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+
+#include "brcmnand.h"
+
+/*
+ * This flag controls if WP stays on between erase/write commands to mitigate
+ * flash corruption due to power glitches. Values:
+ * 0: NAND_WP is not used or not available
+ * 1: NAND_WP is set by default, cleared for erase/write operations
+ * 2: NAND_WP is always cleared
+ */
+static int wp_on = 1;
+module_param(wp_on, int, 0444);
+
+/***********************************************************************
+ * Definitions
+ ***********************************************************************/
+
+#define DRV_NAME "brcmnand"
+
+#define CMD_NULL 0x00
+#define CMD_PAGE_READ 0x01
+#define CMD_SPARE_AREA_READ 0x02
+#define CMD_STATUS_READ 0x03
+#define CMD_PROGRAM_PAGE 0x04
+#define CMD_PROGRAM_SPARE_AREA 0x05
+#define CMD_COPY_BACK 0x06
+#define CMD_DEVICE_ID_READ 0x07
+#define CMD_BLOCK_ERASE 0x08
+#define CMD_FLASH_RESET 0x09
+#define CMD_BLOCKS_LOCK 0x0a
+#define CMD_BLOCKS_LOCK_DOWN 0x0b
+#define CMD_BLOCKS_UNLOCK 0x0c
+#define CMD_READ_BLOCKS_LOCK_STATUS 0x0d
+#define CMD_PARAMETER_READ 0x0e
+#define CMD_PARAMETER_CHANGE_COL 0x0f
+#define CMD_LOW_LEVEL_OP 0x10
+
+struct brcm_nand_dma_desc {
+ u32 next_desc;
+ u32 next_desc_ext;
+ u32 cmd_irq;
+ u32 dram_addr;
+ u32 dram_addr_ext;
+ u32 tfr_len;
+ u32 total_len;
+ u32 flash_addr;
+ u32 flash_addr_ext;
+ u32 cs;
+ u32 pad2[5];
+ u32 status_valid;
+} __packed;
+
+/* Bitfields for brcm_nand_dma_desc::status_valid */
+#define FLASH_DMA_ECC_ERROR (1 << 8)
+#define FLASH_DMA_CORR_ERROR (1 << 9)
+
+/* Bitfields for DMA_MODE */
+#define FLASH_DMA_MODE_STOP_ON_ERROR BIT(1) /* stop on uncorrectable ECC error */
+#define FLASH_DMA_MODE_MODE BIT(0) /* linked list */
+#define FLASH_DMA_MODE_MASK (FLASH_DMA_MODE_STOP_ON_ERROR | \
+ FLASH_DMA_MODE_MODE)
+
+/* 512B flash cache in the NAND controller HW */
+#define FC_SHIFT 9U
+#define FC_BYTES 512U
+#define FC_WORDS (FC_BYTES >> 2)
+
+#define BRCMNAND_MIN_PAGESIZE 512
+#define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
+#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
+
+#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
+#define NAND_POLL_STATUS_TIMEOUT_MS 100
+
+#define EDU_CMD_WRITE 0x00
+#define EDU_CMD_READ 0x01
+#define EDU_STATUS_ACTIVE BIT(0)
+#define EDU_ERR_STATUS_ERRACK BIT(0)
+#define EDU_DONE_MASK GENMASK(1, 0)
+
+#define EDU_CONFIG_MODE_NAND BIT(0)
+#define EDU_CONFIG_SWAP_BYTE BIT(1)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define EDU_CONFIG_SWAP_CFG EDU_CONFIG_SWAP_BYTE
+#else
+#define EDU_CONFIG_SWAP_CFG 0
+#endif
+
+/* edu registers */
+enum edu_reg {
+ EDU_CONFIG = 0,
+ EDU_DRAM_ADDR,
+ EDU_EXT_ADDR,
+ EDU_LENGTH,
+ EDU_CMD,
+ EDU_STOP,
+ EDU_STATUS,
+ EDU_DONE,
+ EDU_ERR_STATUS,
+};
+
+static const u16 edu_regs[] = {
+ [EDU_CONFIG] = 0x00,
+ [EDU_DRAM_ADDR] = 0x04,
+ [EDU_EXT_ADDR] = 0x08,
+ [EDU_LENGTH] = 0x0c,
+ [EDU_CMD] = 0x10,
+ [EDU_STOP] = 0x14,
+ [EDU_STATUS] = 0x18,
+ [EDU_DONE] = 0x1c,
+ [EDU_ERR_STATUS] = 0x20,
+};
+
+/* flash_dma registers */
+enum flash_dma_reg {
+ FLASH_DMA_REVISION = 0,
+ FLASH_DMA_FIRST_DESC,
+ FLASH_DMA_FIRST_DESC_EXT,
+ FLASH_DMA_CTRL,
+ FLASH_DMA_MODE,
+ FLASH_DMA_STATUS,
+ FLASH_DMA_INTERRUPT_DESC,
+ FLASH_DMA_INTERRUPT_DESC_EXT,
+ FLASH_DMA_ERROR_STATUS,
+ FLASH_DMA_CURRENT_DESC,
+ FLASH_DMA_CURRENT_DESC_EXT,
+};
+
+/* flash_dma registers v0 */
+static const u16 flash_dma_regs_v0[] = {
+ [FLASH_DMA_REVISION] = 0x00,
+ [FLASH_DMA_FIRST_DESC] = 0x04,
+ [FLASH_DMA_CTRL] = 0x08,
+ [FLASH_DMA_MODE] = 0x0c,
+ [FLASH_DMA_STATUS] = 0x10,
+ [FLASH_DMA_INTERRUPT_DESC] = 0x14,
+ [FLASH_DMA_ERROR_STATUS] = 0x18,
+ [FLASH_DMA_CURRENT_DESC] = 0x1c,
+};
+
+/* flash_dma registers v1 */
+static const u16 flash_dma_regs_v1[] = {
+ [FLASH_DMA_REVISION] = 0x00,
+ [FLASH_DMA_FIRST_DESC] = 0x04,
+ [FLASH_DMA_FIRST_DESC_EXT] = 0x08,
+ [FLASH_DMA_CTRL] = 0x0c,
+ [FLASH_DMA_MODE] = 0x10,
+ [FLASH_DMA_STATUS] = 0x14,
+ [FLASH_DMA_INTERRUPT_DESC] = 0x18,
+ [FLASH_DMA_INTERRUPT_DESC_EXT] = 0x1c,
+ [FLASH_DMA_ERROR_STATUS] = 0x20,
+ [FLASH_DMA_CURRENT_DESC] = 0x24,
+ [FLASH_DMA_CURRENT_DESC_EXT] = 0x28,
+};
+
+/* flash_dma registers v4 */
+static const u16 flash_dma_regs_v4[] = {
+ [FLASH_DMA_REVISION] = 0x00,
+ [FLASH_DMA_FIRST_DESC] = 0x08,
+ [FLASH_DMA_FIRST_DESC_EXT] = 0x0c,
+ [FLASH_DMA_CTRL] = 0x10,
+ [FLASH_DMA_MODE] = 0x14,
+ [FLASH_DMA_STATUS] = 0x18,
+ [FLASH_DMA_INTERRUPT_DESC] = 0x20,
+ [FLASH_DMA_INTERRUPT_DESC_EXT] = 0x24,
+ [FLASH_DMA_ERROR_STATUS] = 0x28,
+ [FLASH_DMA_CURRENT_DESC] = 0x30,
+ [FLASH_DMA_CURRENT_DESC_EXT] = 0x34,
+};
+
+/* Controller feature flags */
+enum {
+ BRCMNAND_HAS_1K_SECTORS = BIT(0),
+ BRCMNAND_HAS_PREFETCH = BIT(1),
+ BRCMNAND_HAS_CACHE_MODE = BIT(2),
+ BRCMNAND_HAS_WP = BIT(3),
+};
+
+struct brcmnand_host;
+
+static DEFINE_STATIC_KEY_FALSE(brcmnand_soc_has_ops_key);
+
+struct brcmnand_controller {
+ struct device *dev;
+ struct nand_controller controller;
+ void __iomem *nand_base;
+ void __iomem *nand_fc; /* flash cache */
+ void __iomem *flash_dma_base;
+ int irq;
+ unsigned int dma_irq;
+ int nand_version;
+
+ /* Some SoCs provide custom interrupt status register(s) */
+ struct brcmnand_soc *soc;
+
+ /* Some SoCs have a gateable clock for the controller */
+ struct clk *clk;
+
+ int cmd_pending;
+ bool dma_pending;
+ bool edu_pending;
+ struct completion done;
+ struct completion dma_done;
+ struct completion edu_done;
+
+ /* List of NAND hosts (one for each chip-select) */
+ struct list_head host_list;
+
+ /* EDU info, per-transaction */
+ const u16 *edu_offsets;
+ void __iomem *edu_base;
+ int edu_irq;
+ int edu_count;
+ u64 edu_dram_addr;
+ u32 edu_ext_addr;
+ u32 edu_cmd;
+ u32 edu_config;
+ int sas; /* spare area size, per flash cache */
+ int sector_size_1k;
+ u8 *oob;
+
+ /* flash_dma reg */
+ const u16 *flash_dma_offsets;
+ struct brcm_nand_dma_desc *dma_desc;
+ dma_addr_t dma_pa;
+
+ int (*dma_trans)(struct brcmnand_host *host, u64 addr, u32 *buf,
+ u8 *oob, u32 len, u8 dma_cmd);
+
+ /* in-memory cache of the FLASH_CACHE, used only for some commands */
+ u8 flash_cache[FC_BYTES];
+
+ /* Controller revision details */
+ const u16 *reg_offsets;
+ unsigned int reg_spacing; /* between CS1, CS2, ... regs */
+ const u8 *cs_offsets; /* within each chip-select */
+ const u8 *cs0_offsets; /* within CS0, if different */
+ unsigned int max_block_size;
+ const unsigned int *block_sizes;
+ unsigned int max_page_size;
+ const unsigned int *page_sizes;
+ unsigned int page_size_shift;
+ unsigned int max_oob;
+ u32 ecc_level_shift;
+ u32 features;
+
+ /* for low-power standby/resume only */
+ u32 nand_cs_nand_select;
+ u32 nand_cs_nand_xor;
+ u32 corr_stat_threshold;
+ u32 flash_dma_mode;
+ u32 flash_edu_mode;
+ bool pio_poll_mode;
+};
+
+struct brcmnand_cfg {
+ u64 device_size;
+ unsigned int block_size;
+ unsigned int page_size;
+ unsigned int spare_area_size;
+ unsigned int device_width;
+ unsigned int col_adr_bytes;
+ unsigned int blk_adr_bytes;
+ unsigned int ful_adr_bytes;
+ unsigned int sector_size_1k;
+ unsigned int ecc_level;
+ /* use for low-power standby/resume only */
+ u32 acc_control;
+ u32 config;
+ u32 config_ext;
+ u32 timing_1;
+ u32 timing_2;
+};
+
+struct brcmnand_host {
+ struct list_head node;
+
+ struct nand_chip chip;
+ struct platform_device *pdev;
+ int cs;
+
+ unsigned int last_cmd;
+ unsigned int last_byte;
+ u64 last_addr;
+ struct brcmnand_cfg hwcfg;
+ struct brcmnand_controller *ctrl;
+};
+
+enum brcmnand_reg {
+ BRCMNAND_CMD_START = 0,
+ BRCMNAND_CMD_EXT_ADDRESS,
+ BRCMNAND_CMD_ADDRESS,
+ BRCMNAND_INTFC_STATUS,
+ BRCMNAND_CS_SELECT,
+ BRCMNAND_CS_XOR,
+ BRCMNAND_LL_OP,
+ BRCMNAND_CS0_BASE,
+ BRCMNAND_CS1_BASE, /* CS1 regs, if non-contiguous */
+ BRCMNAND_CORR_THRESHOLD,
+ BRCMNAND_CORR_THRESHOLD_EXT,
+ BRCMNAND_UNCORR_COUNT,
+ BRCMNAND_CORR_COUNT,
+ BRCMNAND_CORR_EXT_ADDR,
+ BRCMNAND_CORR_ADDR,
+ BRCMNAND_UNCORR_EXT_ADDR,
+ BRCMNAND_UNCORR_ADDR,
+ BRCMNAND_SEMAPHORE,
+ BRCMNAND_ID,
+ BRCMNAND_ID_EXT,
+ BRCMNAND_LL_RDATA,
+ BRCMNAND_OOB_READ_BASE,
+ BRCMNAND_OOB_READ_10_BASE, /* offset 0x10, if non-contiguous */
+ BRCMNAND_OOB_WRITE_BASE,
+ BRCMNAND_OOB_WRITE_10_BASE, /* offset 0x10, if non-contiguous */
+ BRCMNAND_FC_BASE,
+};
+
+/* BRCMNAND v2.1-v2.2 */
+static const u16 brcmnand_regs_v21[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x5c,
+ [BRCMNAND_CS_SELECT] = 0x14,
+ [BRCMNAND_CS_XOR] = 0x18,
+ [BRCMNAND_LL_OP] = 0,
+ [BRCMNAND_CS0_BASE] = 0x40,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0,
+ [BRCMNAND_UNCORR_COUNT] = 0,
+ [BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x60,
+ [BRCMNAND_CORR_ADDR] = 0x64,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x68,
+ [BRCMNAND_UNCORR_ADDR] = 0x6c,
+ [BRCMNAND_SEMAPHORE] = 0x50,
+ [BRCMNAND_ID] = 0x54,
+ [BRCMNAND_ID_EXT] = 0,
+ [BRCMNAND_LL_RDATA] = 0,
+ [BRCMNAND_OOB_READ_BASE] = 0x20,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x30,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x200,
+};
+
+/* BRCMNAND v3.3-v4.0 */
+static const u16 brcmnand_regs_v33[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x6c,
+ [BRCMNAND_CS_SELECT] = 0x14,
+ [BRCMNAND_CS_XOR] = 0x18,
+ [BRCMNAND_LL_OP] = 0x178,
+ [BRCMNAND_CS0_BASE] = 0x40,
+ [BRCMNAND_CS1_BASE] = 0xd0,
+ [BRCMNAND_CORR_THRESHOLD] = 0x84,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0,
+ [BRCMNAND_UNCORR_COUNT] = 0,
+ [BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x70,
+ [BRCMNAND_CORR_ADDR] = 0x74,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
+ [BRCMNAND_UNCORR_ADDR] = 0x7c,
+ [BRCMNAND_SEMAPHORE] = 0x58,
+ [BRCMNAND_ID] = 0x60,
+ [BRCMNAND_ID_EXT] = 0x64,
+ [BRCMNAND_LL_RDATA] = 0x17c,
+ [BRCMNAND_OOB_READ_BASE] = 0x20,
+ [BRCMNAND_OOB_READ_10_BASE] = 0x130,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x30,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x200,
+};
+
+/* BRCMNAND v5.0 */
+static const u16 brcmnand_regs_v50[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x6c,
+ [BRCMNAND_CS_SELECT] = 0x14,
+ [BRCMNAND_CS_XOR] = 0x18,
+ [BRCMNAND_LL_OP] = 0x178,
+ [BRCMNAND_CS0_BASE] = 0x40,
+ [BRCMNAND_CS1_BASE] = 0xd0,
+ [BRCMNAND_CORR_THRESHOLD] = 0x84,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0,
+ [BRCMNAND_UNCORR_COUNT] = 0,
+ [BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x70,
+ [BRCMNAND_CORR_ADDR] = 0x74,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
+ [BRCMNAND_UNCORR_ADDR] = 0x7c,
+ [BRCMNAND_SEMAPHORE] = 0x58,
+ [BRCMNAND_ID] = 0x60,
+ [BRCMNAND_ID_EXT] = 0x64,
+ [BRCMNAND_LL_RDATA] = 0x17c,
+ [BRCMNAND_OOB_READ_BASE] = 0x20,
+ [BRCMNAND_OOB_READ_10_BASE] = 0x130,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x30,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0x140,
+ [BRCMNAND_FC_BASE] = 0x200,
+};
+
+/* BRCMNAND v6.0 - v7.1 */
+static const u16 brcmnand_regs_v60[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x14,
+ [BRCMNAND_CS_SELECT] = 0x18,
+ [BRCMNAND_CS_XOR] = 0x1c,
+ [BRCMNAND_LL_OP] = 0x20,
+ [BRCMNAND_CS0_BASE] = 0x50,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0xc0,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xc4,
+ [BRCMNAND_UNCORR_COUNT] = 0xfc,
+ [BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
+ [BRCMNAND_CORR_ADDR] = 0x110,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
+ [BRCMNAND_UNCORR_ADDR] = 0x118,
+ [BRCMNAND_SEMAPHORE] = 0x150,
+ [BRCMNAND_ID] = 0x194,
+ [BRCMNAND_ID_EXT] = 0x198,
+ [BRCMNAND_LL_RDATA] = 0x19c,
+ [BRCMNAND_OOB_READ_BASE] = 0x200,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x280,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x400,
+};
+
+/* BRCMNAND v7.1 */
+static const u16 brcmnand_regs_v71[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x14,
+ [BRCMNAND_CS_SELECT] = 0x18,
+ [BRCMNAND_CS_XOR] = 0x1c,
+ [BRCMNAND_LL_OP] = 0x20,
+ [BRCMNAND_CS0_BASE] = 0x50,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0xdc,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
+ [BRCMNAND_UNCORR_COUNT] = 0xfc,
+ [BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
+ [BRCMNAND_CORR_ADDR] = 0x110,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
+ [BRCMNAND_UNCORR_ADDR] = 0x118,
+ [BRCMNAND_SEMAPHORE] = 0x150,
+ [BRCMNAND_ID] = 0x194,
+ [BRCMNAND_ID_EXT] = 0x198,
+ [BRCMNAND_LL_RDATA] = 0x19c,
+ [BRCMNAND_OOB_READ_BASE] = 0x200,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x280,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x400,
+};
+
+/* BRCMNAND v7.2 */
+static const u16 brcmnand_regs_v72[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x14,
+ [BRCMNAND_CS_SELECT] = 0x18,
+ [BRCMNAND_CS_XOR] = 0x1c,
+ [BRCMNAND_LL_OP] = 0x20,
+ [BRCMNAND_CS0_BASE] = 0x50,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0xdc,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
+ [BRCMNAND_UNCORR_COUNT] = 0xfc,
+ [BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
+ [BRCMNAND_CORR_ADDR] = 0x110,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
+ [BRCMNAND_UNCORR_ADDR] = 0x118,
+ [BRCMNAND_SEMAPHORE] = 0x150,
+ [BRCMNAND_ID] = 0x194,
+ [BRCMNAND_ID_EXT] = 0x198,
+ [BRCMNAND_LL_RDATA] = 0x19c,
+ [BRCMNAND_OOB_READ_BASE] = 0x200,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x400,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x600,
+};
+
+enum brcmnand_cs_reg {
+ BRCMNAND_CS_CFG_EXT = 0,
+ BRCMNAND_CS_CFG,
+ BRCMNAND_CS_ACC_CONTROL,
+ BRCMNAND_CS_TIMING1,
+ BRCMNAND_CS_TIMING2,
+};
+
+/* Per chip-select offsets for v7.1 */
+static const u8 brcmnand_cs_offsets_v71[] = {
+ [BRCMNAND_CS_ACC_CONTROL] = 0x00,
+ [BRCMNAND_CS_CFG_EXT] = 0x04,
+ [BRCMNAND_CS_CFG] = 0x08,
+ [BRCMNAND_CS_TIMING1] = 0x0c,
+ [BRCMNAND_CS_TIMING2] = 0x10,
+};
+
+/* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
+static const u8 brcmnand_cs_offsets[] = {
+ [BRCMNAND_CS_ACC_CONTROL] = 0x00,
+ [BRCMNAND_CS_CFG_EXT] = 0x04,
+ [BRCMNAND_CS_CFG] = 0x04,
+ [BRCMNAND_CS_TIMING1] = 0x08,
+ [BRCMNAND_CS_TIMING2] = 0x0c,
+};
+
+/* Per chip-select offset for <= v5.0 on CS0 only */
+static const u8 brcmnand_cs_offsets_cs0[] = {
+ [BRCMNAND_CS_ACC_CONTROL] = 0x00,
+ [BRCMNAND_CS_CFG_EXT] = 0x08,
+ [BRCMNAND_CS_CFG] = 0x08,
+ [BRCMNAND_CS_TIMING1] = 0x10,
+ [BRCMNAND_CS_TIMING2] = 0x14,
+};
+
+/*
+ * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
+ * one config register, but once the bitfields overflowed, newer controllers
+ * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
+ */
+enum {
+ CFG_BLK_ADR_BYTES_SHIFT = 8,
+ CFG_COL_ADR_BYTES_SHIFT = 12,
+ CFG_FUL_ADR_BYTES_SHIFT = 16,
+ CFG_BUS_WIDTH_SHIFT = 23,
+ CFG_BUS_WIDTH = BIT(CFG_BUS_WIDTH_SHIFT),
+ CFG_DEVICE_SIZE_SHIFT = 24,
+
+ /* Only for v2.1 */
+ CFG_PAGE_SIZE_SHIFT_v2_1 = 30,
+
+ /* Only for pre-v7.1 (with no CFG_EXT register) */
+ CFG_PAGE_SIZE_SHIFT = 20,
+ CFG_BLK_SIZE_SHIFT = 28,
+
+ /* Only for v7.1+ (with CFG_EXT register) */
+ CFG_EXT_PAGE_SIZE_SHIFT = 0,
+ CFG_EXT_BLK_SIZE_SHIFT = 4,
+};
+
+/* BRCMNAND_INTFC_STATUS */
+enum {
+ INTFC_FLASH_STATUS = GENMASK(7, 0),
+
+ INTFC_ERASED = BIT(27),
+ INTFC_OOB_VALID = BIT(28),
+ INTFC_CACHE_VALID = BIT(29),
+ INTFC_FLASH_READY = BIT(30),
+ INTFC_CTLR_READY = BIT(31),
+};
+
+/***********************************************************************
+ * NAND ACC CONTROL bitfield
+ *
+ * Some bits have remained constant across hardware revisions, while
+ * others have shifted around.
+ ***********************************************************************/
+
+/* Constant for all versions (where supported) */
+enum {
+ /* See BRCMNAND_HAS_CACHE_MODE */
+ ACC_CONTROL_CACHE_MODE = BIT(22),
+
+ /* See BRCMNAND_HAS_PREFETCH */
+ ACC_CONTROL_PREFETCH = BIT(23),
+
+ ACC_CONTROL_PAGE_HIT = BIT(24),
+ ACC_CONTROL_WR_PREEMPT = BIT(25),
+ ACC_CONTROL_PARTIAL_PAGE = BIT(26),
+ ACC_CONTROL_RD_ERASED = BIT(27),
+ ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
+ ACC_CONTROL_WR_ECC = BIT(30),
+ ACC_CONTROL_RD_ECC = BIT(31),
+};
+
+#define ACC_CONTROL_ECC_SHIFT 16
+/* Only for v7.2 */
+#define ACC_CONTROL_ECC_EXT_SHIFT 13
+
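+/*
+ * SoCs without MMIO-mapped registers (e.g. BCMA) route all register access
+ * through soc->ops; the static key keeps the common MMIO path branch-free.
+ */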
+static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
+{
+#if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA)
+ return static_branch_unlikely(&brcmnand_soc_has_ops_key);
+#else
+ return false;
+#endif
+}
+
+static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
+{
+ if (brcmnand_non_mmio_ops(ctrl))
+ return brcmnand_soc_read(ctrl->soc, offs);
+ return brcmnand_readl(ctrl->nand_base + offs);
+}
+
+static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
+ u32 val)
+{
+ if (brcmnand_non_mmio_ops(ctrl))
+ brcmnand_soc_write(ctrl->soc, val, offs);
+ else
+ brcmnand_writel(val, ctrl->nand_base + offs);
+}
+
+static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
+{
+ static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
+ static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
+ static const unsigned int block_sizes_v2_2[] = { 16, 128, 8, 512, 256, 0 };
+ static const unsigned int block_sizes_v2_1[] = { 16, 128, 8, 512, 0 };
+ static const unsigned int page_sizes_v3_4[] = { 512, 2048, 4096, 8192, 0 };
+ static const unsigned int page_sizes_v2_2[] = { 512, 2048, 4096, 0 };
+ static const unsigned int page_sizes_v2_1[] = { 512, 2048, 0 };
+
+ ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;
+
+ /* Only support v2.1+ */
+ if (ctrl->nand_version < 0x0201) {
+ dev_err(ctrl->dev, "version %#x not supported\n",
+ ctrl->nand_version);
+ return -ENODEV;
+ }
+
+ /* Register offsets */
+ if (ctrl->nand_version >= 0x0702)
+ ctrl->reg_offsets = brcmnand_regs_v72;
+ else if (ctrl->nand_version == 0x0701)
+ ctrl->reg_offsets = brcmnand_regs_v71;
+ else if (ctrl->nand_version >= 0x0600)
+ ctrl->reg_offsets = brcmnand_regs_v60;
+ else if (ctrl->nand_version >= 0x0500)
+ ctrl->reg_offsets = brcmnand_regs_v50;
+ else if (ctrl->nand_version >= 0x0303)
+ ctrl->reg_offsets = brcmnand_regs_v33;
+ else if (ctrl->nand_version >= 0x0201)
+ ctrl->reg_offsets = brcmnand_regs_v21;
+
+ /* Chip-select stride */
+ if (ctrl->nand_version >= 0x0701)
+ ctrl->reg_spacing = 0x14;
+ else
+ ctrl->reg_spacing = 0x10;
+
+ /* Per chip-select registers */
+ if (ctrl->nand_version >= 0x0701) {
+ ctrl->cs_offsets = brcmnand_cs_offsets_v71;
+ } else {
+ ctrl->cs_offsets = brcmnand_cs_offsets;
+
+ /* v3.3-5.0 have a different CS0 offset layout */
+ if (ctrl->nand_version >= 0x0303 &&
+ ctrl->nand_version <= 0x0500)
+ ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
+ }
+
+ /* Page / block sizes */
+ if (ctrl->nand_version >= 0x0701) {
+ /* >= v7.1 use nice power-of-2 values! */
+ ctrl->max_page_size = 16 * 1024;
+ ctrl->max_block_size = 2 * 1024 * 1024;
+ } else {
+ if (ctrl->nand_version >= 0x0304)
+ ctrl->page_sizes = page_sizes_v3_4;
+ else if (ctrl->nand_version >= 0x0202)
+ ctrl->page_sizes = page_sizes_v2_2;
+ else
+ ctrl->page_sizes = page_sizes_v2_1;
+
+ if (ctrl->nand_version >= 0x0202)
+ ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT;
+ else
+ ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT_v2_1;
+
+ if (ctrl->nand_version >= 0x0600)
+ ctrl->block_sizes = block_sizes_v6;
+ else if (ctrl->nand_version >= 0x0400)
+ ctrl->block_sizes = block_sizes_v4;
+ else if (ctrl->nand_version >= 0x0202)
+ ctrl->block_sizes = block_sizes_v2_2;
+ else
+ ctrl->block_sizes = block_sizes_v2_1;
+
+ if (ctrl->nand_version < 0x0400) {
+ if (ctrl->nand_version < 0x0202)
+ ctrl->max_page_size = 2048;
+ else
+ ctrl->max_page_size = 4096;
+ ctrl->max_block_size = 512 * 1024;
+ }
+ }
+
+ /* Maximum spare area sector size (per 512B) */
+ if (ctrl->nand_version == 0x0702)
+ ctrl->max_oob = 128;
+ else if (ctrl->nand_version >= 0x0600)
+ ctrl->max_oob = 64;
+ else if (ctrl->nand_version >= 0x0500)
+ ctrl->max_oob = 32;
+ else
+ ctrl->max_oob = 16;
+
+ /* v6.0 and newer (except v6.1) have prefetch support */
+ if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
+ ctrl->features |= BRCMNAND_HAS_PREFETCH;
+
+ /*
+ * v6.x has cache mode, but it's implemented differently. Ignore it for
+ * now.
+ */
+ if (ctrl->nand_version >= 0x0700)
+ ctrl->features |= BRCMNAND_HAS_CACHE_MODE;
+
+ if (ctrl->nand_version >= 0x0500)
+ ctrl->features |= BRCMNAND_HAS_1K_SECTORS;
+
+ if (ctrl->nand_version >= 0x0700)
+ ctrl->features |= BRCMNAND_HAS_WP;
+ else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
+ ctrl->features |= BRCMNAND_HAS_WP;
+
+ /* v7.2 has different ecc level shift in the acc register */
+ if (ctrl->nand_version == 0x0702)
+ ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT;
+ else
+ ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT;
+
+ return 0;
+}
+
+static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
+{
+ /* flash_dma register offsets */
+ if (ctrl->nand_version >= 0x0703)
+ ctrl->flash_dma_offsets = flash_dma_regs_v4;
+ else if (ctrl->nand_version == 0x0602)
+ ctrl->flash_dma_offsets = flash_dma_regs_v0;
+ else
+ ctrl->flash_dma_offsets = flash_dma_regs_v1;
+}
+
+static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
+ enum brcmnand_reg reg)
+{
+ u16 offs = ctrl->reg_offsets[reg];
+
+ if (offs)
+ return nand_readreg(ctrl, offs);
+ else
+ return 0;
+}
+
+static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
+ enum brcmnand_reg reg, u32 val)
+{
+ u16 offs = ctrl->reg_offsets[reg];
+
+ if (offs)
+ nand_writereg(ctrl, offs, val);
+}
+
+static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
+ enum brcmnand_reg reg, u32 mask, unsigned
+ int shift, u32 val)
+{
+ u32 tmp = brcmnand_read_reg(ctrl, reg);
+
+ tmp &= ~mask;
+ tmp |= val << shift;
+ brcmnand_write_reg(ctrl, reg, tmp);
+}
+
+static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
+{
+ if (brcmnand_non_mmio_ops(ctrl))
+ return brcmnand_soc_read(ctrl->soc, BRCMNAND_NON_MMIO_FC_ADDR);
+ return __raw_readl(ctrl->nand_fc + word * 4);
+}
+
+static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
+ int word, u32 val)
+{
+ if (brcmnand_non_mmio_ops(ctrl))
+ brcmnand_soc_write(ctrl->soc, val, BRCMNAND_NON_MMIO_FC_ADDR);
+ else
+ __raw_writel(val, ctrl->nand_fc + word * 4);
+}
+
+static inline void edu_writel(struct brcmnand_controller *ctrl,
+ enum edu_reg reg, u32 val)
+{
+ u16 offs = ctrl->edu_offsets[reg];
+
+ brcmnand_writel(val, ctrl->edu_base + offs);
+}
+
+static inline u32 edu_readl(struct brcmnand_controller *ctrl,
+ enum edu_reg reg)
+{
+ u16 offs = ctrl->edu_offsets[reg];
+
+ return brcmnand_readl(ctrl->edu_base + offs);
+}
+
+static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
+{
+ /* Clear error addresses */
+ brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
+}
+
+static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl)
+{
+ u64 err_addr;
+
+ err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR);
+ err_addr |= ((u64)(brcmnand_read_reg(ctrl,
+ BRCMNAND_UNCORR_EXT_ADDR)
+ & 0xffff) << 32);
+
+ return err_addr;
+}
+
+static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl)
+{
+ u64 err_addr;
+
+ err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR);
+ err_addr |= ((u64)(brcmnand_read_reg(ctrl,
+ BRCMNAND_CORR_EXT_ADDR)
+ & 0xffff) << 32);
+
+ return err_addr;
+}
+
+static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+
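+ /* each register write is read back to make sure it has posted */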
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+ (host->cs << 16) | ((addr >> 32) & 0xffff));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
+ lower_32_bits(addr));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+}
+
+static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
+ enum brcmnand_cs_reg reg)
+{
+ u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
+ u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
+ u8 cs_offs;
+
+ if (cs == 0 && ctrl->cs0_offsets)
+ cs_offs = ctrl->cs0_offsets[reg];
+ else
+ cs_offs = ctrl->cs_offsets[reg];
+
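+ /* if the CS1 register block is non-contiguous with CS0, index from it */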
+ if (cs && offs_cs1)
+ return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;
+
+ return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
+}
+
+static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->nand_version < 0x0600)
+ return 1;
+ return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
+}
+
+static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned int shift = 0, bits;
+ enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
+ int cs = host->cs;
+
+ if (!ctrl->reg_offsets[reg])
+ return;
+
+ if (ctrl->nand_version == 0x0702)
+ bits = 7;
+ else if (ctrl->nand_version >= 0x0600)
+ bits = 6;
+ else if (ctrl->nand_version >= 0x0500)
+ bits = 5;
+ else
+ bits = 4;
+
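+ /*
+ * Threshold fields are packed 'bits' wide per chip-select; newer
+ * controllers spill the upper chip-selects into the EXT register.
+ */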
+ if (ctrl->nand_version >= 0x0702) {
+ if (cs >= 4)
+ reg = BRCMNAND_CORR_THRESHOLD_EXT;
+ shift = (cs % 4) * bits;
+ } else if (ctrl->nand_version >= 0x0600) {
+ if (cs >= 5)
+ reg = BRCMNAND_CORR_THRESHOLD_EXT;
+ shift = (cs % 5) * bits;
+ }
+ brcmnand_rmw_reg(ctrl, reg, GENMASK(bits - 1, 0) << shift, shift, val);
+}
+
+static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
+{
+ /* Kludge for the BCMA-based NAND controller which does not actually
+ * shift the command
+ */
+ if (ctrl->nand_version == 0x0304 && brcmnand_non_mmio_ops(ctrl))
+ return 0;
+
+ if (ctrl->nand_version < 0x0602)
+ return 24;
+ return 0;
+}
+
+static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->nand_version == 0x0702)
+ return GENMASK(7, 0);
+ else if (ctrl->nand_version >= 0x0600)
+ return GENMASK(6, 0);
+ else if (ctrl->nand_version >= 0x0303)
+ return GENMASK(5, 0);
+ else
+ return GENMASK(4, 0);
+}
+
+static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
+{
+ u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
+
+ mask <<= ACC_CONTROL_ECC_SHIFT;
+
+ /* v7.2 includes additional ECC levels */
+ if (ctrl->nand_version == 0x0702)
+ mask |= 0x7 << ACC_CONTROL_ECC_EXT_SHIFT;
+
+ return mask;
+}
+
+static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
+ u32 acc_control = nand_readreg(ctrl, offs);
+ u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC;
+
+ if (en) {
+ acc_control |= ecc_flags; /* enable RD/WR ECC */
+ acc_control &= ~brcmnand_ecc_level_mask(ctrl);
+ acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift;
+ } else {
+ acc_control &= ~ecc_flags; /* disable RD/WR ECC */
+ acc_control &= ~brcmnand_ecc_level_mask(ctrl);
+ }
+
+ nand_writereg(ctrl, offs, acc_control);
+}
+
+static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->nand_version >= 0x0702)
+ return 9;
+ else if (ctrl->nand_version >= 0x0600)
+ return 7;
+ else if (ctrl->nand_version >= 0x0500)
+ return 6;
+ else
+ return -1;
+}
+
+static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int shift = brcmnand_sector_1k_shift(ctrl);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+
+ if (shift < 0)
+ return 0;
+
+ return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
+}
+
+static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int shift = brcmnand_sector_1k_shift(ctrl);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u32 tmp;
+
+ if (shift < 0)
+ return;
+
+ tmp = nand_readreg(ctrl, acc_control_offs);
+ tmp &= ~(1 << shift);
+ tmp |= (!!val) << shift;
+ nand_writereg(ctrl, acc_control_offs, tmp);
+}
+
+/***********************************************************************
+ * CS_NAND_SELECT
+ ***********************************************************************/
+
+enum {
+ CS_SELECT_NAND_WP = BIT(29),
+ CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
+};
+
+static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
+ u32 mask, u32 expected_val,
+ unsigned long timeout_ms)
+{
+ unsigned long limit;
+ u32 val;
+
+ if (!timeout_ms)
+ timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;
+
+ limit = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
+ if ((val & mask) == expected_val)
+ return 0;
+
+ cpu_relax();
+ } while (time_after(limit, jiffies));
+
+ /*
+ * Do a final check after the timeout: if the CPU was busy, the loop
+ * above may not have polled recently enough, so re-read once before
+ * declaring a timeout to avoid a false alarm.
+ */
+ val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
+ if ((val & mask) == expected_val)
+ return 0;
+
+ dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
+ expected_val, val & mask);
+
+ return -ETIMEDOUT;
+}
+
+static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
+{
+ u32 val = en ? CS_SELECT_NAND_WP : 0;
+
+ brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
+}
+
+/***********************************************************************
+ * Flash DMA
+ ***********************************************************************/
+
+static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
+{
+ return ctrl->flash_dma_base;
+}
+
+static inline bool has_edu(struct brcmnand_controller *ctrl)
+{
+ return ctrl->edu_base;
+}
+
+static inline bool use_dma(struct brcmnand_controller *ctrl)
+{
+ return has_flash_dma(ctrl) || has_edu(ctrl);
+}
+
+static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->pio_poll_mode)
+ return;
+
+ if (has_flash_dma(ctrl)) {
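+ /* clearing the base makes has_flash_dma() return false, forcing PIO */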
+ ctrl->flash_dma_base = NULL;
+ disable_irq(ctrl->dma_irq);
+ }
+
+ disable_irq(ctrl->irq);
+ ctrl->pio_poll_mode = true;
+}
+
+static inline bool flash_dma_buf_ok(const void *buf)
+{
+ return buf && !is_vmalloc_addr(buf) &&
+ likely(IS_ALIGNED((uintptr_t)buf, 4));
+}
+
+static inline void flash_dma_writel(struct brcmnand_controller *ctrl,
+ enum flash_dma_reg dma_reg, u32 val)
+{
+ u16 offs = ctrl->flash_dma_offsets[dma_reg];
+
+ brcmnand_writel(val, ctrl->flash_dma_base + offs);
+}
+
+static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl,
+ enum flash_dma_reg dma_reg)
+{
+ u16 offs = ctrl->flash_dma_offsets[dma_reg];
+
+ return brcmnand_readl(ctrl->flash_dma_base + offs);
+}
+
+/* Low-level operation types: command, address, write, or read */
+enum brcmnand_llop_type {
+ LL_OP_CMD,
+ LL_OP_ADDR,
+ LL_OP_WR,
+ LL_OP_RD,
+};
+
+/***********************************************************************
+ * Internal support functions
+ ***********************************************************************/
+
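+/*
+ * Hamming ECC is signalled by a specific configuration rather than a flag:
+ * 512B sectors with a 16-byte spare area and ECC level 15 (newer
+ * controllers also accept a 28-byte spare area with level 16).
+ */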
+static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
+ struct brcmnand_cfg *cfg)
+{
+ if (ctrl->nand_version <= 0x0701)
+ return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
+ cfg->ecc_level == 15;
+ else
+ return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 &&
+ cfg->ecc_level == 15) ||
+ (cfg->spare_area_size == 28 && cfg->ecc_level == 16));
+}
+
+/*
+ * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given
+ * the layout/configuration.
+ * Returns -ERRCODE on failure.
+ */
+static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int sas = cfg->spare_area_size << cfg->sector_size_1k;
+ int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+ if (section >= sectors)
+ return -ERANGE;
+
+ oobregion->offset = (section * sas) + 6;
+ oobregion->length = 3;
+
+ return 0;
+}
+
+static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int sas = cfg->spare_area_size << cfg->sector_size_1k;
+ int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+ u32 next;
+
+ if (section > sectors)
+ return -ERANGE;
+
+ next = (section * sas);
+ if (section < sectors)
+ next += 6;
+
+ if (section) {
+ oobregion->offset = ((section - 1) * sas) + 9;
+ } else {
+ if (cfg->page_size > 512) {
+ /* Large page NAND uses first 2 bytes for BBI */
+ oobregion->offset = 2;
+ } else {
+ /* Small page NAND uses last byte before ECC for BBI */
+ oobregion->offset = 0;
+ next--;
+ }
+ }
+
+ oobregion->length = next - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = {
+ .ecc = brcmnand_hamming_ooblayout_ecc,
+ .free = brcmnand_hamming_ooblayout_free,
+};
+
+static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int sas = cfg->spare_area_size << cfg->sector_size_1k;
+ int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+ if (section >= sectors)
+ return -ERANGE;
+
+ oobregion->offset = ((section + 1) * sas) - chip->ecc.bytes;
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int sas = cfg->spare_area_size << cfg->sector_size_1k;
+ int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+ if (section >= sectors)
+ return -ERANGE;
+
+ if (sas <= chip->ecc.bytes)
+ return 0;
+
+ oobregion->offset = section * sas;
+ oobregion->length = sas - chip->ecc.bytes;
+
+ if (!section) {
+ oobregion->offset++;
+ oobregion->length--;
+ }
+
+ return 0;
+}
+
+static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int sas = cfg->spare_area_size << cfg->sector_size_1k;
+
+ if (section > 1 || sas - chip->ecc.bytes < 6 ||
+ (section && sas - chip->ecc.bytes == 6))
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 0;
+ oobregion->length = 5;
+ } else {
+ oobregion->offset = 6;
+ oobregion->length = sas - chip->ecc.bytes - 6;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = {
+ .ecc = brcmnand_bch_ooblayout_ecc,
+ .free = brcmnand_bch_ooblayout_free_lp,
+};
+
+static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = {
+ .ecc = brcmnand_bch_ooblayout_ecc,
+ .free = brcmnand_bch_ooblayout_free_sp,
+};
+
+static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
+{
+ struct brcmnand_cfg *p = &host->hwcfg;
+ struct mtd_info *mtd = nand_to_mtd(&host->chip);
+ struct nand_ecc_ctrl *ecc = &host->chip.ecc;
+ unsigned int ecc_level = p->ecc_level;
+ int sas = p->spare_area_size << p->sector_size_1k;
+ int sectors = p->page_size / (512 << p->sector_size_1k);
+
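+ /* the configured ECC level is per 512B; double it for 1KiB sectors */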
+ if (p->sector_size_1k)
+ ecc_level <<= 1;
+
+ if (is_hamming_ecc(host->ctrl, p)) {
+ ecc->bytes = 3 * sectors;
+ mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
+ return 0;
+ }
+
+ /*
+ * CONTROLLER_VERSION:
+ * < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
+ * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
+ * We always use the larger 14/8 figure to be conservative.
+ */
+ ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8);
+ if (p->page_size == 512)
+ mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops);
+ else
+ mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops);
+
+ if (ecc->bytes >= sas) {
+ dev_err(&host->pdev->dev,
+ "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
+ ecc->bytes, sas);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void brcmnand_wp(struct mtd_info *mtd, int wp)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+
+ if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
+ static int old_wp = -1;
+ int ret;
+
+ if (old_wp != wp) {
+ dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
+ old_wp = wp;
+ }
+
+ /*
+ * make sure the controller and flash are ready before and after
+ * changing the state of the #WP pin
+ */
+ ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
+ NAND_STATUS_READY,
+ NAND_CTRL_RDY |
+ NAND_STATUS_READY, 0);
+ if (ret)
+ return;
+
+ brcmnand_set_wp(ctrl, wp);
+ nand_status_op(chip, NULL);
+ /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
+ ret = bcmnand_ctrl_poll_status(ctrl,
+ NAND_CTRL_RDY |
+ NAND_STATUS_READY |
+ NAND_STATUS_WP,
+ NAND_CTRL_RDY |
+ NAND_STATUS_READY |
+ (wp ? 0 : NAND_STATUS_WP), 0);
+
+ if (ret)
+ dev_err_ratelimited(&host->pdev->dev,
+ "nand #WP expected %s\n",
+ wp ? "on" : "off");
+ }
+}
+
+/* Helper functions for reading and writing OOB registers */
+static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
+{
+ u16 offset0, offset10, reg_offs;
+
+ offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
+ offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];
+
+ if (offs >= ctrl->max_oob)
+ return 0x77;
+
+ if (offs >= 16 && offset10)
+ reg_offs = offset10 + ((offs - 0x10) & ~0x03);
+ else
+ reg_offs = offset0 + (offs & ~0x03);
+
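+ /*
+ * OOB bytes are packed big-endian within each 32-bit register:
+ * byte 0 of a word occupies bits 31:24
+ */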
+ return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
+}
+
+static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
+ u32 data)
+{
+ u16 offset0, offset10, reg_offs;
+
+ offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
+ offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];
+
+ if (offs >= ctrl->max_oob)
+ return;
+
+ if (offs >= 16 && offset10)
+ reg_offs = offset10 + ((offs - 0x10) & ~0x03);
+ else
+ reg_offs = offset0 + (offs & ~0x03);
+
+ nand_writereg(ctrl, reg_offs, data);
+}
+
+/*
+ * read_oob_from_regs - read data from OOB registers
+ * @ctrl: NAND controller
+ * @i: sub-page sector index
+ * @oob: buffer to read to
+ * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
+ * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
+ */
+static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
+ int sas, int sector_1k)
+{
+ int tbytes = sas << sector_1k;
+ int j;
+
+ /* Adjust OOB values for 1K sector size */
+ if (sector_1k && (i & 0x01))
+ tbytes = max(0, tbytes - (int)ctrl->max_oob);
+ tbytes = min_t(int, tbytes, ctrl->max_oob);
+
+ for (j = 0; j < tbytes; j++)
+ oob[j] = oob_reg_read(ctrl, j);
+ return tbytes;
+}
+
+/*
+ * write_oob_to_regs - write data to OOB registers
+ * @i: sub-page sector index
+ * @oob: buffer to write from
+ * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
+ * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
+ */
+static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
+ const u8 *oob, int sas, int sector_1k)
+{
+ int tbytes = sas << sector_1k;
+ int j, k = 0;
+ u32 last = 0xffffffff;
+ u8 *plast = (u8 *)&last;
+
+ /* Adjust OOB values for 1K sector size */
+ if (sector_1k && (i & 0x01))
+ tbytes = max(0, tbytes - (int)ctrl->max_oob);
+ tbytes = min_t(int, tbytes, ctrl->max_oob);
+
+ /*
+ * tbytes may not be a multiple of the word size. Stop the word-wise
+ * loop at the last full word so we don't read past the end of the
+ * oob buffer.
+ */
+ for (j = 0; (j + 3) < tbytes; j += 4)
+ oob_reg_write(ctrl, j,
+ (oob[j + 0] << 24) |
+ (oob[j + 1] << 16) |
+ (oob[j + 2] << 8) |
+ (oob[j + 3] << 0));
+
+ /* handle the remaining bytes */
+ while (j < tbytes)
+ plast[k++] = oob[j++];
+
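+ /* the 0xff padding left in 'last' matches the NAND erased value */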
+ if (tbytes & 0x3)
+ oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last));
+
+ return tbytes;
+}
+
+static void brcmnand_edu_init(struct brcmnand_controller *ctrl)
+{
+ /* initialize edu */
+ edu_writel(ctrl, EDU_ERR_STATUS, 0);
+ edu_readl(ctrl, EDU_ERR_STATUS);
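+ /* EDU_DONE holds a 2-bit completion count, apparently drained by repeated writes */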
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_readl(ctrl, EDU_DONE);
+}
+
+/*
+ * edu irq: each completion retires one 512B flash-cache transfer and,
+ * if more remain, re-arms the engine for the next one
+ */
+static irqreturn_t brcmnand_edu_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ if (ctrl->edu_count) {
+ ctrl->edu_count--;
+ while (!(edu_readl(ctrl, EDU_DONE) & EDU_DONE_MASK))
+ udelay(1);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_readl(ctrl, EDU_DONE);
+ }
+
+ if (ctrl->edu_count) {
+ ctrl->edu_dram_addr += FC_BYTES;
+ ctrl->edu_ext_addr += FC_BYTES;
+
+ edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
+ edu_readl(ctrl, EDU_DRAM_ADDR);
+ edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
+ edu_readl(ctrl, EDU_EXT_ADDR);
+
+ if (ctrl->oob) {
+ if (ctrl->edu_cmd == EDU_CMD_READ) {
+ ctrl->oob += read_oob_from_regs(ctrl,
+ ctrl->edu_count + 1,
+ ctrl->oob, ctrl->sas,
+ ctrl->sector_size_1k);
+ } else {
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
+ ctrl->edu_ext_addr);
+ brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+ ctrl->oob += write_oob_to_regs(ctrl,
+ ctrl->edu_count,
+ ctrl->oob, ctrl->sas,
+ ctrl->sector_size_1k);
+ }
+ }
+
+ mb(); /* flush previous writes */
+ edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
+ edu_readl(ctrl, EDU_CMD);
+
+ return IRQ_HANDLED;
+ }
+
+ complete(&ctrl->edu_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ /* Discard all NAND_CTLRDY interrupts during DMA */
+ if (ctrl->dma_pending)
+ return IRQ_HANDLED;
+
+ /* check if we need to piggyback on the ctlrdy irq */
+ if (ctrl->edu_pending) {
+ if (irq == ctrl->irq && ((int)ctrl->edu_irq >= 0))
+ /* Discard interrupts while using dedicated edu irq */
+ return IRQ_HANDLED;
+
+ /* no registered edu irq, call handler */
+ return brcmnand_edu_irq(irq, data);
+ }
+
+ complete(&ctrl->done);
+ return IRQ_HANDLED;
+}
+
+/* Handle SoC-specific interrupt hardware */
+static irqreturn_t brcmnand_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ if (ctrl->soc->ctlrdy_ack(ctrl->soc))
+ return brcmnand_ctlrdy_irq(irq, data);
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t brcmnand_dma_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ complete(&ctrl->dma_done);
+
+ return IRQ_HANDLED;
+}
+
+static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int ret;
+ u64 cmd_addr;
+
+ cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+
+ dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);
+
+ /*
+ * If we came here through _panic_write and there is a pending
+ * command, try to wait for it. If it times out, rather than
+ * hitting BUG_ON, just return so we don't crash while crashing.
+ */
+ if (oops_in_progress) {
+ if (ctrl->cmd_pending &&
+ bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
+ return;
+ } else
+ BUG_ON(ctrl->cmd_pending != 0);
+ ctrl->cmd_pending = cmd;
+
+ ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+ WARN_ON(ret);
+
+ mb(); /* flush previous writes */
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
+ cmd << brcmnand_cmd_shift(ctrl));
+}
+
+/***********************************************************************
+ * NAND MTD API: read/program/erase
+ ***********************************************************************/
+
+static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat,
+ unsigned int ctrl)
+{
+ /* intentionally left blank */
+}
+
+static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ bool err = false;
+ int sts;
+
+ if (mtd->oops_panic_write || ctrl->irq < 0) {
+ /* switch to interrupt polling and PIO mode */
+ disable_ctrl_irqs(ctrl);
+ sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
+ NAND_CTRL_RDY, 0);
+ err = sts < 0;
+ } else {
+ unsigned long timeo = msecs_to_jiffies(
+ NAND_POLL_STATUS_TIMEOUT_MS);
+ /* wait for completion interrupt */
+ sts = wait_for_completion_timeout(&ctrl->done, timeo);
+ err = !sts;
+ }
+
+ return err;
+}
+
+static int brcmnand_waitfunc(struct nand_chip *chip)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ bool err = false;
+
+ dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
+ if (ctrl->cmd_pending)
+ err = brcmstb_nand_wait_for_completion(chip);
+
+ ctrl->cmd_pending = 0;
+ if (err) {
+ u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
+ >> brcmnand_cmd_shift(ctrl);
+
+ dev_err_ratelimited(ctrl->dev,
+ "timeout waiting for command %#02x\n", cmd);
+ dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
+ brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
+ return -ETIMEDOUT;
+ }
+ return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+ INTFC_FLASH_STATUS;
+}
+
+enum {
+ LLOP_RE = BIT(16),
+ LLOP_WE = BIT(17),
+ LLOP_ALE = BIT(18),
+ LLOP_CLE = BIT(19),
+ LLOP_RETURN_IDLE = BIT(31),
+
+ LLOP_DATA_MASK = GENMASK(15, 0),
+};
+
+static int brcmnand_low_level_op(struct brcmnand_host *host,
+ enum brcmnand_llop_type type, u32 data,
+ bool last_op)
+{
+ struct nand_chip *chip = &host->chip;
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u32 tmp;
+
+ tmp = data & LLOP_DATA_MASK;
+ switch (type) {
+ case LL_OP_CMD:
+ tmp |= LLOP_WE | LLOP_CLE;
+ break;
+ case LL_OP_ADDR:
+ /* WE | ALE */
+ tmp |= LLOP_WE | LLOP_ALE;
+ break;
+ case LL_OP_WR:
+ /* WE */
+ tmp |= LLOP_WE;
+ break;
+ case LL_OP_RD:
+ /* RE */
+ tmp |= LLOP_RE;
+ break;
+ }
+ if (last_op)
+ /* RETURN_IDLE */
+ tmp |= LLOP_RETURN_IDLE;
+
+ dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);
+
+ brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);
+
+ brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
+ return brcmnand_waitfunc(chip);
+}
+
+static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u64 addr = (u64)page_addr << chip->page_shift;
+ int native_cmd = 0;
+
+ if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
+ command == NAND_CMD_RNDOUT)
+ addr = (u64)column;
+ /* Avoid propagating a negative, don't-care address */
+ else if (page_addr < 0)
+ addr = 0;
+
+ dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
+ (unsigned long long)addr);
+
+ host->last_cmd = command;
+ host->last_byte = 0;
+ host->last_addr = addr;
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ native_cmd = CMD_FLASH_RESET;
+ break;
+ case NAND_CMD_STATUS:
+ native_cmd = CMD_STATUS_READ;
+ break;
+ case NAND_CMD_READID:
+ native_cmd = CMD_DEVICE_ID_READ;
+ break;
+ case NAND_CMD_READOOB:
+ native_cmd = CMD_SPARE_AREA_READ;
+ break;
+ case NAND_CMD_ERASE1:
+ native_cmd = CMD_BLOCK_ERASE;
+ brcmnand_wp(mtd, 0);
+ break;
+ case NAND_CMD_PARAM:
+ native_cmd = CMD_PARAMETER_READ;
+ break;
+ case NAND_CMD_SET_FEATURES:
+ case NAND_CMD_GET_FEATURES:
+ brcmnand_low_level_op(host, LL_OP_CMD, command, false);
+ brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
+ break;
+ case NAND_CMD_RNDOUT:
+ native_cmd = CMD_PARAMETER_CHANGE_COL;
+ addr &= ~((u64)(FC_BYTES - 1));
+ /*
+ * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
+ * NB: hwcfg.sector_size_1k may not be initialized yet
+ */
+ if (brcmnand_get_sector_size_1k(host)) {
+ host->hwcfg.sector_size_1k =
+ brcmnand_get_sector_size_1k(host);
+ brcmnand_set_sector_size_1k(host, 0);
+ }
+ break;
+ }
+
+ if (!native_cmd)
+ return;
+
+ brcmnand_set_cmd_addr(mtd, addr);
+ brcmnand_send_cmd(host, native_cmd);
+ brcmnand_waitfunc(chip);
+
+ if (native_cmd == CMD_PARAMETER_READ ||
+ native_cmd == CMD_PARAMETER_CHANGE_COL) {
+ /* Copy flash cache word-wise */
+ u32 *flash_cache = (u32 *)ctrl->flash_cache;
+ int i;
+
+ brcmnand_soc_data_bus_prepare(ctrl->soc, true);
+
+ /*
+ * Must cache the FLASH_CACHE now, since changes in
+ * SECTOR_SIZE_1K may invalidate it
+ */
+ for (i = 0; i < FC_WORDS; i++)
+ /*
+ * Flash cache is big endian for parameter pages, at
+ * least on STB SoCs
+ */
+ flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
+
+ /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
+ if (host->hwcfg.sector_size_1k)
+ brcmnand_set_sector_size_1k(host,
+ host->hwcfg.sector_size_1k);
+ }
+
+ /* Re-enabling protection is necessary only after erase */
+ if (command == NAND_CMD_ERASE1)
+ brcmnand_wp(mtd, 1);
+}
+
+static uint8_t brcmnand_read_byte(struct nand_chip *chip)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ uint8_t ret = 0;
+ int addr, offs;
+
+ switch (host->last_cmd) {
+ case NAND_CMD_READID:
+ if (host->last_byte < 4)
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
+ (24 - (host->last_byte << 3));
+ else if (host->last_byte < 8)
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
+ (56 - (host->last_byte << 3));
+ break;
+
+ case NAND_CMD_READOOB:
+ ret = oob_reg_read(ctrl, host->last_byte);
+ break;
+
+ case NAND_CMD_STATUS:
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+ INTFC_FLASH_STATUS;
+ if (wp_on) /* hide WP status */
+ ret |= NAND_STATUS_WP;
+ break;
+
+ case NAND_CMD_PARAM:
+ case NAND_CMD_RNDOUT:
+ addr = host->last_addr + host->last_byte;
+ offs = addr & (FC_BYTES - 1);
+
+ /* At FC_BYTES boundary, switch to next column */
+ if (host->last_byte > 0 && offs == 0)
+ nand_change_read_column_op(chip, addr, NULL, 0, false);
+
+ ret = ctrl->flash_cache[offs];
+ break;
+ case NAND_CMD_GET_FEATURES:
+ if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
+ ret = 0;
+ } else {
+ bool last = host->last_byte ==
+ ONFI_SUBFEATURE_PARAM_LEN - 1;
+ brcmnand_low_level_op(host, LL_OP_RD, 0, last);
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
+ }
+ }
+
+ dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
+ host->last_byte++;
+
+ return ret;
+}
+
+static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++, buf++)
+ *buf = brcmnand_read_byte(chip);
+}
+
+static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
+ int len)
+{
+ int i;
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+
+ switch (host->last_cmd) {
+ case NAND_CMD_SET_FEATURES:
+ for (i = 0; i < len; i++)
+ brcmnand_low_level_op(host, LL_OP_WR, buf[i],
+ (i + 1) == len);
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+/*
+ * Kick EDU engine
+ */
+static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
+ u8 *oob, u32 len, u8 cmd)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ unsigned long timeo = msecs_to_jiffies(200);
+ int ret = 0;
+ int dir = (cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ u8 edu_cmd = (cmd == CMD_PAGE_READ ? EDU_CMD_READ : EDU_CMD_WRITE);
+ unsigned int trans = len >> FC_SHIFT;
+ dma_addr_t pa;
+
+ dev_dbg(ctrl->dev, "EDU %s %p:%p\n", ((edu_cmd == EDU_CMD_READ) ?
+ "read" : "write"), buf, oob);
+
+ pa = dma_map_single(ctrl->dev, buf, len, dir);
+ if (dma_mapping_error(ctrl->dev, pa)) {
+ dev_err(ctrl->dev, "unable to map buffer for EDU DMA\n");
+ return -ENOMEM;
+ }
+
+ ctrl->edu_pending = true;
+ ctrl->edu_dram_addr = pa;
+ ctrl->edu_ext_addr = addr;
+ ctrl->edu_cmd = edu_cmd;
+ ctrl->edu_count = trans;
+ ctrl->sas = cfg->spare_area_size;
+ ctrl->oob = oob;
+
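+	/* each register write below is read back to flush the posted write */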
+ edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
+ edu_readl(ctrl, EDU_DRAM_ADDR);
+ edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
+ edu_readl(ctrl, EDU_EXT_ADDR);
+ edu_writel(ctrl, EDU_LENGTH, FC_BYTES);
+ edu_readl(ctrl, EDU_LENGTH);
+
+ if (ctrl->oob && (ctrl->edu_cmd == EDU_CMD_WRITE)) {
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
+ ctrl->edu_ext_addr);
+ brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+ ctrl->oob += write_oob_to_regs(ctrl,
+ 1,
+ ctrl->oob, ctrl->sas,
+ ctrl->sector_size_1k);
+ }
+
+ /* Start edu engine */
+ mb(); /* flush previous writes */
+ edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
+ edu_readl(ctrl, EDU_CMD);
+
+ if (wait_for_completion_timeout(&ctrl->edu_done, timeo) <= 0) {
+ dev_err(ctrl->dev,
+ "timeout waiting for EDU; status %#x, error status %#x\n",
+ edu_readl(ctrl, EDU_STATUS),
+ edu_readl(ctrl, EDU_ERR_STATUS));
+ }
+
+ dma_unmap_single(ctrl->dev, pa, len, dir);
+
+ /* read last subpage oob */
+ if (ctrl->oob && (ctrl->edu_cmd == EDU_CMD_READ)) {
+ ctrl->oob += read_oob_from_regs(ctrl,
+ 1,
+ ctrl->oob, ctrl->sas,
+ ctrl->sector_size_1k);
+ }
+
+ /* for program page check NAND status */
+ if (((brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+ INTFC_FLASH_STATUS) & NAND_STATUS_FAIL) &&
+ edu_cmd == EDU_CMD_WRITE) {
+ dev_info(ctrl->dev, "program failed at %llx\n",
+ (unsigned long long)addr);
+ ret = -EIO;
+ }
+
+ /* Make sure the EDU status is clean */
+ if (edu_readl(ctrl, EDU_STATUS) & EDU_STATUS_ACTIVE)
+ dev_warn(ctrl->dev, "EDU still active: %#x\n",
+ edu_readl(ctrl, EDU_STATUS));
+
+ if (unlikely(edu_readl(ctrl, EDU_ERR_STATUS) & EDU_ERR_STATUS_ERRACK)) {
+ dev_warn(ctrl->dev, "EDU RBUS error at addr %llx\n",
+ (unsigned long long)addr);
+ ret = -EIO;
+ }
+
+ ctrl->edu_pending = false;
+ brcmnand_edu_init(ctrl);
+ edu_writel(ctrl, EDU_STOP, 0); /* force stop */
+ edu_readl(ctrl, EDU_STOP);
+
+ if (!ret && edu_cmd == EDU_CMD_READ) {
+ u64 err_addr = 0;
+
+ /*
+ * check for ECC errors here, subpage ECC errors are
+ * retained in ECC error address register
+ */
+ err_addr = brcmnand_get_uncorrecc_addr(ctrl);
+ if (!err_addr) {
+ err_addr = brcmnand_get_correcc_addr(ctrl);
+ if (err_addr)
+ ret = -EUCLEAN;
+ } else
+ ret = -EBADMSG;
+ }
+
+ return ret;
+}
+
+/*
+ * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
+ * following ahead of time:
+ * - Is this descriptor the beginning or end of a linked list?
+ * - What is the (DMA) address of the next descriptor in the linked list?
+ */
+static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
+ struct brcm_nand_dma_desc *desc, u64 addr,
+ dma_addr_t buf, u32 len, u8 dma_cmd,
+ bool begin, bool end,
+ dma_addr_t next_desc)
+{
+ memset(desc, 0, sizeof(*desc));
+ /* Descriptors are written in native byte order (wordwise) */
+ desc->next_desc = lower_32_bits(next_desc);
+ desc->next_desc_ext = upper_32_bits(next_desc);
+ desc->cmd_irq = (dma_cmd << 24) |
+ (end ? (0x03 << 8) : 0) | /* IRQ | STOP */
+ (!!begin) | ((!!end) << 1); /* head, tail */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ desc->cmd_irq |= 0x01 << 12;
+#endif
+ desc->dram_addr = lower_32_bits(buf);
+ desc->dram_addr_ext = upper_32_bits(buf);
+ desc->tfr_len = len;
+ desc->total_len = len;
+ desc->flash_addr = lower_32_bits(addr);
+ desc->flash_addr_ext = upper_32_bits(addr);
+ desc->cs = host->cs;
+ desc->status_valid = 0x01;
+ return 0;
+}
+
+/*
+ * Kick the FLASH_DMA engine, with a given DMA descriptor
+ */
+static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned long timeo = msecs_to_jiffies(100);
+
+ flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
+ (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
+ if (ctrl->nand_version > 0x0602) {
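+		/* newer controllers also take the upper descriptor address bits */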
+ flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT,
+ upper_32_bits(desc));
+ (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
+ }
+
+ /* Start FLASH_DMA engine */
+ ctrl->dma_pending = true;
+ mb(); /* flush previous writes */
+ flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */
+
+ if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
+ dev_err(ctrl->dev,
+ "timeout waiting for DMA; status %#x, error status %#x\n",
+ flash_dma_readl(ctrl, FLASH_DMA_STATUS),
+ flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
+ }
+ ctrl->dma_pending = false;
+ flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
+}
+
+static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
+ u8 *oob, u32 len, u8 dma_cmd)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ dma_addr_t buf_pa;
+ int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
+ if (dma_mapping_error(ctrl->dev, buf_pa)) {
+ dev_err(ctrl->dev, "unable to map buffer for DMA\n");
+ return -ENOMEM;
+ }
+
+ brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
+ dma_cmd, true, true, 0);
+
+ brcmnand_dma_run(host, ctrl->dma_pa);
+
+ dma_unmap_single(ctrl->dev, buf_pa, len, dir);
+
+ if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
+ return -EBADMSG;
+ else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
+ return -EUCLEAN;
+
+ return 0;
+}
+
+/*
+ * Assumes proper CS is already set
+ */
+static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
+ u64 addr, unsigned int trans, u32 *buf,
+ u8 *oob, u64 *err_addr)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int i, j, ret = 0;
+
+ brcmnand_clear_ecc_addr(ctrl);
+
+ for (i = 0; i < trans; i++, addr += FC_BYTES) {
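+		/* each iteration transfers one flash-cache unit (FC_BYTES) */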
+ brcmnand_set_cmd_addr(mtd, addr);
+ /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
+ brcmnand_send_cmd(host, CMD_PAGE_READ);
+ brcmnand_waitfunc(chip);
+
+ if (likely(buf)) {
+ brcmnand_soc_data_bus_prepare(ctrl->soc, false);
+
+ for (j = 0; j < FC_WORDS; j++, buf++)
+ *buf = brcmnand_read_fc(ctrl, j);
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
+ }
+
+ if (oob)
+ oob += read_oob_from_regs(ctrl, i, oob,
+ mtd->oobsize / trans,
+ host->hwcfg.sector_size_1k);
+
+ if (ret != -EBADMSG) {
+ *err_addr = brcmnand_get_uncorrecc_addr(ctrl);
+
+ if (*err_addr)
+ ret = -EBADMSG;
+ }
+
+ if (!ret) {
+ *err_addr = brcmnand_get_correcc_addr(ctrl);
+
+ if (*err_addr)
+ ret = -EUCLEAN;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
+ * error
+ *
+ * Because the HW ECC signals an ECC error if an erased page has even a single
+ * bitflip, we must check each ECC error to see if it is actually an erased
+ * page with bitflips, not a truly corrupted page.
+ *
+ * On a real error, return a negative error code (-EBADMSG for ECC error), and
+ * buf will contain raw data.
+ * Otherwise, fill buf with 0xffs and return the maximum number of
+ * bitflips-per-ECC-sector to the caller.
+ */
+static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
+ struct nand_chip *chip, void *buf, u64 addr)
+{
+ struct mtd_oob_region ecc;
+ int i;
+ int bitflips = 0;
+ int page = addr >> chip->page_shift;
+ int ret;
+ void *ecc_bytes;
+ void *ecc_chunk;
+
+ if (!buf)
+ buf = nand_get_data_buf(chip);
+
+ /* read without ecc for verification */
+ ret = chip->ecc.read_page_raw(chip, buf, true, page);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ ecc_chunk = buf + chip->ecc.size * i;
+
+ mtd_ooblayout_ecc(mtd, i, &ecc);
+ ecc_bytes = chip->oob_poi + ecc.offset;
+
+ ret = nand_check_erased_ecc_chunk(ecc_chunk, chip->ecc.size,
+ ecc_bytes, ecc.length,
+ NULL, 0,
+ chip->ecc.strength);
+ if (ret < 0)
+ return ret;
+
+ bitflips = max(bitflips, ret);
+ }
+
+ return bitflips;
+}
+
+static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
+ u64 addr, unsigned int trans, u32 *buf, u8 *oob)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u64 err_addr = 0;
+ int err;
+ bool retry = true;
+ bool edu_err = false;
+
+ dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
+
+try_dmaread:
+ brcmnand_clear_ecc_addr(ctrl);
+
+ if (ctrl->dma_trans && (has_edu(ctrl) || !oob) &&
+ flash_dma_buf_ok(buf)) {
+ err = ctrl->dma_trans(host, addr, buf, oob,
+ trans * FC_BYTES,
+ CMD_PAGE_READ);
+
+ if (err) {
+ if (mtd_is_bitflip_or_eccerr(err))
+ err_addr = addr;
+ else
+ return -EIO;
+ }
+
+ if (has_edu(ctrl) && err_addr)
+ edu_err = true;
+
+ } else {
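+		/* PIO path: pre-fill OOB with a recognizable pattern (0x99) */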
+ if (oob)
+ memset(oob, 0x99, mtd->oobsize);
+
+ err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
+ oob, &err_addr);
+ }
+
+ if (mtd_is_eccerr(err)) {
+ /*
+		 * On controller versions 7.0 and 7.1, a DMA read issued after
+		 * a prior PIO read that reported an uncorrectable error can
+		 * capture that stale error; it is cleared only on a subsequent
+		 * DMA read. Retry once to clear a possible false error
+		 * reported for the current DMA read.
+ */
+ if ((ctrl->nand_version == 0x0700) ||
+ (ctrl->nand_version == 0x0701)) {
+ if (retry) {
+ retry = false;
+ goto try_dmaread;
+ }
+ }
+
+ /*
+		 * Controller version 7.2 has a HW encoder to detect erased-page
+		 * bitflips; apply SW verification for older controllers only.
+ */
+ if (ctrl->nand_version < 0x0702) {
+ err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
+ addr);
+ /* erased page bitflips corrected */
+ if (err >= 0)
+ return err;
+ }
+
+ dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
+ (unsigned long long)err_addr);
+ mtd->ecc_stats.failed++;
+ /* NAND layer expects zero on ECC errors */
+ return 0;
+ }
+
+ if (mtd_is_bitflip(err)) {
+ unsigned int corrected = brcmnand_count_corrected(ctrl);
+
+ /* in case of EDU correctable error we read again using PIO */
+ if (edu_err)
+ err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
+ oob, &err_addr);
+
+ dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
+ (unsigned long long)err_addr);
+ mtd->ecc_stats.corrected += corrected;
+ /* Always exceed the software-imposed threshold */
+ return max(mtd->bitflip_threshold, corrected);
+ }
+
+ return 0;
+}
+
+static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ return brcmnand_read(mtd, chip, host->last_addr,
+ mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+}
+
+static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+ int ret;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ brcmnand_set_ecc_enabled(host, 0);
+ ret = brcmnand_read(mtd, chip, host->last_addr,
+ mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+ brcmnand_set_ecc_enabled(host, 1);
+ return ret;
+}
+
+static int brcmnand_read_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
+ mtd->writesize >> FC_SHIFT,
+ NULL, (u8 *)chip->oob_poi);
+}
+
+static int brcmnand_read_oob_raw(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+
+ brcmnand_set_ecc_enabled(host, 0);
+ brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
+ mtd->writesize >> FC_SHIFT,
+ NULL, (u8 *)chip->oob_poi);
+ brcmnand_set_ecc_enabled(host, 1);
+ return 0;
+}
+
+static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
+ u64 addr, const u32 *buf, u8 *oob)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
+ int status, ret = 0;
+
+ dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);
+
+ if (unlikely((unsigned long)buf & 0x03)) {
+ dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
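+		/* round the buffer pointer down to a 32-bit boundary */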
+ buf = (u32 *)((unsigned long)buf & ~0x03);
+ }
+
+ brcmnand_wp(mtd, 0);
+
+ for (i = 0; i < ctrl->max_oob; i += 4)
+ oob_reg_write(ctrl, i, 0xffffffff);
+
+ if (mtd->oops_panic_write)
+ /* switch to interrupt polling and PIO mode */
+ disable_ctrl_irqs(ctrl);
+
+ if (use_dma(ctrl) && (has_edu(ctrl) || !oob) && flash_dma_buf_ok(buf)) {
+ if (ctrl->dma_trans(host, addr, (u32 *)buf, oob, mtd->writesize,
+ CMD_PROGRAM_PAGE))
+
+ ret = -EIO;
+
+ goto out;
+ }
+
+ for (i = 0; i < trans; i++, addr += FC_BYTES) {
+ /* full address MUST be set before populating FC */
+ brcmnand_set_cmd_addr(mtd, addr);
+
+ if (buf) {
+ brcmnand_soc_data_bus_prepare(ctrl->soc, false);
+
+ for (j = 0; j < FC_WORDS; j++, buf++)
+ brcmnand_write_fc(ctrl, j, *buf);
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
+ } else if (oob) {
+ for (j = 0; j < FC_WORDS; j++)
+ brcmnand_write_fc(ctrl, j, 0xffffffff);
+ }
+
+ if (oob) {
+ oob += write_oob_to_regs(ctrl, i, oob,
+ mtd->oobsize / trans,
+ host->hwcfg.sector_size_1k);
+ }
+
+ /* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
+ brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
+ status = brcmnand_waitfunc(chip);
+
+ if (status & NAND_STATUS_FAIL) {
+ dev_info(ctrl->dev, "program failed at %llx\n",
+ (unsigned long long)addr);
+ ret = -EIO;
+ goto out;
+ }
+ }
+out:
+ brcmnand_wp(mtd, 1);
+ return ret;
+}
+
+static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ void *oob = oob_required ? chip->oob_poi : NULL;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ void *oob = oob_required ? chip->oob_poi : NULL;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ brcmnand_set_ecc_enabled(host, 0);
+ brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ brcmnand_set_ecc_enabled(host, 1);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int brcmnand_write_oob(struct nand_chip *chip, int page)
+{
+ return brcmnand_write(nand_to_mtd(chip), chip,
+ (u64)page << chip->page_shift, NULL,
+ chip->oob_poi);
+}
+
+static int brcmnand_write_oob_raw(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ int ret;
+
+ brcmnand_set_ecc_enabled(host, 0);
+ ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
+ (u8 *)chip->oob_poi);
+ brcmnand_set_ecc_enabled(host, 1);
+
+ return ret;
+}
+
+/***********************************************************************
+ * Per-CS setup (1 NAND device)
+ ***********************************************************************/
+
+static int brcmnand_set_cfg(struct brcmnand_host *host,
+ struct brcmnand_cfg *cfg)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ struct nand_chip *chip = &host->chip;
+ u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
+ u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_CFG_EXT);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u8 block_size = 0, page_size = 0, device_size = 0;
+ u32 tmp;
+
+ if (ctrl->block_sizes) {
+ int i, found;
+
+ for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
+ if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
+ block_size = i;
+ found = 1;
+ }
+ if (!found) {
+ dev_warn(ctrl->dev, "invalid block size %u\n",
+ cfg->block_size);
+ return -EINVAL;
+ }
+ } else {
+ block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE);
+ }
+
+ if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
+ cfg->block_size > ctrl->max_block_size)) {
+ dev_warn(ctrl->dev, "invalid block size %u\n",
+ cfg->block_size);
+ block_size = 0;
+ }
+
+ if (ctrl->page_sizes) {
+ int i, found;
+
+ for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
+ if (ctrl->page_sizes[i] == cfg->page_size) {
+ page_size = i;
+ found = 1;
+ }
+ if (!found) {
+ dev_warn(ctrl->dev, "invalid page size %u\n",
+ cfg->page_size);
+ return -EINVAL;
+ }
+ } else {
+ page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE);
+ }
+
+ if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
+ cfg->page_size > ctrl->max_page_size)) {
+ dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
+ return -EINVAL;
+ }
+
+ if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) {
+ dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
+ (unsigned long long)cfg->device_size);
+ return -EINVAL;
+ }
+ device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE);
+
+ tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) |
+ (cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) |
+ (cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) |
+ (!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
+ (device_size << CFG_DEVICE_SIZE_SHIFT);
+ if (cfg_offs == cfg_ext_offs) {
+ tmp |= (page_size << ctrl->page_size_shift) |
+ (block_size << CFG_BLK_SIZE_SHIFT);
+ nand_writereg(ctrl, cfg_offs, tmp);
+ } else {
+ nand_writereg(ctrl, cfg_offs, tmp);
+ tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) |
+ (block_size << CFG_EXT_BLK_SIZE_SHIFT);
+ nand_writereg(ctrl, cfg_ext_offs, tmp);
+ }
+
+ tmp = nand_readreg(ctrl, acc_control_offs);
+ tmp &= ~brcmnand_ecc_level_mask(ctrl);
+ tmp &= ~brcmnand_spare_area_mask(ctrl);
+ if (ctrl->nand_version >= 0x0302) {
+ tmp |= cfg->ecc_level << ctrl->ecc_level_shift;
+ tmp |= cfg->spare_area_size;
+ }
+ nand_writereg(ctrl, acc_control_offs, tmp);
+
+ brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
+
+ /* threshold = ceil(BCH-level * 0.75) */
+ brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4));
+
+ return 0;
+}
+
+static void brcmnand_print_cfg(struct brcmnand_host *host,
+ char *buf, struct brcmnand_cfg *cfg)
+{
+ buf += sprintf(buf,
+ "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
+ (unsigned long long)cfg->device_size >> 20,
+ cfg->block_size >> 10,
+ cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size,
+ cfg->page_size >= 1024 ? "KiB" : "B",
+ cfg->spare_area_size, cfg->device_width);
+
+ /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
+ if (is_hamming_ecc(host->ctrl, cfg))
+ sprintf(buf, ", Hamming ECC");
+ else if (cfg->sector_size_1k)
+ sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
+ else
+ sprintf(buf, ", BCH-%u", cfg->ecc_level);
+}
+
+/*
+ * Minimum number of bytes to address a page. Calculated as:
+ * roundup(log2(size / page-size) / 8)
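+ * e.g. a 1GiB device with 2KiB pages needs roundup((30 - 11) / 8) = 3 bytes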
+ *
+ * NB: the following does not "round up" for non-power-of-2 'size'; but this is
+ * OK because many other things will break if 'size' is irregular...
+ */
+static inline int get_blk_adr_bytes(u64 size, u32 writesize)
+{
+ return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3;
+}
+
+static int brcmnand_setup_dev(struct brcmnand_host *host)
+{
+ struct mtd_info *mtd = nand_to_mtd(&host->chip);
+ struct nand_chip *chip = &host->chip;
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ struct nand_memory_organization *memorg =
+ nanddev_get_memorg(&chip->base);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ char msg[128];
+ u32 offs, tmp, oob_sector;
+ int ret;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ ret = of_property_read_u32(nand_get_flash_node(chip),
+ "brcm,nand-oob-sector-size",
+ &oob_sector);
+ if (ret) {
+ /* Use detected size */
+ cfg->spare_area_size = mtd->oobsize /
+ (mtd->writesize >> FC_SHIFT);
+ } else {
+ cfg->spare_area_size = oob_sector;
+ }
+ if (cfg->spare_area_size > ctrl->max_oob)
+ cfg->spare_area_size = ctrl->max_oob;
+ /*
+ * Set mtd and memorg oobsize to be consistent with controller's
+ * spare_area_size, as the rest is inaccessible.
+ */
+ mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
+ memorg->oobsize = mtd->oobsize;
+
+ cfg->device_size = mtd->size;
+ cfg->block_size = mtd->erasesize;
+ cfg->page_size = mtd->writesize;
+ cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
+ cfg->col_adr_bytes = 2;
+ cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
+ chip->ecc.engine_type);
+ return -EINVAL;
+ }
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
+ if (chip->ecc.strength == 1 && chip->ecc.size == 512)
+ /* Default to Hamming for 1-bit ECC, if unspecified */
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ else
+ /* Otherwise, BCH */
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
+ }
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING &&
+ (chip->ecc.strength != 1 || chip->ecc.size != 512)) {
+ dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
+ chip->ecc.strength, chip->ecc.size);
+ return -EINVAL;
+ }
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
+ (!chip->ecc.size || !chip->ecc.strength)) {
+ if (requirements->step_size && requirements->strength) {
+ /* use detected ECC parameters */
+ chip->ecc.size = requirements->step_size;
+ chip->ecc.strength = requirements->strength;
+ dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n",
+ chip->ecc.size, chip->ecc.strength);
+ }
+ }
+
+ switch (chip->ecc.size) {
+ case 512:
+ if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
+ cfg->ecc_level = 15;
+ else
+ cfg->ecc_level = chip->ecc.strength;
+ cfg->sector_size_1k = 0;
+ break;
+ case 1024:
+ if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
+ dev_err(ctrl->dev, "1KB sectors not supported\n");
+ return -EINVAL;
+ }
+ if (chip->ecc.strength & 0x1) {
+ dev_err(ctrl->dev,
+ "odd ECC not supported with 1KB sectors\n");
+ return -EINVAL;
+ }
+
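+		/* the HW ECC level is counted per 512B; halve it for 1KiB sectors */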
+ cfg->ecc_level = chip->ecc.strength >> 1;
+ cfg->sector_size_1k = 1;
+ break;
+ default:
+ dev_err(ctrl->dev, "unsupported ECC size: %d\n",
+ chip->ecc.size);
+ return -EINVAL;
+ }
+
+ cfg->ful_adr_bytes = cfg->blk_adr_bytes;
+ if (mtd->writesize > 512)
+ cfg->ful_adr_bytes += cfg->col_adr_bytes;
+ else
+ cfg->ful_adr_bytes += 1;
+
+ ret = brcmnand_set_cfg(host, cfg);
+ if (ret)
+ return ret;
+
+ brcmnand_set_ecc_enabled(host, 1);
+
+ brcmnand_print_cfg(host, msg, cfg);
+ dev_info(ctrl->dev, "detected %s\n", msg);
+
+ /* Configure ACC_CONTROL */
+ offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
+ tmp = nand_readreg(ctrl, offs);
+ tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
+ tmp &= ~ACC_CONTROL_RD_ERASED;
+
+	/* We need to turn on reads from erased pages protected by ECC */
+ if (ctrl->nand_version >= 0x0702)
+ tmp |= ACC_CONTROL_RD_ERASED;
+ tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
+ if (ctrl->features & BRCMNAND_HAS_PREFETCH)
+ tmp &= ~ACC_CONTROL_PREFETCH;
+
+ nand_writereg(ctrl, offs, tmp);
+
+ return 0;
+}
+
+static int brcmnand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ int ret;
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ /*
+ * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
+ * to/from, and have nand_base pass us a bounce buffer instead, as
+ * needed.
+ */
+ chip->options |= NAND_USES_DMA;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (brcmnand_setup_dev(host))
+ return -ENXIO;
+
+ chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
+
+ /* only use our internal HW threshold */
+ mtd->bitflip_threshold = 1;
+
+ ret = brcmstb_choose_ecc_layout(host);
+
+ /* If OOB is written with ECC enabled it will cause ECC errors */
+ if (is_hamming_ecc(host->ctrl, &host->hwcfg)) {
+ chip->ecc.write_oob = brcmnand_write_oob_raw;
+ chip->ecc.read_oob = brcmnand_read_oob_raw;
+ }
+
+ return ret;
+}
+
+static const struct nand_controller_ops brcmnand_controller_ops = {
+ .attach_chip = brcmnand_attach_chip,
+};
+
+static int brcmnand_init_cs(struct brcmnand_host *host,
+ const char * const *part_probe_types)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ struct device *dev = ctrl->dev;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int ret;
+ u16 cfg_offs;
+
+ mtd = nand_to_mtd(&host->chip);
+ chip = &host->chip;
+
+ nand_set_controller_data(chip, host);
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "brcmnand.%d",
+ host->cs);
+ if (!mtd->name)
+ return -ENOMEM;
+
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+
+ chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl;
+ chip->legacy.cmdfunc = brcmnand_cmdfunc;
+ chip->legacy.waitfunc = brcmnand_waitfunc;
+ chip->legacy.read_byte = brcmnand_read_byte;
+ chip->legacy.read_buf = brcmnand_read_buf;
+ chip->legacy.write_buf = brcmnand_write_buf;
+
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.read_page = brcmnand_read_page;
+ chip->ecc.write_page = brcmnand_write_page;
+ chip->ecc.read_page_raw = brcmnand_read_page_raw;
+ chip->ecc.write_page_raw = brcmnand_write_page_raw;
+ chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
+ chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
+ chip->ecc.read_oob = brcmnand_read_oob;
+ chip->ecc.write_oob = brcmnand_write_oob;
+
+ chip->controller = &ctrl->controller;
+
+ /*
+	 * The bootloader might have configured 16-bit mode, but the
+	 * NAND READID command only works in 8-bit mode. Force 8-bit
+	 * mode here to ensure that the READID command works.
+ */
+ cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
+ nand_writereg(ctrl, cfg_offs,
+ nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
+ if (ret)
+ nand_cleanup(chip);
+
+ return ret;
+}
+
+static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
+ int restore)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
+ u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_CFG_EXT);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
+ u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);
+
+ if (restore) {
+ nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
+ if (cfg_offs != cfg_ext_offs)
+ nand_writereg(ctrl, cfg_ext_offs,
+ host->hwcfg.config_ext);
+ nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
+ nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
+ nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
+ } else {
+ host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
+ if (cfg_offs != cfg_ext_offs)
+ host->hwcfg.config_ext =
+ nand_readreg(ctrl, cfg_ext_offs);
+ host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
+ host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
+ host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
+ }
+}
+
+static int brcmnand_suspend(struct device *dev)
+{
+ struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
+ struct brcmnand_host *host;
+
+ list_for_each_entry(host, &ctrl->host_list, node)
+ brcmnand_save_restore_cs_config(host, 0);
+
+ ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
+ ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
+ ctrl->corr_stat_threshold =
+ brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);
+
+ if (has_flash_dma(ctrl))
+ ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
+ else if (has_edu(ctrl))
+ ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
+
+ return 0;
+}
+
+static int brcmnand_resume(struct device *dev)
+{
+ struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
+ struct brcmnand_host *host;
+
+ if (has_flash_dma(ctrl)) {
+ flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
+ flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
+ }
+
+ if (has_edu(ctrl)) {
+ ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
+ edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config);
+ edu_readl(ctrl, EDU_CONFIG);
+ brcmnand_edu_init(ctrl);
+ }
+
+ brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
+ brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
+ brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
+ ctrl->corr_stat_threshold);
+ if (ctrl->soc) {
+ /* Clear/re-enable interrupt */
+ ctrl->soc->ctlrdy_ack(ctrl->soc);
+ ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
+ }
+
+ list_for_each_entry(host, &ctrl->host_list, node) {
+ struct nand_chip *chip = &host->chip;
+
+ brcmnand_save_restore_cs_config(host, 1);
+
+ /* Reset the chip, required by some chips after power-up */
+ nand_reset_op(chip);
+ }
+
+ return 0;
+}
+
+const struct dev_pm_ops brcmnand_pm_ops = {
+ .suspend = brcmnand_suspend,
+ .resume = brcmnand_resume,
+};
+EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
+
+static const struct of_device_id __maybe_unused brcmnand_of_match[] = {
+ { .compatible = "brcm,brcmnand-v2.1" },
+ { .compatible = "brcm,brcmnand-v2.2" },
+ { .compatible = "brcm,brcmnand-v4.0" },
+ { .compatible = "brcm,brcmnand-v5.0" },
+ { .compatible = "brcm,brcmnand-v6.0" },
+ { .compatible = "brcm,brcmnand-v6.1" },
+ { .compatible = "brcm,brcmnand-v6.2" },
+ { .compatible = "brcm,brcmnand-v7.0" },
+ { .compatible = "brcm,brcmnand-v7.1" },
+ { .compatible = "brcm,brcmnand-v7.2" },
+ { .compatible = "brcm,brcmnand-v7.3" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcmnand_of_match);
+
+/***********************************************************************
+ * Platform driver setup (per controller)
+ ***********************************************************************/
+static int brcmnand_edu_setup(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-edu");
+ if (res) {
+ ctrl->edu_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->edu_base))
+ return PTR_ERR(ctrl->edu_base);
+
+ ctrl->edu_offsets = edu_regs;
+
+ edu_writel(ctrl, EDU_CONFIG, EDU_CONFIG_MODE_NAND |
+ EDU_CONFIG_SWAP_CFG);
+ edu_readl(ctrl, EDU_CONFIG);
+
+ /* initialize edu */
+ brcmnand_edu_init(ctrl);
+
+ ctrl->edu_irq = platform_get_irq_optional(pdev, 1);
+ if (ctrl->edu_irq < 0) {
+ dev_warn(dev,
+ "FLASH EDU enabled, using ctlrdy irq\n");
+ } else {
+ ret = devm_request_irq(dev, ctrl->edu_irq,
+ brcmnand_edu_irq, 0,
+ "brcmnand-edu", ctrl);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "can't allocate IRQ %d: error %d\n",
+ ctrl->edu_irq, ret);
+ return ret;
+ }
+
+ dev_info(dev, "FLASH EDU enabled using irq %u\n",
+ ctrl->edu_irq);
+ }
+ }
+
+ return 0;
+}
+
+int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
+{
+ struct brcmnand_platform_data *pd = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node, *child;
+ struct brcmnand_controller *ctrl;
+ struct brcmnand_host *host;
+ struct resource *res;
+ int ret;
+
+ if (dn && !of_match_node(brcmnand_of_match, dn))
+ return -ENODEV;
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, ctrl);
+ ctrl->dev = dev;
+ ctrl->soc = soc;
+
+ /* Enable the static key if the soc provides I/O operations indicating
+ * that a non-memory mapped IO access path must be used
+ */
+ if (brcmnand_soc_has_ops(ctrl->soc))
+ static_branch_enable(&brcmnand_soc_has_ops_key);
+
+ init_completion(&ctrl->done);
+ init_completion(&ctrl->dma_done);
+ init_completion(&ctrl->edu_done);
+ nand_controller_init(&ctrl->controller);
+ ctrl->controller.ops = &brcmnand_controller_ops;
+ INIT_LIST_HEAD(&ctrl->host_list);
+
+ /* NAND register range */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctrl->nand_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->nand_base) && !brcmnand_soc_has_ops(soc))
+ return PTR_ERR(ctrl->nand_base);
+
+ /* Enable clock before using NAND registers */
+ ctrl->clk = devm_clk_get(dev, "nand");
+ if (!IS_ERR(ctrl->clk)) {
+ ret = clk_prepare_enable(ctrl->clk);
+ if (ret)
+ return ret;
+ } else {
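+		/* the clock is optional; only bail out on probe deferral */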
+ ret = PTR_ERR(ctrl->clk);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ ctrl->clk = NULL;
+ }
+
+ /* Initialize NAND revision */
+ ret = brcmnand_revision_init(ctrl);
+ if (ret)
+ goto err;
+
+ /*
+ * Most chips have this cache at a fixed offset within 'nand' block.
+ * Some must specify this region separately.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
+ if (res) {
+ ctrl->nand_fc = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->nand_fc)) {
+ ret = PTR_ERR(ctrl->nand_fc);
+ goto err;
+ }
+ } else {
+ ctrl->nand_fc = ctrl->nand_base +
+ ctrl->reg_offsets[BRCMNAND_FC_BASE];
+ }
+
+ /* FLASH_DMA */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
+ if (res) {
+ ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->flash_dma_base)) {
+ ret = PTR_ERR(ctrl->flash_dma_base);
+ goto err;
+ }
+
+ /* initialize the dma version */
+ brcmnand_flash_dma_revision_init(ctrl);
+
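+		/* prefer a 40-bit DMA mask on v7.0+ and fall back to 32-bit */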
+ ret = -EIO;
+ if (ctrl->nand_version >= 0x0700)
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(40));
+ if (ret)
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (ret)
+ goto err;
+
+ /* linked-list and stop on error */
+ flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
+ flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
+
+ /* Allocate descriptor(s) */
+ ctrl->dma_desc = dmam_alloc_coherent(dev,
+ sizeof(*ctrl->dma_desc),
+ &ctrl->dma_pa, GFP_KERNEL);
+ if (!ctrl->dma_desc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ctrl->dma_irq = platform_get_irq(pdev, 1);
+ if ((int)ctrl->dma_irq < 0) {
+ dev_err(dev, "missing FLASH_DMA IRQ\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ ret = devm_request_irq(dev, ctrl->dma_irq,
+ brcmnand_dma_irq, 0, DRV_NAME,
+ ctrl);
+ if (ret < 0) {
+ dev_err(dev, "can't allocate IRQ %d: error %d\n",
+ ctrl->dma_irq, ret);
+ goto err;
+ }
+
+ dev_info(dev, "enabling FLASH_DMA\n");
+ /* set flash dma transfer function to call */
+ ctrl->dma_trans = brcmnand_dma_trans;
+ } else {
+ ret = brcmnand_edu_setup(pdev);
+ if (ret < 0)
+ goto err;
+
+ if (has_edu(ctrl))
+ /* set edu transfer function to call */
+ ctrl->dma_trans = brcmnand_edu_trans;
+ }
+
+ /* Disable automatic device ID config, direct addressing */
+ brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
+ CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
+ /* Disable XOR addressing */
+ brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
+
+ if (ctrl->features & BRCMNAND_HAS_WP) {
+ /* Permanently disable write protection */
+ if (wp_on == 2)
+ brcmnand_set_wp(ctrl, false);
+ } else {
+ wp_on = 0;
+ }
+
+ /* IRQ */
+ ctrl->irq = platform_get_irq_optional(pdev, 0);
+ if (ctrl->irq > 0) {
+ /*
+ * Some SoCs integrate this controller (e.g., its interrupt bits) in
+ * interesting ways
+ */
+ if (soc) {
+ ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
+ DRV_NAME, ctrl);
+
+ /* Enable interrupt */
+ ctrl->soc->ctlrdy_ack(ctrl->soc);
+ ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
+ } else {
+ /* Use standard interrupt infrastructure */
+ ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
+ DRV_NAME, ctrl);
+ }
+ if (ret < 0) {
+ dev_err(dev, "can't allocate IRQ %d: error %d\n",
+ ctrl->irq, ret);
+ goto err;
+ }
+ }
+
+ for_each_available_child_of_node(dn, child) {
+ if (of_device_is_compatible(child, "brcm,nandcs")) {
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ of_node_put(child);
+ ret = -ENOMEM;
+ goto err;
+ }
+ host->pdev = pdev;
+ host->ctrl = ctrl;
+
+ ret = of_property_read_u32(child, "reg", &host->cs);
+ if (ret) {
+ dev_err(dev, "can't get chip-select\n");
+ devm_kfree(dev, host);
+ continue;
+ }
+
+ nand_set_flash_node(&host->chip, child);
+
+ ret = brcmnand_init_cs(host, NULL);
+ if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(child);
+ goto err;
+ }
+ devm_kfree(dev, host);
+ continue; /* Try all chip-selects */
+ }
+
+ list_add_tail(&host->node, &ctrl->host_list);
+ }
+ }
+
+ if (!list_empty(&ctrl->host_list))
+ return 0;
+
+ if (!pd) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+	/* If we got here, we must be probing via platform data */
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ host->pdev = pdev;
+ host->ctrl = ctrl;
+ host->cs = pd->chip_select;
+ host->chip.ecc.size = pd->ecc_stepsize;
+ host->chip.ecc.strength = pd->ecc_strength;
+
+ ret = brcmnand_init_cs(host, pd->part_probe_types);
+ if (ret)
+ goto err;
+
+ list_add_tail(&host->node, &ctrl->host_list);
+
+ /* No chip-selects could initialize properly */
+ if (list_empty(&ctrl->host_list)) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ clk_disable_unprepare(ctrl->clk);
+ return ret;
+
+}
+EXPORT_SYMBOL_GPL(brcmnand_probe);
+
+int brcmnand_remove(struct platform_device *pdev)
+{
+ struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
+ struct brcmnand_host *host;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry(host, &ctrl->host_list, node) {
+ chip = &host->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ }
+
+ clk_disable_unprepare(ctrl->clk);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(brcmnand_remove);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Kevin Cernekee");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for Broadcom chips");
+MODULE_ALIAS("platform:brcmnand");
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.h b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
new file mode 100644
index 0000000000..f1f93d85f5
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2015 Broadcom Corporation
+ */
+
+#ifndef __BRCMNAND_H__
+#define __BRCMNAND_H__
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+struct platform_device;
+struct dev_pm_ops;
+struct brcmnand_io_ops;
+
+/* Special register offset constant to intercept a non-MMIO access
+ * to the flash cache register space. It is intentionally large so
+ * that it does not overlap with any existing offset.
+ */
+#define BRCMNAND_NON_MMIO_FC_ADDR 0xffffffff
+
+struct brcmnand_soc {
+ bool (*ctlrdy_ack)(struct brcmnand_soc *soc);
+ void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en);
+ void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare,
+ bool is_param);
+ const struct brcmnand_io_ops *ops;
+};
+
+struct brcmnand_io_ops {
+ u32 (*read_reg)(struct brcmnand_soc *soc, u32 offset);
+ void (*write_reg)(struct brcmnand_soc *soc, u32 val, u32 offset);
+};
+
+static inline void brcmnand_soc_data_bus_prepare(struct brcmnand_soc *soc,
+ bool is_param)
+{
+ if (soc && soc->prepare_data_bus)
+ soc->prepare_data_bus(soc, true, is_param);
+}
+
+static inline void brcmnand_soc_data_bus_unprepare(struct brcmnand_soc *soc,
+ bool is_param)
+{
+ if (soc && soc->prepare_data_bus)
+ soc->prepare_data_bus(soc, false, is_param);
+}
+
+static inline u32 brcmnand_readl(void __iomem *addr)
+{
+ /*
+ * MIPS endianness is configured by boot strap, which also reverses all
+ * bus endianness (i.e., big-endian CPU + big endian bus ==> native
+ * endian I/O).
+ *
+ * Other architectures (e.g., ARM) either do not support big endian, or
+ * else leave I/O in little endian mode.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return __raw_readl(addr);
+ else
+ return readl_relaxed(addr);
+}
+
+static inline void brcmnand_writel(u32 val, void __iomem *addr)
+{
+ /* See brcmnand_readl() comments */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ __raw_writel(val, addr);
+ else
+ writel_relaxed(val, addr);
+}
+
+static inline bool brcmnand_soc_has_ops(struct brcmnand_soc *soc)
+{
+ return soc && soc->ops && soc->ops->read_reg && soc->ops->write_reg;
+}
+
+static inline u32 brcmnand_soc_read(struct brcmnand_soc *soc, u32 offset)
+{
+ return soc->ops->read_reg(soc, offset);
+}
+
+static inline void brcmnand_soc_write(struct brcmnand_soc *soc, u32 val,
+ u32 offset)
+{
+ soc->ops->write_reg(soc, val, offset);
+}
+
+int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc);
+int brcmnand_remove(struct platform_device *pdev);
+
+extern const struct dev_pm_ops brcmnand_pm_ops;
+
+#endif /* __BRCMNAND_H__ */
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
new file mode 100644
index 0000000000..950923d977
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2015 Broadcom Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include "brcmnand.h"
+
+static const struct of_device_id brcmstb_nand_of_match[] = {
+ { .compatible = "brcm,brcmnand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcmstb_nand_of_match);
+
+static int brcmstb_nand_probe(struct platform_device *pdev)
+{
+ return brcmnand_probe(pdev, NULL);
+}
+
+static struct platform_driver brcmstb_nand_driver = {
+ .probe = brcmstb_nand_probe,
+ .remove = brcmnand_remove,
+ .driver = {
+ .name = "brcmstb_nand",
+ .pm = &brcmnand_pm_ops,
+ .of_match_table = brcmstb_nand_of_match,
+ }
+};
+module_platform_driver(brcmstb_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for Broadcom STB chips");
diff --git a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
new file mode 100644
index 0000000000..089c70fc6e
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2015 Broadcom Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "brcmnand.h"
+
+struct iproc_nand_soc {
+ struct brcmnand_soc soc;
+
+ void __iomem *idm_base;
+ void __iomem *ext_base;
+ spinlock_t idm_lock;
+};
+
+#define IPROC_NAND_CTLR_READY_OFFSET 0x10
+#define IPROC_NAND_CTLR_READY BIT(0)
+
+#define IPROC_NAND_IO_CTRL_OFFSET 0x00
+#define IPROC_NAND_APB_LE_MODE BIT(24)
+#define IPROC_NAND_INT_CTRL_READ_ENABLE BIT(6)
+
+static bool iproc_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct iproc_nand_soc *priv =
+ container_of(soc, struct iproc_nand_soc, soc);
+ void __iomem *mmio = priv->ext_base + IPROC_NAND_CTLR_READY_OFFSET;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & IPROC_NAND_CTLR_READY) {
+ brcmnand_writel(IPROC_NAND_CTLR_READY, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void iproc_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct iproc_nand_soc *priv =
+ container_of(soc, struct iproc_nand_soc, soc);
+ void __iomem *mmio = priv->idm_base + IPROC_NAND_IO_CTRL_OFFSET;
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->idm_lock, flags);
+
+ val = brcmnand_readl(mmio);
+
+ if (en)
+ val |= IPROC_NAND_INT_CTRL_READ_ENABLE;
+ else
+ val &= ~IPROC_NAND_INT_CTRL_READ_ENABLE;
+
+ brcmnand_writel(val, mmio);
+
+ spin_unlock_irqrestore(&priv->idm_lock, flags);
+}
+
+static void iproc_nand_apb_access(struct brcmnand_soc *soc, bool prepare,
+ bool is_param)
+{
+ struct iproc_nand_soc *priv =
+ container_of(soc, struct iproc_nand_soc, soc);
+ void __iomem *mmio = priv->idm_base + IPROC_NAND_IO_CTRL_OFFSET;
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->idm_lock, flags);
+
+ val = brcmnand_readl(mmio);
+
+ /*
+	 * In the case of BE, or when dealing with NAND data, always configure
+	 * the APB bus to LE mode before accessing the FIFO and switch back to
+	 * BE mode after the access is done.
+ */
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) || !is_param) {
+ if (prepare)
+ val |= IPROC_NAND_APB_LE_MODE;
+ else
+ val &= ~IPROC_NAND_APB_LE_MODE;
+ } else { /* when in LE accessing the parameter page, keep APB in BE */
+ val &= ~IPROC_NAND_APB_LE_MODE;
+ }
+
+ brcmnand_writel(val, mmio);
+
+ spin_unlock_irqrestore(&priv->idm_lock, flags);
+}
+
+static int iproc_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iproc_nand_soc *priv;
+ struct brcmnand_soc *soc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ soc = &priv->soc;
+
+ spin_lock_init(&priv->idm_lock);
+
+ priv->idm_base = devm_platform_ioremap_resource_byname(pdev, "iproc-idm");
+ if (IS_ERR(priv->idm_base))
+ return PTR_ERR(priv->idm_base);
+
+ priv->ext_base = devm_platform_ioremap_resource_byname(pdev, "iproc-ext");
+ if (IS_ERR(priv->ext_base))
+ return PTR_ERR(priv->ext_base);
+
+ soc->ctlrdy_ack = iproc_nand_intc_ack;
+ soc->ctlrdy_set_enabled = iproc_nand_intc_set;
+ soc->prepare_data_bus = iproc_nand_apb_access;
+
+ return brcmnand_probe(pdev, soc);
+}
+
+static const struct of_device_id iproc_nand_of_match[] = {
+ { .compatible = "brcm,nand-iproc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, iproc_nand_of_match);
+
+static struct platform_driver iproc_nand_driver = {
+ .probe = iproc_nand_probe,
+ .remove = brcmnand_remove,
+ .driver = {
+ .name = "iproc_nand",
+ .pm = &brcmnand_pm_ops,
+ .of_match_table = iproc_nand_of_match,
+ }
+};
+module_platform_driver(iproc_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_AUTHOR("Ray Jui");
+MODULE_DESCRIPTION("NAND driver for Broadcom IPROC-based SoCs");
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
new file mode 100644
index 0000000000..034ec564c2
--- /dev/null
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -0,0 +1,3079 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Cadence NAND flash controller driver
+ *
+ * Copyright (C) 2019 Cadence
+ *
+ * Author: Piotr Sroka <piotrs@cadence.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_device.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+
+/*
+ * HPNFC can work in 3 modes:
+ * - PIO - can work in master or slave DMA
+ * - CDMA - needs Master DMA for accessing command descriptors.
+ * - Generic mode - can use only slave DMA.
+ * CDMA and PIO modes can be used to execute only base commands.
+ * Generic mode can be used to execute any command on the NAND flash
+ * memory. The driver uses CDMA mode for block erasing, page reading
+ * and page programming, and generic mode for the rest of the commands.
+ */
+
+#define MAX_ADDRESS_CYC 6
+#define MAX_ERASE_ADDRESS_CYC 3
+#define MAX_DATA_SIZE 0xFFFC
+#define DMA_DATA_SIZE_ALIGN 8
+
+/* Register definition. */
+/*
+ * Command register 0.
+ * Writing data to this register will initiate a new transaction
+ * of the NF controller.
+ */
+#define CMD_REG0 0x0000
+/* Command type field mask. */
+#define CMD_REG0_CT GENMASK(31, 30)
+/* Command type CDMA. */
+#define CMD_REG0_CT_CDMA 0uL
+/* Command type generic. */
+#define CMD_REG0_CT_GEN 3uL
+/* Command thread number field mask. */
+#define CMD_REG0_TN GENMASK(27, 24)
+
+/* Command register 2. */
+#define CMD_REG2 0x0008
+/* Command register 3. */
+#define CMD_REG3 0x000C
+/* Pointer register to select which thread's status will be reported. */
+#define CMD_STATUS_PTR 0x0010
+/* Command status register for selected thread. */
+#define CMD_STATUS 0x0014
+
+/* Interrupt status register. */
+#define INTR_STATUS 0x0110
+#define INTR_STATUS_SDMA_ERR BIT(22)
+#define INTR_STATUS_SDMA_TRIGG BIT(21)
+#define INTR_STATUS_UNSUPP_CMD BIT(19)
+#define INTR_STATUS_DDMA_TERR BIT(18)
+#define INTR_STATUS_CDMA_TERR BIT(17)
+#define INTR_STATUS_CDMA_IDL BIT(16)
+
+/* Interrupt enable register. */
+#define INTR_ENABLE 0x0114
+#define INTR_ENABLE_INTR_EN BIT(31)
+#define INTR_ENABLE_SDMA_ERR_EN BIT(22)
+#define INTR_ENABLE_SDMA_TRIGG_EN BIT(21)
+#define INTR_ENABLE_UNSUPP_CMD_EN BIT(19)
+#define INTR_ENABLE_DDMA_TERR_EN BIT(18)
+#define INTR_ENABLE_CDMA_TERR_EN BIT(17)
+#define INTR_ENABLE_CDMA_IDLE_EN BIT(16)
+
+/* Controller internal state. */
+#define CTRL_STATUS 0x0118
+#define CTRL_STATUS_INIT_COMP BIT(9)
+#define CTRL_STATUS_CTRL_BUSY BIT(8)
+
+/* Command Engine threads state. */
+#define TRD_STATUS 0x0120
+
+/* Command Engine interrupt thread error status. */
+#define TRD_ERR_INT_STATUS 0x0128
+/* Command Engine interrupt thread error enable. */
+#define TRD_ERR_INT_STATUS_EN 0x0130
+/* Command Engine interrupt thread complete status. */
+#define TRD_COMP_INT_STATUS 0x0138
+
+/*
+ * Transfer config 0 register.
+ * Configures data transfer parameters.
+ */
+#define TRAN_CFG_0 0x0400
+/* Offset value from the beginning of the page. */
+#define TRAN_CFG_0_OFFSET GENMASK(31, 16)
+/* Number of sectors to transfer within a single NF device's page. */
+#define TRAN_CFG_0_SEC_CNT GENMASK(7, 0)
+
+/*
+ * Transfer config 1 register.
+ * Configures data transfer parameters.
+ */
+#define TRAN_CFG_1 0x0404
+/* Size of last data sector. */
+#define TRAN_CFG_1_LAST_SEC_SIZE GENMASK(31, 16)
+/* Size of not-last data sector. */
+#define TRAN_CFG_1_SECTOR_SIZE GENMASK(15, 0)
+
+/* ECC engine configuration register 0. */
+#define ECC_CONFIG_0 0x0428
+/* Correction strength. */
+#define ECC_CONFIG_0_CORR_STR GENMASK(10, 8)
+/* Enable erased pages detection mechanism. */
+#define ECC_CONFIG_0_ERASE_DET_EN BIT(1)
+/* Enable controller ECC check bits generation and correction. */
+#define ECC_CONFIG_0_ECC_EN BIT(0)
+
+/* ECC engine configuration register 1. */
+#define ECC_CONFIG_1 0x042C
+
+/* Multiplane settings register. */
+#define MULTIPLANE_CFG 0x0434
+/* Cache operation settings. */
+#define CACHE_CFG 0x0438
+
+/* DMA settings register. */
+#define DMA_SETINGS 0x043C
+/* Enable SDMA error reporting on access to an unprepared slave DMA interface. */
+#define DMA_SETINGS_SDMA_ERR_RSP BIT(17)
+
+/* Transferred data block size for the slave DMA module. */
+#define SDMA_SIZE 0x0440
+
+/* Thread number associated with transferred data block
+ * for the slave DMA module.
+ */
+#define SDMA_TRD_NUM 0x0444
+/* Thread number mask. */
+#define SDMA_TRD_NUM_SDMA_TRD GENMASK(2, 0)
+
+#define CONTROL_DATA_CTRL 0x0494
+/* Control data size mask. */
+#define CONTROL_DATA_CTRL_SIZE GENMASK(15, 0)
+
+#define CTRL_VERSION 0x800
+#define CTRL_VERSION_REV GENMASK(7, 0)
+
+/* Available hardware features of the controller. */
+#define CTRL_FEATURES 0x804
+/* Support for NV-DDR2/3 work mode. */
+#define CTRL_FEATURES_NVDDR_2_3 BIT(28)
+/* Support for NV-DDR work mode. */
+#define CTRL_FEATURES_NVDDR BIT(27)
+/* Support for asynchronous work mode. */
+#define CTRL_FEATURES_ASYNC BIT(26)
+/* Number of banks supported by the controller. */
+#define CTRL_FEATURES_N_BANKS GENMASK(25, 24)
+/* Slave and Master DMA data width. */
+#define CTRL_FEATURES_DMA_DWITH64 BIT(21)
+/* Availability of the Control Data feature. */
+#define CTRL_FEATURES_CONTROL_DATA BIT(10)
+
+/* BCH Engine identification register 0 - correction strengths. */
+#define BCH_CFG_0 0x838
+#define BCH_CFG_0_CORR_CAP_0 GENMASK(7, 0)
+#define BCH_CFG_0_CORR_CAP_1 GENMASK(15, 8)
+#define BCH_CFG_0_CORR_CAP_2 GENMASK(23, 16)
+#define BCH_CFG_0_CORR_CAP_3 GENMASK(31, 24)
+
+/* BCH Engine identification register 1 - correction strengths. */
+#define BCH_CFG_1 0x83C
+#define BCH_CFG_1_CORR_CAP_4 GENMASK(7, 0)
+#define BCH_CFG_1_CORR_CAP_5 GENMASK(15, 8)
+#define BCH_CFG_1_CORR_CAP_6 GENMASK(23, 16)
+#define BCH_CFG_1_CORR_CAP_7 GENMASK(31, 24)
+
+/* BCH Engine identification register 2 - sector sizes. */
+#define BCH_CFG_2 0x840
+#define BCH_CFG_2_SECT_0 GENMASK(15, 0)
+#define BCH_CFG_2_SECT_1 GENMASK(31, 16)
+
+/* BCH Engine identification register 3. */
+#define BCH_CFG_3 0x844
+#define BCH_CFG_3_METADATA_SIZE GENMASK(23, 16)
+
+/* Ready/Busy# line status. */
+#define RBN_SETINGS 0x1004
+
+/* Common settings. */
+#define COMMON_SET 0x1008
+/* 16 bit device connected to the NAND Flash interface. */
+#define COMMON_SET_DEVICE_16BIT BIT(8)
+
+/* Skip_bytes registers. */
+#define SKIP_BYTES_CONF 0x100C
+#define SKIP_BYTES_MARKER_VALUE GENMASK(31, 16)
+#define SKIP_BYTES_NUM_OF_BYTES GENMASK(7, 0)
+
+#define SKIP_BYTES_OFFSET 0x1010
+#define SKIP_BYTES_OFFSET_VALUE GENMASK(23, 0)
+
+/* Timings configuration. */
+#define ASYNC_TOGGLE_TIMINGS 0x101c
+#define ASYNC_TOGGLE_TIMINGS_TRH GENMASK(28, 24)
+#define ASYNC_TOGGLE_TIMINGS_TRP GENMASK(20, 16)
+#define ASYNC_TOGGLE_TIMINGS_TWH GENMASK(12, 8)
+#define ASYNC_TOGGLE_TIMINGS_TWP GENMASK(4, 0)
+
+#define TIMINGS0 0x1024
+#define TIMINGS0_TADL GENMASK(31, 24)
+#define TIMINGS0_TCCS GENMASK(23, 16)
+#define TIMINGS0_TWHR GENMASK(15, 8)
+#define TIMINGS0_TRHW GENMASK(7, 0)
+
+#define TIMINGS1 0x1028
+#define TIMINGS1_TRHZ GENMASK(31, 24)
+#define TIMINGS1_TWB GENMASK(23, 16)
+#define TIMINGS1_TVDLY GENMASK(7, 0)
+
+#define TIMINGS2 0x102c
+#define TIMINGS2_TFEAT GENMASK(25, 16)
+#define TIMINGS2_CS_HOLD_TIME GENMASK(13, 8)
+#define TIMINGS2_CS_SETUP_TIME GENMASK(5, 0)
+
+/* Configuration of the resynchronization of slave DLL of PHY. */
+#define DLL_PHY_CTRL 0x1034
+#define DLL_PHY_CTRL_DLL_RST_N BIT(24)
+#define DLL_PHY_CTRL_EXTENDED_WR_MODE BIT(17)
+#define DLL_PHY_CTRL_EXTENDED_RD_MODE BIT(16)
+#define DLL_PHY_CTRL_RS_HIGH_WAIT_CNT GENMASK(11, 8)
+#define DLL_PHY_CTRL_RS_IDLE_CNT GENMASK(7, 0)
+
+/* Register controlling DQ related timing. */
+#define PHY_DQ_TIMING 0x2000
+/* Register controlling DSQ related timing. */
+#define PHY_DQS_TIMING 0x2004
+#define PHY_DQS_TIMING_DQS_SEL_OE_END GENMASK(3, 0)
+#define PHY_DQS_TIMING_PHONY_DQS_SEL BIT(16)
+#define PHY_DQS_TIMING_USE_PHONY_DQS BIT(20)
+
+/* Register controlling the gate and loopback control related timing. */
+#define PHY_GATE_LPBK_CTRL 0x2008
+#define PHY_GATE_LPBK_CTRL_RDS GENMASK(24, 19)
+
+/* Register holds the control for the master DLL logic. */
+#define PHY_DLL_MASTER_CTRL 0x200C
+#define PHY_DLL_MASTER_CTRL_BYPASS_MODE BIT(23)
+
+/* Register holds the control for the slave DLL logic. */
+#define PHY_DLL_SLAVE_CTRL 0x2010
+
+/* This register handles the global control settings for the PHY. */
+#define PHY_CTRL 0x2080
+#define PHY_CTRL_SDR_DQS BIT(14)
+#define PHY_CTRL_PHONY_DQS GENMASK(9, 4)
+
+/*
+ * This register handles the global control settings
+ * for the termination selects for reads.
+ */
+#define PHY_TSEL 0x2084
+
+/* Generic command layout. */
+#define GCMD_LAY_CS GENMASK_ULL(11, 8)
+/*
+ * This bit informs the minicontroller whether it has to wait for tWB
+ * after sending the last CMD/ADDR/DATA in the sequence.
+ */
+#define GCMD_LAY_TWB BIT_ULL(6)
+/* Type of generic instruction. */
+#define GCMD_LAY_INSTR GENMASK_ULL(5, 0)
+
+/* Generic CMD sequence type. */
+#define GCMD_LAY_INSTR_CMD 0
+/* Generic ADDR sequence type. */
+#define GCMD_LAY_INSTR_ADDR 1
+/* Generic data transfer sequence type. */
+#define GCMD_LAY_INSTR_DATA 2
+
+/* Command field of the input part of a generic command sequence. */
+#define GCMD_LAY_INPUT_CMD GENMASK_ULL(23, 16)
+
+/* Generic command address sequence - address fields. */
+#define GCMD_LAY_INPUT_ADDR GENMASK_ULL(63, 16)
+/* Generic command address sequence - address size. */
+#define GCMD_LAY_INPUT_ADDR_SIZE GENMASK_ULL(13, 11)
+
+/* Transfer direction field of generic command data sequence. */
+#define GCMD_DIR BIT_ULL(11)
+/* Read transfer direction of generic command data sequence. */
+#define GCMD_DIR_READ 0
+/* Write transfer direction of generic command data sequence. */
+#define GCMD_DIR_WRITE 1
+
+/* ECC enable flag of generic command data sequence. */
+#define GCMD_ECC_EN BIT_ULL(12)
+/* Generic command data sequence - sector size. */
+#define GCMD_SECT_SIZE GENMASK_ULL(31, 16)
+/* Generic command data sequence - sector count. */
+#define GCMD_SECT_CNT GENMASK_ULL(39, 32)
+/* Generic command data sequence - last sector size. */
+#define GCMD_LAST_SIZE GENMASK_ULL(55, 40)
+
+/* CDMA descriptor fields. */
+/* Erase command type of CDMA descriptor. */
+#define CDMA_CT_ERASE 0x1000
+/* Program page command type of CDMA descriptor. */
+#define CDMA_CT_WR 0x2100
+/* Read page command type of CDMA descriptor. */
+#define CDMA_CT_RD 0x2200
+
+/* Flash pointer memory shift. */
+#define CDMA_CFPTR_MEM_SHIFT 24
+/* Flash pointer memory mask. */
+#define CDMA_CFPTR_MEM GENMASK(26, 24)
+
+/*
+ * Command DMA descriptor flag. If set, an interrupt is issued after
+ * the completion of descriptor processing.
+ */
+#define CDMA_CF_INT BIT(8)
+/*
+ * Command DMA descriptor flags - the next descriptor
+ * address field is valid and descriptor processing should continue.
+ */
+#define CDMA_CF_CONT BIT(9)
+/* DMA master flag of command DMA descriptor. */
+#define CDMA_CF_DMA_MASTER BIT(10)
+
+/* Operation complete status of command descriptor. */
+#define CDMA_CS_COMP BIT(15)
+/* Command descriptor status - operation fail. */
+#define CDMA_CS_FAIL BIT(14)
+/* Command descriptor status - page erased. */
+#define CDMA_CS_ERP BIT(11)
+/* Command descriptor status - timeout occurred. */
+#define CDMA_CS_TOUT BIT(10)
+/*
+ * Maximum amount of correction applied to one ECC sector.
+ * It is part of command descriptor status.
+ */
+#define CDMA_CS_MAXERR GENMASK(9, 2)
+/* Command descriptor status - uncorrectable ECC error. */
+#define CDMA_CS_UNCE BIT(1)
+/* Command descriptor status - descriptor error. */
+#define CDMA_CS_ERR BIT(0)
+
+/* Status of operation - OK. */
+#define STAT_OK 0
+/* Status of operation - FAIL. */
+#define STAT_FAIL 2
+/* Status of operation - uncorrectable ECC error. */
+#define STAT_ECC_UNCORR 3
+/* Status of operation - page erased. */
+#define STAT_ERASED 5
+/* Status of operation - correctable ECC error. */
+#define STAT_ECC_CORR 6
+/* Status of operation - unexpected state. */
+#define STAT_UNKNOWN 7
+/* Status of operation - operation is not completed yet. */
+#define STAT_BUSY 0xFF
+
+#define BCH_MAX_NUM_CORR_CAPS 8
+#define BCH_MAX_NUM_SECTOR_SIZES 2
+
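+/*
+ * Per-chip timing register values. They are precomputed in
+ * cadence_nand_setup_interface() and written to the controller
+ * each time a different chip is selected.
+ */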
+struct cadence_nand_timings {
+ u32 async_toggle_timings;
+ u32 timings0;
+ u32 timings1;
+ u32 timings2;
+ u32 dll_phy_ctrl;
+ u32 phy_ctrl;
+ u32 phy_dqs_timing;
+ u32 phy_gate_lpbk_ctrl;
+};
+
+/* Command DMA descriptor. */
+struct cadence_nand_cdma_desc {
+ /* Next descriptor address. */
+ u64 next_pointer;
+
+ /* Flash address is a 32-bit address comprising of BANK and ROW ADDR. */
+ u32 flash_pointer;
+ /* Field appears in HPNFC version 13. */
+ u16 bank;
+ u16 rsvd0;
+
+ /* Operation the controller needs to perform. */
+ u16 command_type;
+ u16 rsvd1;
+ /* Flags for operation of this command. */
+ u16 command_flags;
+ u16 rsvd2;
+
+ /* System/host memory address required for data DMA commands. */
+ u64 memory_pointer;
+
+ /* Status of operation. */
+ u32 status;
+ u32 rsvd3;
+
+ /* Address pointer to sync buffer location. */
+ u64 sync_flag_pointer;
+
+ /* Controls the buffer sync mechanism. */
+ u32 sync_arguments;
+ u32 rsvd4;
+
+ /* Control data pointer. */
+ u64 ctrl_data_ptr;
+};
+
+/* Interrupt status. */
+struct cadence_nand_irq_status {
+ /* Thread operation complete status. */
+ u32 trd_status;
+ /* Thread operation error. */
+ u32 trd_error;
+ /* Controller status. */
+ u32 status;
+};
+
+/* Cadence NAND flash controller capabilities obtained from driver data. */
+struct cadence_nand_dt_devdata {
+ /* Skew value of the output signals of the NAND Flash interface. */
+ u32 if_skew;
+ /* Informs whether the slave DMA interface is connected to a DMA engine. */
+ unsigned int has_dma:1;
+};
+
+/* Cadence NAND flash controller capabilities read from registers. */
+struct cdns_nand_caps {
+ /* Maximum number of banks supported by hardware. */
+ u8 max_banks;
+ /* Slave and Master DMA data width in bytes (4 or 8). */
+ u8 data_dma_width;
+ /* Control Data feature supported. */
+ bool data_control_supp;
+ /* Is PHY type DLL. */
+ bool is_phy_type_dll;
+};
+
+struct cdns_nand_ctrl {
+ struct device *dev;
+ struct nand_controller controller;
+ struct cadence_nand_cdma_desc *cdma_desc;
+ /* IP capability. */
+ const struct cadence_nand_dt_devdata *caps1;
+ struct cdns_nand_caps caps2;
+ u8 ctrl_rev;
+ dma_addr_t dma_cdma_desc;
+ u8 *buf;
+ u32 buf_size;
+ u8 curr_corr_str_idx;
+
+ /* Register interface. */
+ void __iomem *reg;
+
+ struct {
+ void __iomem *virt;
+ dma_addr_t dma;
+ } io;
+
+ int irq;
+ /* Interrupts that have happened. */
+ struct cadence_nand_irq_status irq_status;
+ /* Interrupts we are waiting for. */
+ struct cadence_nand_irq_status irq_mask;
+ struct completion complete;
+ /* Protect irq_mask and irq_status. */
+ spinlock_t irq_lock;
+
+ int ecc_strengths[BCH_MAX_NUM_CORR_CAPS];
+ struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES];
+ struct nand_ecc_caps ecc_caps;
+
+ int curr_trans_type;
+
+ struct dma_chan *dmac;
+
+ u32 nf_clk_rate;
+ /*
+ * Estimated Board delay. The value includes the total
+ * round trip delay for the signals and is used for deciding on values
+ * associated with data read capture.
+ */
+ u32 board_delay;
+
+ struct nand_chip *selected_chip;
+
+ unsigned long assigned_cs;
+ struct list_head chips;
+ u8 bch_metadata_size;
+};
+
+struct cdns_nand_chip {
+ struct cadence_nand_timings timings;
+ struct nand_chip chip;
+ u8 nsels;
+ struct list_head node;
+
+ /*
+ * Part of the OOB area of the NAND flash memory page
+ * that is available for the user to read or write.
+ */
+ u32 avail_oob_size;
+
+ /* Sector size. There are a few sectors per mtd->writesize. */
+ u32 sector_size;
+ u32 sector_count;
+
+ /* Offset of BBM. */
+ u8 bbm_offs;
+ /* Number of bytes reserved for BBM. */
+ u8 bbm_len;
+ /* ECC strength index. */
+ u8 corr_str_idx;
+
+ u8 cs[];
+};
+
+struct ecc_info {
+ int (*calc_ecc_bytes)(int step_size, int strength);
+ int max_step_size;
+};
+
+static inline struct
+cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
+{
+ return container_of(chip, struct cdns_nand_chip, chip);
+}
+
+static inline struct
+cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller)
+{
+ return container_of(controller, struct cdns_nand_ctrl, controller);
+}
+
+static bool
+cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf,
+ u32 buf_len)
+{
+ u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
+
+ return buf && virt_addr_valid(buf) &&
+ likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
+ likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
+}
+
+static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl,
+ u32 reg_offset, u32 timeout_us,
+ u32 mask, bool is_clear)
+{
+ u32 val;
+ int ret;
+
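+ /*
+ * Poll until all bits in mask are cleared (is_clear == true)
+ * or until any of them is set (is_clear == false).
+ */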
+ ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset,
+ val, !(val & mask) == is_clear,
+ 10, timeout_us);
+
+ if (ret < 0) {
+ dev_err(cdns_ctrl->dev,
+ "Timeout while waiting for reg %x with mask %x is clear %d\n",
+ reg_offset, mask, is_clear);
+ }
+
+ return ret;
+}
+
+static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl,
+ bool enable)
+{
+ u32 reg;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+
+ if (enable)
+ reg |= ECC_CONFIG_0_ECC_EN;
+ else
+ reg &= ~ECC_CONFIG_0_ECC_EN;
+
+ writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+
+ return 0;
+}
+
+static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 corr_str_idx)
+{
+ u32 reg;
+
+ if (cdns_ctrl->curr_corr_str_idx == corr_str_idx)
+ return;
+
+ reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+ reg &= ~ECC_CONFIG_0_CORR_STR;
+ reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx);
+ writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+
+ cdns_ctrl->curr_corr_str_idx = corr_str_idx;
+}
+
+static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 strength)
+{
+ int i, corr_str_idx = -1;
+
+ for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
+ if (cdns_ctrl->ecc_strengths[i] == strength) {
+ corr_str_idx = i;
+ break;
+ }
+ }
+
+ return corr_str_idx;
+}
+
+static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl,
+ u16 marker_value)
+{
+ u32 reg;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
+ reg &= ~SKIP_BYTES_MARKER_VALUE;
+ reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE,
+ marker_value);
+
+ writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
+
+ return 0;
+}
+
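+/*
+ * Configure the "skip bytes" feature: num_of_bytes bytes at offset_value
+ * within the page are excluded from ECC-protected data transfers. The
+ * driver uses this, together with cadence_nand_set_skip_marker_val(),
+ * to preserve the bad block marker area in the OOB.
+ */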
+static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 num_of_bytes,
+ u32 offset_value,
+ int enable)
+{
+ u32 reg, skip_bytes_offset;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ if (!enable) {
+ num_of_bytes = 0;
+ offset_value = 0;
+ }
+
+ reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
+ reg &= ~SKIP_BYTES_NUM_OF_BYTES;
+ reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES,
+ num_of_bytes);
+ skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE,
+ offset_value);
+
+ writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
+ writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + SKIP_BYTES_OFFSET);
+
+ return 0;
+}
+
+/* Function enables/disables hardware detection of erased data. */
+static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl,
+ bool enable,
+ u8 bitflips_threshold)
+{
+ u32 reg;
+
+ reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+
+ if (enable)
+ reg |= ECC_CONFIG_0_ERASE_DET_EN;
+ else
+ reg &= ~ECC_CONFIG_0_ERASE_DET_EN;
+
+ writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+ writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1);
+}
+
+static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl,
+ bool bit_bus16)
+{
+ u32 reg;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET);
+
+ if (!bit_bus16)
+ reg &= ~COMMON_SET_DEVICE_16BIT;
+ else
+ reg |= COMMON_SET_DEVICE_16BIT;
+ writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET);
+
+ return 0;
+}
+
+static void
+cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_status)
+{
+ writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS);
+ writel_relaxed(irq_status->trd_status,
+ cdns_ctrl->reg + TRD_COMP_INT_STATUS);
+ writel_relaxed(irq_status->trd_error,
+ cdns_ctrl->reg + TRD_ERR_INT_STATUS);
+}
+
+static void
+cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_status)
+{
+ irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS);
+ irq_status->trd_status = readl_relaxed(cdns_ctrl->reg
+ + TRD_COMP_INT_STATUS);
+ irq_status->trd_error = readl_relaxed(cdns_ctrl->reg
+ + TRD_ERR_INT_STATUS);
+}
+
+static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_status)
+{
+ cadence_nand_read_int_status(cdns_ctrl, irq_status);
+
+ return irq_status->status || irq_status->trd_status ||
+ irq_status->trd_error;
+}
+
+static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cdns_ctrl->irq_lock, flags);
+ memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status));
+ memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask));
+ spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags);
+}
+
+/*
+ * This is the interrupt service routine. It handles all interrupts
+ * sent to this device.
+ */
+static irqreturn_t cadence_nand_isr(int irq, void *dev_id)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = dev_id;
+ struct cadence_nand_irq_status irq_status;
+ irqreturn_t result = IRQ_NONE;
+
+ spin_lock(&cdns_ctrl->irq_lock);
+
+ if (irq_detected(cdns_ctrl, &irq_status)) {
+ /* Handle interrupt. */
+ /* First acknowledge it. */
+ cadence_nand_clear_interrupt(cdns_ctrl, &irq_status);
+ /* Status in the device context for someone to read. */
+ cdns_ctrl->irq_status.status |= irq_status.status;
+ cdns_ctrl->irq_status.trd_status |= irq_status.trd_status;
+ cdns_ctrl->irq_status.trd_error |= irq_status.trd_error;
+ /* Notify anyone who cares that it happened. */
+ complete(&cdns_ctrl->complete);
+ /* Tell the OS that we've handled this. */
+ result = IRQ_HANDLED;
+ }
+ spin_unlock(&cdns_ctrl->irq_lock);
+
+ return result;
+}
+
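+/*
+ * Enable the interrupts selected in irq_mask;
+ * INTR_ENABLE_INTR_EN is the global interrupt enable bit.
+ */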
+static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_mask)
+{
+ writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status,
+ cdns_ctrl->reg + INTR_ENABLE);
+
+ writel_relaxed(irq_mask->trd_error,
+ cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN);
+}
+
+static void
+cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_irq_status *irq_mask,
+ struct cadence_nand_irq_status *irq_status)
+{
+ unsigned long timeout = msecs_to_jiffies(10000);
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&cdns_ctrl->complete,
+ timeout);
+
+ *irq_status = cdns_ctrl->irq_status;
+ if (time_left == 0) {
+ /* Timeout error. */
+ dev_err(cdns_ctrl->dev, "timeout occurred:\n");
+ dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n",
+ irq_status->status, irq_mask->status);
+ dev_err(cdns_ctrl->dev,
+ "\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
+ irq_status->trd_status, irq_mask->trd_status);
+ dev_err(cdns_ctrl->dev,
+ "\t trd_error = 0x%x, trd_error mask = 0x%x\n",
+ irq_status->trd_error, irq_mask->trd_error);
+ }
+}
+
+/* Execute generic command on NAND controller. */
+static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 chip_nr,
+ u64 mini_ctrl_cmd)
+{
+ u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr);
+ mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF;
+ mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ cadence_nand_reset_irq(cdns_ctrl);
+
+ writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2);
+ writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3);
+
+ /* Select generic command. */
+ reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN);
+ /* Thread number. */
+ reg |= FIELD_PREP(CMD_REG0_TN, 0);
+
+ /* Issue command. */
+ writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
+
+ return 0;
+}
+
+/* Wait for data on slave DMA interface. */
+static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 *out_sdma_trd,
+ u32 *out_sdma_size)
+{
+ struct cadence_nand_irq_status irq_mask, irq_status;
+
+ irq_mask.trd_status = 0;
+ irq_mask.trd_error = 0;
+ irq_mask.status = INTR_STATUS_SDMA_TRIGG
+ | INTR_STATUS_SDMA_ERR
+ | INTR_STATUS_UNSUPP_CMD;
+
+ cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
+ cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
+ if (irq_status.status == 0) {
+ dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n");
+ return -ETIMEDOUT;
+ }
+
+ if (irq_status.status & INTR_STATUS_SDMA_TRIGG) {
+ *out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE);
+ *out_sdma_trd = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM);
+ *out_sdma_trd =
+ FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd);
+ } else {
+ dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n",
+ irq_status.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ u32 reg;
+
+ reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES);
+
+ cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg);
+
+ if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg))
+ cdns_ctrl->caps2.data_dma_width = 8;
+ else
+ cdns_ctrl->caps2.data_dma_width = 4;
+
+ if (reg & CTRL_FEATURES_CONTROL_DATA)
+ cdns_ctrl->caps2.data_control_supp = true;
+
+ if (reg & (CTRL_FEATURES_NVDDR_2_3
+ | CTRL_FEATURES_NVDDR))
+ cdns_ctrl->caps2.is_phy_type_dll = true;
+}
+
+/* Prepare CDMA descriptor. */
+static void
+cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
+ char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
+ dma_addr_t ctrl_data_ptr, u16 ctype)
+{
+ struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;
+
+ memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc));
+
+ /* Set fields for one descriptor. */
+ cdma_desc->flash_pointer = flash_ptr;
+ if (cdns_ctrl->ctrl_rev >= 13)
+ cdma_desc->bank = nf_mem;
+ else
+ cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT);
+
+ cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
+ cdma_desc->command_flags |= CDMA_CF_INT;
+
+ cdma_desc->memory_pointer = mem_ptr;
+ cdma_desc->status = 0;
+ cdma_desc->sync_flag_pointer = 0;
+ cdma_desc->sync_arguments = 0;
+
+ cdma_desc->command_type = ctype;
+ cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
+}
+
+static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
+ u32 desc_status)
+{
+ if (desc_status & CDMA_CS_ERP)
+ return STAT_ERASED;
+
+ if (desc_status & CDMA_CS_UNCE)
+ return STAT_ECC_UNCORR;
+
+ if (desc_status & CDMA_CS_ERR) {
+ dev_err(cdns_ctrl->dev, ":CDMA desc error flag detected.\n");
+ return STAT_FAIL;
+ }
+
+ if (FIELD_GET(CDMA_CS_MAXERR, desc_status))
+ return STAT_ECC_CORR;
+
+ return STAT_FAIL;
+}
+
+static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc;
+ u8 status = STAT_BUSY;
+
+ if (desc_ptr->status & CDMA_CS_FAIL) {
+ status = cadence_nand_check_desc_error(cdns_ctrl,
+ desc_ptr->status);
+ dev_err(cdns_ctrl->dev, ":CDMA error %x\n", desc_ptr->status);
+ } else if (desc_ptr->status & CDMA_CS_COMP) {
+ /* Descriptor finished with no errors. */
+ if (desc_ptr->command_flags & CDMA_CF_CONT) {
+ dev_info(cdns_ctrl->dev, "DMA unsupported flag is set");
+ status = STAT_UNKNOWN;
+ } else {
+ /* Last descriptor. */
+ status = STAT_OK;
+ }
+ }
+
+ return status;
+}
+
+static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 thread)
+{
+ u32 reg;
+ int status;
+
+ /* Wait for thread ready. */
+ status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS,
+ 1000000,
+ BIT(thread), true);
+ if (status)
+ return status;
+
+ cadence_nand_reset_irq(cdns_ctrl);
+ reinit_completion(&cdns_ctrl->complete);
+
+ writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
+ cdns_ctrl->reg + CMD_REG2);
+ writel_relaxed(0, cdns_ctrl->reg + CMD_REG3);
+
+ /* Select CDMA mode. */
+ reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA);
+ /* Thread number. */
+ reg |= FIELD_PREP(CMD_REG0_TN, thread);
+ /* Issue command. */
+ writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
+
+ return 0;
+}
+
+/* Send CDMA command and wait for completion. */
+static u32
+cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 thread)
+{
+ struct cadence_nand_irq_status irq_mask, irq_status = {0};
+ int status;
+
+ irq_mask.trd_status = BIT(thread);
+ irq_mask.trd_error = BIT(thread);
+ irq_mask.status = INTR_STATUS_CDMA_TERR;
+
+ cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
+
+ status = cadence_nand_cdma_send(cdns_ctrl, thread);
+ if (status)
+ return status;
+
+ cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
+
+ if (irq_status.status == 0 && irq_status.trd_status == 0 &&
+ irq_status.trd_error == 0) {
+ dev_err(cdns_ctrl->dev, "CDMA command timeout\n");
+ return -ETIMEDOUT;
+ }
+ if (irq_status.status & irq_mask.status) {
+ dev_err(cdns_ctrl->dev, "CDMA command failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * ECC size depends on configured ECC strength and on maximum supported
+ * ECC step size.
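+ *
+ * For example, with a 1024-byte maximum step size and strength 16:
+ * fls(8 * 1024) = 14, so 14 * 16 = 224 bits, i.e. 28 ECC bytes per
+ * step (already 2-byte aligned).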
+ */
+static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength)
+{
+ int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8);
+
+ return ALIGN(nbytes, 2);
+}
+
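+/*
+ * Generate wrappers matching the nand_ecc_caps calc_ecc_bytes()
+ * callback signature. The step_size argument is ignored: the number
+ * of ECC bytes depends only on the maximum supported step size.
+ */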
+#define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
+ static int \
+ cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
+ int strength)\
+ {\
+ return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
+ }
+
+CADENCE_NAND_CALC_ECC_BYTES(256)
+CADENCE_NAND_CALC_ECC_BYTES(512)
+CADENCE_NAND_CALC_ECC_BYTES(1024)
+CADENCE_NAND_CALC_ECC_BYTES(2048)
+CADENCE_NAND_CALC_ECC_BYTES(4096)
+
+/* Function reads BCH capabilities. */
+static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct nand_ecc_caps *ecc_caps = &cdns_ctrl->ecc_caps;
+ int max_step_size = 0, nstrengths, i;
+ u32 reg;
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_3);
+ cdns_ctrl->bch_metadata_size = FIELD_GET(BCH_CFG_3_METADATA_SIZE, reg);
+ if (cdns_ctrl->bch_metadata_size < 4) {
+ dev_err(cdns_ctrl->dev,
+ "Driver needs at least 4 bytes of BCH meta data\n");
+ return -EIO;
+ }
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
+ cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
+ cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
+ cdns_ctrl->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg);
+ cdns_ctrl->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg);
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_1);
+ cdns_ctrl->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg);
+ cdns_ctrl->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg);
+ cdns_ctrl->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg);
+ cdns_ctrl->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg);
+
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_2);
+ cdns_ctrl->ecc_stepinfos[0].stepsize =
+ FIELD_GET(BCH_CFG_2_SECT_0, reg);
+
+ cdns_ctrl->ecc_stepinfos[1].stepsize =
+ FIELD_GET(BCH_CFG_2_SECT_1, reg);
+
+ nstrengths = 0;
+ for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
+ if (cdns_ctrl->ecc_strengths[i] != 0)
+ nstrengths++;
+ }
+
+ ecc_caps->nstepinfos = 0;
+ for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) {
+ /* ECC strengths are common for all step infos. */
+ cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths;
+ cdns_ctrl->ecc_stepinfos[i].strengths =
+ cdns_ctrl->ecc_strengths;
+
+ if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0)
+ ecc_caps->nstepinfos++;
+
+ if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size)
+ max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize;
+ }
+ ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0];
+
+ switch (max_step_size) {
+ case 256:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256;
+ break;
+ case 512:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512;
+ break;
+ case 1024:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024;
+ break;
+ case 2048:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048;
+ break;
+ case 4096:
+ ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096;
+ break;
+ default:
+ dev_err(cdns_ctrl->dev,
+ "Unsupported sector size(ecc step size) %d\n",
+ max_step_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Hardware initialization. */
+static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ int status;
+ u32 reg;
+
+ status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_INIT_COMP, false);
+ if (status)
+ return status;
+
+ reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION);
+ cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg);
+
+ dev_info(cdns_ctrl->dev,
+ "%s: cadence nand controller version reg %x\n",
+ __func__, reg);
+
+ /* Disable cache and multiplane. */
+ writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG);
+ writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG);
+
+ /* Clear all interrupts. */
+ writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);
+
+ cadence_nand_get_caps(cdns_ctrl);
+ if (cadence_nand_read_bch_caps(cdns_ctrl))
+ return -EIO;
+
+#ifndef CONFIG_64BIT
+ if (cdns_ctrl->caps2.data_dma_width == 8) {
+ dev_err(cdns_ctrl->dev,
+ "cannot access 64-bit dma on !64-bit architectures");
+ return -EIO;
+ }
+#endif
+
+ /*
+ * Set IO width access to 8.
+ * During SW device discovery the width access
+ * is expected to be 8.
+ */
+ status = cadence_nand_set_access_width16(cdns_ctrl, false);
+
+ return status;
+}
+
+#define TT_MAIN_OOB_AREAS 2
+#define TT_RAW_PAGE 3
+#define TT_BBM 4
+#define TT_MAIN_OOB_AREA_EXT 5
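+
+/*
+ * Transfer types used below:
+ * TT_MAIN_OOB_AREAS - main area with the available OOB appended to
+ * the last sector,
+ * TT_RAW_PAGE - the whole page, including the ECC bytes, as one raw
+ * transfer,
+ * TT_BBM - the bad block marker bytes only,
+ * TT_MAIN_OOB_AREA_EXT - main area with the available OOB passed via
+ * the control data channel.
+ */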
+
+/* Prepare size of data to transfer. */
+static void
+cadence_nand_prepare_data_size(struct nand_chip *chip,
+ int transfer_type)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 sec_size = 0, offset = 0, sec_cnt = 1;
+ u32 last_sec_size = cdns_chip->sector_size;
+ u32 data_ctrl_size = 0;
+ u32 reg = 0;
+
+ if (cdns_ctrl->curr_trans_type == transfer_type)
+ return;
+
+ switch (transfer_type) {
+ case TT_MAIN_OOB_AREA_EXT:
+ sec_cnt = cdns_chip->sector_count;
+ sec_size = cdns_chip->sector_size;
+ data_ctrl_size = cdns_chip->avail_oob_size;
+ break;
+ case TT_MAIN_OOB_AREAS:
+ sec_cnt = cdns_chip->sector_count;
+ last_sec_size = cdns_chip->sector_size
+ + cdns_chip->avail_oob_size;
+ sec_size = cdns_chip->sector_size;
+ break;
+ case TT_RAW_PAGE:
+ last_sec_size = mtd->writesize + mtd->oobsize;
+ break;
+ case TT_BBM:
+ offset = mtd->writesize + cdns_chip->bbm_offs;
+ last_sec_size = 8;
+ break;
+ }
+
+ reg = 0;
+ reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset);
+ reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt);
+ writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0);
+
+ reg = 0;
+ reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size);
+ reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size);
+ writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1);
+
+ if (cdns_ctrl->caps2.data_control_supp) {
+ reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL);
+ reg &= ~CONTROL_DATA_CTRL_SIZE;
+ reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size);
+ writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL);
+ }
+
+ cdns_ctrl->curr_trans_type = transfer_type;
+}
+
+static int
+cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
+ int page, void *buf, void *ctrl_dat, u32 buf_size,
+ u32 ctrl_dat_size, enum dma_data_direction dir,
+ bool with_ecc)
+{
+ dma_addr_t dma_buf, dma_ctrl_dat = 0;
+ u8 thread_nr = chip_nr;
+ int status;
+ u16 ctype;
+
+ if (dir == DMA_FROM_DEVICE)
+ ctype = CDMA_CT_RD;
+ else
+ ctype = CDMA_CT_WR;
+
+ cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc);
+
+ dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir);
+ if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) {
+ dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+ return -EIO;
+ }
+
+ if (ctrl_dat && ctrl_dat_size) {
+ dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat,
+ ctrl_dat_size, dir);
+ if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) {
+ dma_unmap_single(cdns_ctrl->dev, dma_buf,
+ buf_size, dir);
+ dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+ return -EIO;
+ }
+ }
+
+ cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
+ dma_buf, dma_ctrl_dat, ctype);
+
+ status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
+
+ dma_unmap_single(cdns_ctrl->dev, dma_buf,
+ buf_size, dir);
+
+ if (ctrl_dat && ctrl_dat_size)
+ dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat,
+ ctrl_dat_size, dir);
+ if (status)
+ return status;
+
+ return cadence_nand_cdma_finish(cdns_ctrl);
+}
+
+static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl,
+ struct cadence_nand_timings *t)
+{
+ writel_relaxed(t->async_toggle_timings,
+ cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS);
+ writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0);
+ writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1);
+ writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll)
+ writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL);
+
+ writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll) {
+ writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL);
+ writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING);
+ writel_relaxed(t->phy_dqs_timing,
+ cdns_ctrl->reg + PHY_DQS_TIMING);
+ writel_relaxed(t->phy_gate_lpbk_ctrl,
+ cdns_ctrl->reg + PHY_GATE_LPBK_CTRL);
+ writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE,
+ cdns_ctrl->reg + PHY_DLL_MASTER_CTRL);
+ writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL);
+ }
+}
+
+static int cadence_nand_select_target(struct nand_chip *chip)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+ if (chip == cdns_ctrl->selected_chip)
+ return 0;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings);
+
+ cadence_nand_set_ecc_strength(cdns_ctrl,
+ cdns_chip->corr_str_idx);
+
+ cadence_nand_set_erase_detection(cdns_ctrl, true,
+ chip->ecc.strength);
+
+ cdns_ctrl->curr_trans_type = -1;
+ cdns_ctrl->selected_chip = chip;
+
+ return 0;
+}
+
+static int cadence_nand_erase(struct nand_chip *chip, u32 page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ int status;
+ u8 thread_nr = cdns_chip->cs[chip->cur_cs];
+
+ cadence_nand_cdma_desc_prepare(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, 0, 0,
+ CDMA_CT_ERASE);
+ status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
+ if (status) {
+ dev_err(cdns_ctrl->dev, "erase operation failed\n");
+ return -EIO;
+ }
+
+ status = cadence_nand_cdma_finish(cdns_ctrl);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf)
+{
+ int status;
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ cadence_nand_prepare_data_size(chip, TT_BBM);
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+ /*
+ * Read only the bad block marker, from the offset
+ * defined by the memory manufacturer.
+ */
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->oobsize,
+ 0, DMA_FROM_DEVICE, false);
+ if (status) {
+ dev_err(cdns_ctrl->dev, "read BBM failed\n");
+ return -EIO;
+ }
+
+ memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len);
+
+ return 0;
+}
+
+static int cadence_nand_write_page(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status;
+ u16 marker_val = 0xFFFF;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
+ mtd->writesize
+ + cdns_chip->bbm_offs,
+ 1);
+
+ if (oob_required) {
+ marker_val = *(u16 *)(chip->oob_poi
+ + cdns_chip->bbm_offs);
+ } else {
+ /* Set oob data to 0xFF. */
+ memset(cdns_ctrl->buf + mtd->writesize, 0xFF,
+ cdns_chip->avail_oob_size);
+ }
+
+ cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val);
+
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
+
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
+ cdns_ctrl->caps2.data_control_supp) {
+ u8 *oob;
+
+ if (oob_required)
+ oob = chip->oob_poi;
+ else
+ oob = cdns_ctrl->buf + mtd->writesize;
+
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, (void *)buf, oob,
+ mtd->writesize,
+ cdns_chip->avail_oob_size,
+ DMA_TO_DEVICE, true);
+ if (status) {
+ dev_err(cdns_ctrl->dev, "write page failed\n");
+ return -EIO;
+ }
+
+ return 0;
+ }
+
+ if (oob_required) {
+ /* Transfer the data to the oob area. */
+ memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi,
+ cdns_chip->avail_oob_size);
+ }
+
+ memcpy(cdns_ctrl->buf, buf, mtd->writesize);
+
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
+
+ return cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->writesize
+ + cdns_chip->avail_oob_size,
+ 0, DMA_TO_DEVICE, true);
+}
+
+static int cadence_nand_write_oob(struct nand_chip *chip, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ memset(cdns_ctrl->buf, 0xFF, mtd->writesize);
+
+ return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page);
+}
+
+static int cadence_nand_write_page_raw(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int writesize = mtd->writesize;
+ int oobsize = mtd->oobsize;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ void *tmp_buf = cdns_ctrl->buf;
+ int oob_skip = cdns_chip->bbm_len;
+ size_t size = writesize + oobsize;
+ int i, pos, len;
+ int status = 0;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ /*
+ * Fill the buffer with 0xff first, except for a full-page transfer.
+ * This simplifies the logic.
+ */
+ if (!buf || !oob_required)
+ memset(tmp_buf, 0xff, size);
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+ /* Arrange the buffer for syndrome payload/ecc layout. */
+ if (buf) {
+ for (i = 0; i < ecc_steps; i++) {
+ pos = i * (ecc_size + ecc_bytes);
+ len = ecc_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(tmp_buf + pos, buf, len);
+ buf += len;
+ if (len < ecc_size) {
+ len = ecc_size - len;
+ memcpy(tmp_buf + writesize + oob_skip, buf,
+ len);
+ buf += len;
+ }
+ }
+ }
+
+ if (oob_required) {
+ const u8 *oob = chip->oob_poi;
+ u32 oob_data_offset = (cdns_chip->sector_count - 1) *
+ (cdns_chip->sector_size + chip->ecc.bytes)
+ + cdns_chip->sector_size + oob_skip;
+
+ /* BBM at the beginning of the OOB area. */
+ memcpy(tmp_buf + writesize, oob, oob_skip);
+
+ /* OOB free. */
+ memcpy(tmp_buf + oob_data_offset, oob,
+ cdns_chip->avail_oob_size);
+ oob += cdns_chip->avail_oob_size;
+
+ /* OOB ECC. */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ if (i == (ecc_steps - 1))
+ pos += cdns_chip->avail_oob_size;
+
+ len = ecc_bytes;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(tmp_buf + pos, oob, len);
+ oob += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ memcpy(tmp_buf + writesize + oob_skip, oob,
+ len);
+ oob += len;
+ }
+ }
+ }
+
+ cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
+
+ return cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->writesize +
+ mtd->oobsize,
+ 0, DMA_TO_DEVICE, false);
+}
+
+static int cadence_nand_write_oob_raw(struct nand_chip *chip,
+ int page)
+{
+ return cadence_nand_write_page_raw(chip, NULL, true, page);
+}
+
+static int cadence_nand_read_page(struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status = 0;
+ int ecc_err_count = 0;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
+ mtd->writesize
+ + cdns_chip->bbm_offs, 1);
+
+ /*
+ * If data buffer can be accessed by DMA and data_control feature
+ * is supported then transfer data and oob directly.
+ */
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
+ cdns_ctrl->caps2.data_control_supp) {
+ u8 *oob;
+
+ if (oob_required)
+ oob = chip->oob_poi;
+ else
+ oob = cdns_ctrl->buf + mtd->writesize;
+
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, buf, oob,
+ mtd->writesize,
+ cdns_chip->avail_oob_size,
+ DMA_FROM_DEVICE, true);
+ /* Otherwise use bounce buffer. */
+ } else {
+ cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf,
+ NULL, mtd->writesize
+ + cdns_chip->avail_oob_size,
+ 0, DMA_FROM_DEVICE, true);
+
+ memcpy(buf, cdns_ctrl->buf, mtd->writesize);
+ if (oob_required)
+ memcpy(chip->oob_poi,
+ cdns_ctrl->buf + mtd->writesize,
+ mtd->oobsize);
+ }
+
+ switch (status) {
+ case STAT_ECC_UNCORR:
+ mtd->ecc_stats.failed++;
+ ecc_err_count++;
+ break;
+ case STAT_ECC_CORR:
+ ecc_err_count = FIELD_GET(CDMA_CS_MAXERR,
+ cdns_ctrl->cdma_desc->status);
+ mtd->ecc_stats.corrected += ecc_err_count;
+ break;
+ case STAT_ERASED:
+ case STAT_OK:
+ break;
+ default:
+ dev_err(cdns_ctrl->dev, "read page failed\n");
+ return -EIO;
+ }
+
+ if (oob_required)
+ if (cadence_nand_read_bbm(chip, page, chip->oob_poi))
+ return -EIO;
+
+ return ecc_err_count;
+}
+
+/* Reads OOB data from the device. */
+static int cadence_nand_read_oob(struct nand_chip *chip, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+
+ return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page);
+}
+
+static int cadence_nand_read_page_raw(struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int oob_skip = cdns_chip->bbm_len;
+ int writesize = mtd->writesize;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ void *tmp_buf = cdns_ctrl->buf;
+ int i, pos, len;
+ int status = 0;
+
+ status = cadence_nand_select_target(chip);
+ if (status)
+ return status;
+
+ cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+ cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
+ status = cadence_nand_cdma_transfer(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ page, cdns_ctrl->buf, NULL,
+ mtd->writesize
+ + mtd->oobsize,
+ 0, DMA_FROM_DEVICE, false);
+
+ switch (status) {
+ case STAT_ERASED:
+ case STAT_OK:
+ break;
+ default:
+ dev_err(cdns_ctrl->dev, "read raw page failed\n");
+ return -EIO;
+ }
+
+ /* Arrange the buffer for syndrome payload/ecc layout. */
+ if (buf) {
+ for (i = 0; i < ecc_steps; i++) {
+ pos = i * (ecc_size + ecc_bytes);
+ len = ecc_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(buf, tmp_buf + pos, len);
+ buf += len;
+ if (len < ecc_size) {
+ len = ecc_size - len;
+ memcpy(buf, tmp_buf + writesize + oob_skip,
+ len);
+ buf += len;
+ }
+ }
+ }
+
+ if (oob_required) {
+ u8 *oob = chip->oob_poi;
+ u32 oob_data_offset = (cdns_chip->sector_count - 1) *
+ (cdns_chip->sector_size + chip->ecc.bytes)
+ + cdns_chip->sector_size + oob_skip;
+
+ /* OOB free. */
+ memcpy(oob, tmp_buf + oob_data_offset,
+ cdns_chip->avail_oob_size);
+
+ /* BBM at the beginning of the OOB area. */
+ memcpy(oob, tmp_buf + writesize, oob_skip);
+
+ oob += cdns_chip->avail_oob_size;
+
+ /* OOB ECC */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ len = ecc_bytes;
+
+ if (i == (ecc_steps - 1))
+ pos += cdns_chip->avail_oob_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(oob, tmp_buf + pos, len);
+ oob += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ memcpy(oob, tmp_buf + writesize + oob_skip,
+ len);
+ oob += len;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int cadence_nand_read_oob_raw(struct nand_chip *chip,
+ int page)
+{
+ return cadence_nand_read_page_raw(chip, NULL, true, page);
+}
+
+static void cadence_nand_slave_dma_transfer_finished(void *data)
+{
+ struct completion *finished = data;
+
+ complete(finished);
+}
+
+static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
+ void *buf,
+ dma_addr_t dev_dma, size_t len,
+ enum dma_data_direction dir)
+{
+ DECLARE_COMPLETION_ONSTACK(finished);
+ struct dma_chan *chan;
+ struct dma_device *dma_dev;
+ dma_addr_t src_dma, dst_dma, buf_dma;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+
+ chan = cdns_ctrl->dmac;
+ dma_dev = chan->device;
+
+ buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);
+ if (dma_mapping_error(dma_dev->dev, buf_dma)) {
+ dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+ goto err;
+ }
+
+ if (dir == DMA_FROM_DEVICE) {
+ src_dma = cdns_ctrl->io.dma;
+ dst_dma = buf_dma;
+ } else {
+ src_dma = buf_dma;
+ dst_dma = cdns_ctrl->io.dma;
+ }
+
+ tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n");
+ goto err_unmap;
+ }
+
+ tx->callback = cadence_nand_slave_dma_transfer_finished;
+ tx->callback_param = &finished;
+
+ cookie = dmaengine_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n");
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(cdns_ctrl->dmac);
+ wait_for_completion(&finished);
+
+ dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+
+err:
+ dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
+
+ return -EIO;
+}
+
+static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
+ u8 *buf, int len)
+{
+ u8 thread_nr = 0;
+ u32 sdma_size;
+ int status;
+
+ /* Wait until the slave DMA interface is ready for data transfer. */
+ status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
+ if (status)
+ return status;
+
+ if (!cdns_ctrl->caps1->has_dma) {
+ u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
+
+ int len_in_words = (data_dma_width == 4) ? len >> 2 : len >> 3;
+
+ /* Read the aligned part of the data. */
+ if (data_dma_width == 4)
+ ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);
+#ifdef CONFIG_64BIT
+ else
+ readsq(cdns_ctrl->io.virt, buf, len_in_words);
+#endif
+
+ if (sdma_size > len) {
+ int read_bytes = (data_dma_width == 4) ?
+ len_in_words << 2 : len_in_words << 3;
+
+ /* Read the remaining data from the slave DMA interface, if any. */
+ if (data_dma_width == 4)
+ ioread32_rep(cdns_ctrl->io.virt,
+ cdns_ctrl->buf,
+ sdma_size / 4 - len_in_words);
+#ifdef CONFIG_64BIT
+ else
+ readsq(cdns_ctrl->io.virt, cdns_ctrl->buf,
+ sdma_size / 8 - len_in_words);
+#endif
+
+ /* Copy the remaining data. */
+ memcpy(buf + read_bytes, cdns_ctrl->buf,
+ len - read_bytes);
+ }
+ return 0;
+ }
+
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf,
+ cdns_ctrl->io.dma,
+ len, DMA_FROM_DEVICE);
+ if (status == 0)
+ return 0;
+
+ dev_warn(cdns_ctrl->dev,
+ "Slave DMA transfer failed. Try again using bounce buffer.");
+ }
+
+ /* If DMA transfer is not possible or failed then use bounce buffer. */
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
+ cdns_ctrl->io.dma,
+ sdma_size, DMA_FROM_DEVICE);
+
+ if (status) {
+ dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
+ return status;
+ }
+
+ memcpy(buf, cdns_ctrl->buf, len);
+
+ return 0;
+}
+
+static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl,
+ const u8 *buf, int len)
+{
+ u8 thread_nr = 0;
+ u32 sdma_size;
+ int status;
+
+ /* Wait until the slave DMA interface is ready for data transfer. */
+ status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
+ if (status)
+ return status;
+
+ if (!cdns_ctrl->caps1->has_dma) {
+ u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
+
+ int len_in_words = (data_dma_width == 4) ? len >> 2 : len >> 3;
+
+ if (data_dma_width == 4)
+ iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words);
+#ifdef CONFIG_64BIT
+ else
+ writesq(cdns_ctrl->io.virt, buf, len_in_words);
+#endif
+
+ if (sdma_size > len) {
+ int written_bytes = (data_dma_width == 4) ?
+ len_in_words << 2 : len_in_words << 3;
+
+ /* Copy the remaining data. */
+ memcpy(cdns_ctrl->buf, buf + written_bytes,
+ len - written_bytes);
+
+ /* Write all the data expected by the NAND controller. */
+ if (data_dma_width == 4)
+ iowrite32_rep(cdns_ctrl->io.virt,
+ cdns_ctrl->buf,
+ sdma_size / 4 - len_in_words);
+#ifdef CONFIG_64BIT
+ else
+ writesq(cdns_ctrl->io.virt, cdns_ctrl->buf,
+ sdma_size / 8 - len_in_words);
+#endif
+ }
+
+ return 0;
+ }
+
+ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf,
+ cdns_ctrl->io.dma,
+ len, DMA_TO_DEVICE);
+ if (status == 0)
+ return 0;
+
+ dev_warn(cdns_ctrl->dev,
+ "Slave DMA transfer failed. Try again using bounce buffer.");
+ }
+
+ /* If DMA transfer is not possible or failed then use bounce buffer. */
+ memcpy(cdns_ctrl->buf, buf, len);
+
+ status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
+ cdns_ctrl->io.dma,
+ sdma_size, DMA_TO_DEVICE);
+
+ if (status)
+ dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
+
+ return status;
+}
+
+static int cadence_nand_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+
+ /*
+ * Callers of this function do not verify whether the NAND is using a
+ * 16-bit or an 8-bit bus for normal operations, so we need to take care of that
+ * here by leaving the configuration unchanged if the NAND does not have
+ * the NAND_BUSWIDTH_16 flag set.
+ */
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ return 0;
+
+ return cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
+}
+
+static int cadence_nand_cmd_opcode(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr;
+ unsigned int op_id = 0;
+ u64 mini_ctrl_cmd = 0;
+ int ret;
+
+ instr = &subop->instrs[op_id];
+
+ if (instr->delay_ns > 0)
+ mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+ GCMD_LAY_INSTR_CMD);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD,
+ instr->ctx.cmd.opcode);
+
+ ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ mini_ctrl_cmd);
+ if (ret)
+ dev_err(cdns_ctrl->dev, "send cmd %x failed\n",
+ instr->ctx.cmd.opcode);
+
+ return ret;
+}
+
+static int cadence_nand_cmd_address(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr;
+ unsigned int op_id = 0;
+ u64 mini_ctrl_cmd = 0;
+ unsigned int offset, naddrs;
+ u64 address = 0;
+ const u8 *addrs;
+ int ret;
+ int i;
+
+ instr = &subop->instrs[op_id];
+
+ if (instr->delay_ns > 0)
+ mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+ GCMD_LAY_INSTR_ADDR);
+
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ for (i = 0; i < naddrs; i++)
+ address |= (u64)addrs[i] << (8 * i);
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR,
+ address);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE,
+ naddrs - 1);
+
+ ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ mini_ctrl_cmd);
+ if (ret)
+ dev_err(cdns_ctrl->dev, "send address %llx failed\n", address);
+
+ return ret;
+}
+
+static int cadence_nand_cmd_erase(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ unsigned int op_id;
+
+ if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) {
+ int i;
+ const struct nand_op_instr *instr = NULL;
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+ u32 page = 0;
+
+ instr = &subop->instrs[1];
+ offset = nand_subop_get_addr_start_off(subop, 1);
+ naddrs = nand_subop_get_num_addr_cyc(subop, 1);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ for (i = 0; i < naddrs; i++)
+ page |= (u32)addrs[i] << (8 * i);
+
+ return cadence_nand_erase(chip, page);
+ }
+
+ /*
+ * If it is not an erase operation, handle it
+ * by calling the exec_op function.
+ */
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ int ret;
+ const struct nand_operation nand_op = {
+ .cs = chip->cur_cs,
+ .instrs = &subop->instrs[op_id],
+ .ninstrs = 1};
+ ret = chip->controller->ops->exec_op(chip, &nand_op, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cadence_nand_cmd_data(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr;
+ unsigned int offset, op_id = 0;
+ u64 mini_ctrl_cmd = 0;
+ int len = 0;
+ int ret;
+
+ instr = &subop->instrs[op_id];
+
+ if (instr->delay_ns > 0)
+ mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+ GCMD_LAY_INSTR_DATA);
+
+ if (instr->type == NAND_OP_DATA_OUT_INSTR)
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR,
+ GCMD_DIR_WRITE);
+
+ len = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1);
+ mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len);
+ if (instr->ctx.data.force_8bit) {
+ ret = cadence_nand_force_byte_access(chip, true);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "cannot change byte access generic data cmd failed\n");
+ return ret;
+ }
+ }
+
+ ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+ cdns_chip->cs[chip->cur_cs],
+ mini_ctrl_cmd);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "send generic data cmd failed\n");
+ return ret;
+ }
+
+ if (instr->type == NAND_OP_DATA_IN_INSTR) {
+ void *buf = instr->ctx.data.buf.in + offset;
+
+ ret = cadence_nand_read_buf(cdns_ctrl, buf, len);
+ } else {
+ const void *buf = instr->ctx.data.buf.out + offset;
+
+ ret = cadence_nand_write_buf(cdns_ctrl, buf, len);
+ }
+
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n");
+ return ret;
+ }
+
+ if (instr->ctx.data.force_8bit) {
+ ret = cadence_nand_force_byte_access(chip, false);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "cannot change byte access generic data cmd failed\n");
+ }
+ }
+
+ return ret;
+}
+
+static int cadence_nand_cmd_waitrdy(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ int status;
+ unsigned int op_id = 0;
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ const struct nand_op_instr *instr = &subop->instrs[op_id];
+ u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
+
+ status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS,
+ timeout_us,
+ BIT(cdns_chip->cs[chip->cur_cs]),
+ false);
+ return status;
+}
+
+static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_erase,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_opcode,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_address,
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_data,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_data,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ cadence_nand_cmd_waitrdy,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false))
+ );
+
+static int cadence_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ if (!check_only) {
+ int status = cadence_nand_select_target(chip);
+
+ if (status)
+ return status;
+ }
+
+ return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
+ check_only);
+}
+
+static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = cdns_chip->bbm_len;
+ oobregion->length = cdns_chip->avail_oob_size
+ - cdns_chip->bbm_len;
+
+ return 0;
+}
+
+static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = cdns_chip->avail_oob_size;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
+ .free = cadence_nand_ooblayout_free,
+ .ecc = cadence_nand_ooblayout_ecc,
+};
+
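+/*
+ * Convert a timing value to a clock cycle count encoded as
+ * "cycles - 1", rounding the cycle count up: e.g. a 25 ns timing
+ * with a 10 ns clock yields 2, i.e. three clock cycles.
+ */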
+static int calc_cycl(u32 timing, u32 clock)
+{
+ if (timing == 0 || clock == 0)
+ return 0;
+
+ if ((timing % clock) > 0)
+ return timing / clock;
+ else
+ return timing / clock - 1;
+}
+
+/* Calculate max data valid window. */
+static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
+ u32 board_delay_skew_min, u32 ext_mode)
+{
+ if (ext_mode == 0)
+ clk_period /= 2;
+
+ return (trp_cnt + 1) * clk_period + trhoh_min +
+ board_delay_skew_min;
+}
+
+/*
+ * Calculate the data valid window: the time between data becoming
+ * valid (tREA after RE assertion) and data ceasing to be held
+ * (tRHOH after RE deassertion).
+ */
+static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
+ u32 trea_max, u32 ext_mode)
+{
+ if (ext_mode == 0)
+ clk_period /= 2;
+
+ return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
+}
+
+static int
+cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ const struct nand_sdr_timings *sdr;
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ struct cadence_nand_timings *t = &cdns_chip->timings;
+ u32 reg;
+ u32 board_delay = cdns_ctrl->board_delay;
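+ /* clk_period is the NAND flash clock period in picoseconds. */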
+ u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
+ cdns_ctrl->nf_clk_rate);
+ u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
+ u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
+ u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
+ u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
+ u32 if_skew = cdns_ctrl->caps1->if_skew;
+ u32 board_delay_skew_min = board_delay - if_skew;
+ u32 board_delay_skew_max = board_delay + if_skew;
+ u32 dqs_sampl_res, phony_dqs_mod;
+ u32 tdvw, tdvw_min, tdvw_max;
+ u32 ext_rd_mode, ext_wr_mode;
+ u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
+ u32 sampling_point;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ memset(t, 0, sizeof(*t));
+ /* Sampling point calculation. */
+
+ if (cdns_ctrl->caps2.is_phy_type_dll)
+ phony_dqs_mod = 2;
+ else
+ phony_dqs_mod = 1;
+
+ dqs_sampl_res = clk_period / phony_dqs_mod;
+
+ tdvw_min = sdr->tREA_max + board_delay_skew_max;
+ /*
+ * The idea of these calculations is to get the optimum values
+ * for tRP and tRH timings. If it is NOT possible to sample data
+ * with optimal tRP/tRH settings, the parameters will be extended.
+ * If clk_period is 50ns (the lowest value) this condition is met
+ * for SDR timing modes 1, 2, 3, 4 and 5.
+ * If clk_period is 20ns the condition is met only for SDR timing
+ * mode 5.
+ */
+ if (sdr->tRC_min <= clk_period &&
+ sdr->tRP_min <= (clk_period / 2) &&
+ sdr->tREH_min <= (clk_period / 2)) {
+ /* Performance mode. */
+ ext_rd_mode = 0;
+ tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
+ sdr->tREA_max, ext_rd_mode);
+ tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
+ board_delay_skew_min,
+ ext_rd_mode);
+ /*
+ * Check if data valid window and sampling point can be found
+ * and is not on the edge (ie. we have hold margin).
+ * If not extend the tRP timings.
+ */
+ if (tdvw > 0) {
+ if (tdvw_max <= tdvw_min ||
+ (tdvw_max % dqs_sampl_res) == 0) {
+ /*
+ * No valid sampling point, so the RE pulse needs
+ * to be widened by half a clock cycle.
+ */
+ ext_rd_mode = 1;
+ }
+ } else {
+ /*
+ * There is no valid window in which to sample data,
+ * so tRP needs to be widened.
+ * Very conservative calculations are performed here.
+ */
+ trp_cnt = (sdr->tREA_max + board_delay_skew_max
+ + dqs_sampl_res) / clk_period;
+ ext_rd_mode = 1;
+ }
+
+ } else {
+ /* Extended read mode. */
+ u32 trh;
+
+ ext_rd_mode = 1;
+ trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
+ trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
+ if (sdr->tREH_min >= trh)
+ trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
+ else
+ trh_cnt = calc_cycl(trh, clk_period);
+
+ tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
+ sdr->tREA_max, ext_rd_mode);
+ /*
+ * Check if data valid window and sampling point can be found
+ * or, if it is at the edge, whether the previous sampling point
+ * is valid - if not, extend the tRP timings.
+ */
+ if (tdvw > 0) {
+ tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
+ sdr->tRHOH_min,
+ board_delay_skew_min,
+ ext_rd_mode);
+
+ if ((((tdvw_max / dqs_sampl_res)
+ * dqs_sampl_res) <= tdvw_min) ||
+ (((tdvw_max % dqs_sampl_res) == 0) &&
+ (((tdvw_max / dqs_sampl_res - 1)
+ * dqs_sampl_res) <= tdvw_min))) {
+ /*
+ * The data valid window is narrower than the
+ * sampling resolution and does not hit any
+ * sampling point. To make sure a sampling point
+ * will be found, the RE low pulse width is
+ * extended by one clock cycle.
+ */
+ trp_cnt = trp_cnt + 1;
+ }
+ } else {
+ /*
+ * There is no valid window in which to sample data,
+ * so tRP needs to be widened.
+ * Very conservative calculations are performed here.
+ */
+ trp_cnt = (sdr->tREA_max + board_delay_skew_max
+ + dqs_sampl_res) / clk_period;
+ }
+ }
+
+ tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
+ sdr->tRHOH_min,
+ board_delay_skew_min, ext_rd_mode);
+
+ if (sdr->tWC_min <= clk_period &&
+ (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
+ (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
+ ext_wr_mode = 0;
+ } else {
+ u32 twh;
+
+ ext_wr_mode = 1;
+ twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
+ if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
+ twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
+ clk_period);
+
+ twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
+ if (sdr->tWH_min >= twh)
+ twh = sdr->tWH_min;
+
+ twh_cnt = calc_cycl(twh + if_skew, clk_period);
+ }
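+
+ /*
+ * Illustrative check (ONFI values assumed): SDR timing mode 0 has
+ * tWC_min = 100 ns and tWP_min = 50 ns, so with a 10 ns clock the
+ * fast path above cannot be used and extended write mode computes
+ * explicit WE# low/high cycle counts instead.
+ */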
+
+ reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
+ reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
+ reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
+ reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
+ t->async_toggle_timings = reg;
+ dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);
+
+ tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
+ tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
+ twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
+ trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
+ reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
+
+ /*
+ * If the timing exceeds the delay field in the timing register,
+ * use the maximum value instead.
+ */
+ if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
+ reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
+ else
+ reg |= TIMINGS0_TCCS;
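+ /*
+ * E.g. if the TCCS field were 8 bits wide, any tccs_cnt above 255
+ * would fail FIELD_FIT() and the field is saturated to all ones
+ * rather than silently truncated (field width assumed for
+ * illustration).
+ */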
+
+ reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
+ reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
+ t->timings0 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);
+
+ /* The following is related to a single signal, so skew is not needed. */
+ trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
+ trhz_cnt = trhz_cnt + 1;
+ twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
+ /*
+ * Because of the two-stage syncflop the value must be increased by
+ * 3 + 5 cycles: 3 account for synchronization and 5 for the output
+ * interface delay.
+ */
+ twb_cnt = twb_cnt + 3 + 5;
+ /*
+ * The following is related to the WE edge of the random data input
+ * sequence; note that the interface skew is still added to the
+ * 500000 ps tVDLY value below.
+ */
+ tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
+ reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
+ reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
+ reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
+ t->timings1 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);
+
+ tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
+ if (tfeat_cnt < twb_cnt)
+ tfeat_cnt = twb_cnt;
+
+ tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
+ tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);
+
+ reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
+ reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
+ reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
+ t->timings2 = reg;
+ dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll) {
+ reg = DLL_PHY_CTRL_DLL_RST_N;
+ if (ext_wr_mode)
+ reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
+ if (ext_rd_mode)
+ reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;
+
+ reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
+ reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
+ t->dll_phy_ctrl = reg;
+ dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
+ }
+
+ /* Sampling point calculation. */
+ if ((tdvw_max % dqs_sampl_res) > 0)
+ sampling_point = tdvw_max / dqs_sampl_res;
+ else
+ sampling_point = (tdvw_max / dqs_sampl_res - 1);
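+
+ /*
+ * Worked example (illustrative values): with dqs_sampl_res = 5000 ps,
+ * tdvw_min = 21000 ps and tdvw_max = 32000 ps, the remainder is
+ * non-zero, so sampling_point = 32000 / 5000 = 6 and the sample lands
+ * at 30000 ps, safely inside the window.
+ */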
+
+ if (sampling_point * dqs_sampl_res > tdvw_min) {
+ dll_phy_dqs_timing =
+ FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
+ dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
+ phony_dqs_timing = sampling_point / phony_dqs_mod;
+
+ if ((sampling_point % 2) > 0) {
+ dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
+ if ((tdvw_max % dqs_sampl_res) == 0)
+ /*
+ * Calculation for a sampling point that lies
+ * at the edge of the data and is an odd number.
+ */
+ phony_dqs_timing = (tdvw_max / dqs_sampl_res)
+ / phony_dqs_mod - 1;
+
+ if (!cdns_ctrl->caps2.is_phy_type_dll)
+ phony_dqs_timing--;
+
+ } else {
+ phony_dqs_timing--;
+ }
+ rd_del_sel = phony_dqs_timing + 3;
+ } else {
+ dev_warn(cdns_ctrl->dev,
+ "ERROR : cannot find valid sampling point\n");
+ }
+
+ reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
+ if (cdns_ctrl->caps2.is_phy_type_dll)
+ reg |= PHY_CTRL_SDR_DQS;
+ t->phy_ctrl = reg;
+ dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);
+
+ if (cdns_ctrl->caps2.is_phy_type_dll) {
+ dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
+ dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
+ dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
+ dll_phy_dqs_timing);
+ t->phy_dqs_timing = dll_phy_dqs_timing;
+
+ reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
+ dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
+ reg);
+ t->phy_gate_lpbk_ctrl = reg;
+
+ dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
+ PHY_DLL_MASTER_CTRL_BYPASS_MODE);
+ dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
+ }
+
+ return 0;
+}
+
+static int cadence_nand_attach_chip(struct nand_chip *chip)
+{
+ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+ struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+ u32 ecc_size;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ ret = cadence_nand_set_access_width16(cdns_ctrl, true);
+ if (ret)
+ return ret;
+ }
+
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+ cdns_chip->bbm_offs = chip->badblockpos;
+ cdns_chip->bbm_offs &= ~0x01;
+ /* This value should be an even number. */
+ cdns_chip->bbm_len = 2;
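+ /*
+ * E.g. a badblockpos of 5 becomes bbm_offs = 4, so the 2-byte
+ * marker always starts on an even OOB offset.
+ */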
+
+ ret = nand_ecc_choose_conf(chip,
+ &cdns_ctrl->ecc_caps,
+ mtd->oobsize - cdns_chip->bbm_len);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "ECC configuration failed\n");
+ return ret;
+ }
+
+ dev_dbg(cdns_ctrl->dev,
+ "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
+ chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
+
+ /* Error correction configuration. */
+ cdns_chip->sector_size = chip->ecc.size;
+ cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
+ ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
+
+ cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
+
+ if (cdns_chip->avail_oob_size > cdns_ctrl->bch_metadata_size)
+ cdns_chip->avail_oob_size = cdns_ctrl->bch_metadata_size;
+
+ if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
+ > mtd->oobsize)
+ cdns_chip->avail_oob_size -= 4;
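+
+ /*
+ * Illustrative layout (values assumed): a 2048+64 page split into
+ * four 512-byte sectors with 14 ECC bytes each leaves 64 - 56 = 8
+ * spare OOB bytes; since 8 + 2 (BBM) + 56 > 64, the spare area is
+ * shrunk by 4 to make room for the bad-block marker.
+ */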
+
+ ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength);
+ if (ret < 0)
+ return -EINVAL;
+
+ cdns_chip->corr_str_idx = (u8)ret;
+
+ if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+ 1000000,
+ CTRL_STATUS_CTRL_BUSY, true))
+ return -ETIMEDOUT;
+
+ cadence_nand_set_ecc_strength(cdns_ctrl,
+ cdns_chip->corr_str_idx);
+
+ cadence_nand_set_erase_detection(cdns_ctrl, true,
+ chip->ecc.strength);
+
+ /* Override the default read operations. */
+ chip->ecc.read_page = cadence_nand_read_page;
+ chip->ecc.read_page_raw = cadence_nand_read_page_raw;
+ chip->ecc.write_page = cadence_nand_write_page;
+ chip->ecc.write_page_raw = cadence_nand_write_page_raw;
+ chip->ecc.read_oob = cadence_nand_read_oob;
+ chip->ecc.write_oob = cadence_nand_write_oob;
+ chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
+ chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;
+
+ if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size)
+ cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize;
+
+ /* Is 32-bit DMA supported? */
+ ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "no usable DMA configuration\n");
+ return ret;
+ }
+
+ mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);
+
+ return 0;
+}
+
+static const struct nand_controller_ops cadence_nand_controller_ops = {
+ .attach_chip = cadence_nand_attach_chip,
+ .exec_op = cadence_nand_exec_op,
+ .setup_interface = cadence_nand_setup_interface,
+};
+
+static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
+ struct device_node *np)
+{
+ struct cdns_nand_chip *cdns_chip;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int nsels, ret, i;
+ u32 cs;
+
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (nsels <= 0) {
+ dev_err(cdns_ctrl->dev, "missing/invalid reg property\n");
+ return -EINVAL;
+ }
+
+ /* Allocate the nand chip structure. */
+ cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) +
+ (nsels * sizeof(u8)),
+ GFP_KERNEL);
+ if (!cdns_chip) {
+ dev_err(cdns_ctrl->dev, "could not allocate chip structure\n");
+ return -ENOMEM;
+ }
+
+ cdns_chip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++) {
+ /* Retrieve CS id. */
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (cs >= cdns_ctrl->caps2.max_banks) {
+ dev_err(cdns_ctrl->dev,
+ "invalid reg value: %u (max CS = %d)\n",
+ cs, cdns_ctrl->caps2.max_banks);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) {
+ dev_err(cdns_ctrl->dev,
+ "CS %d already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ cdns_chip->cs[i] = cs;
+ }
+
+ chip = &cdns_chip->chip;
+ chip->controller = &cdns_ctrl->controller;
+ nand_set_flash_node(chip, np);
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = cdns_ctrl->dev;
+
+ /*
+ * Default to HW ECC engine mode. If the nand-ecc-mode property is given
+ * in the DT node, this entry will be overwritten in nand_scan_ident().
+ */
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ ret = nand_scan(chip, cdns_chip->nsels);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "could not scan the nand chip\n");
+ return ret;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(cdns_ctrl->dev,
+ "failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);
+
+ return 0;
+}
+
+static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct cdns_nand_chip *entry, *temp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
+ chip = &entry->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&entry->node);
+ }
+}
+
+static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ struct device_node *np = cdns_ctrl->dev->of_node;
+ struct device_node *nand_np;
+ int max_cs = cdns_ctrl->caps2.max_banks;
+ int nchips, ret;
+
+ nchips = of_get_child_count(np);
+
+ if (nchips > max_cs) {
+ dev_err(cdns_ctrl->dev,
+ "too many NAND chips: %d (max = %d CS)\n",
+ nchips, max_cs);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ cadence_nand_chips_cleanup(cdns_ctrl);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void
+cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
+{
+ /* Disable interrupts. */
+ writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE);
+}
+
+static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ dma_cap_mask_t mask;
+ int ret;
+
+ cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
+ sizeof(*cdns_ctrl->cdma_desc),
+ &cdns_ctrl->dma_cdma_desc,
+ GFP_KERNEL);
+ if (!cdns_ctrl->cdma_desc)
+ return -ENOMEM;
+
+ cdns_ctrl->buf_size = SZ_16K;
+ cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL);
+ if (!cdns_ctrl->buf) {
+ ret = -ENOMEM;
+ goto free_buf_desc;
+ }
+
+ if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr,
+ IRQF_SHARED, "cadence-nand-controller",
+ cdns_ctrl)) {
+ dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n");
+ ret = -ENODEV;
+ goto free_buf;
+ }
+
+ spin_lock_init(&cdns_ctrl->irq_lock);
+ init_completion(&cdns_ctrl->complete);
+
+ ret = cadence_nand_hw_init(cdns_ctrl);
+ if (ret)
+ goto disable_irq;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ if (cdns_ctrl->caps1->has_dma) {
+ cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
+ if (!cdns_ctrl->dmac) {
+ dev_err(cdns_ctrl->dev,
+ "Unable to get a DMA channel\n");
+ ret = -EBUSY;
+ goto disable_irq;
+ }
+ }
+
+ nand_controller_init(&cdns_ctrl->controller);
+ INIT_LIST_HEAD(&cdns_ctrl->chips);
+
+ cdns_ctrl->controller.ops = &cadence_nand_controller_ops;
+ cdns_ctrl->curr_corr_str_idx = 0xFF;
+
+ ret = cadence_nand_chips_init(cdns_ctrl);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
+ ret);
+ goto dma_release_chnl;
+ }
+
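+ /*
+ * Re-allocate the bounce buffer: attach_chip may have grown buf_size
+ * during nand_scan() to cover the largest page + OOB among the
+ * detected chips.
+ */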
+ kfree(cdns_ctrl->buf);
+ cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
+ if (!cdns_ctrl->buf) {
+ ret = -ENOMEM;
+ goto dma_release_chnl;
+ }
+
+ return 0;
+
+dma_release_chnl:
+ if (cdns_ctrl->dmac)
+ dma_release_channel(cdns_ctrl->dmac);
+
+disable_irq:
+ cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
+
+free_buf:
+ kfree(cdns_ctrl->buf);
+
+free_buf_desc:
+ dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
+ cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
+
+ return ret;
+}
+
+/* Driver exit point. */
+static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
+{
+ cadence_nand_chips_cleanup(cdns_ctrl);
+ cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
+ kfree(cdns_ctrl->buf);
+ dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
+ cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
+
+ if (cdns_ctrl->dmac)
+ dma_release_channel(cdns_ctrl->dmac);
+}
+
+struct cadence_nand_dt {
+ struct cdns_nand_ctrl cdns_ctrl;
+ struct clk *clk;
+};
+
+static const struct cadence_nand_dt_devdata cadence_nand_default = {
+ .if_skew = 0,
+ .has_dma = 1,
+};
+
+static const struct of_device_id cadence_nand_dt_ids[] = {
+ {
+ .compatible = "cdns,hp-nfc",
+ .data = &cadence_nand_default
+ }, {}
+};
+
+MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids);
+
+static int cadence_nand_dt_probe(struct platform_device *ofdev)
+{
+ struct resource *res;
+ struct cadence_nand_dt *dt;
+ struct cdns_nand_ctrl *cdns_ctrl;
+ int ret;
+ const struct of_device_id *of_id;
+ const struct cadence_nand_dt_devdata *devdata;
+ u32 val;
+
+ of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
+ if (of_id) {
+ ofdev->id_entry = of_id->data;
+ devdata = of_id->data;
+ } else {
+ pr_err("Failed to find the right device id.\n");
+ return -ENOMEM;
+ }
+
+ dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
+ if (!dt)
+ return -ENOMEM;
+
+ cdns_ctrl = &dt->cdns_ctrl;
+ cdns_ctrl->caps1 = devdata;
+
+ cdns_ctrl->dev = &ofdev->dev;
+ cdns_ctrl->irq = platform_get_irq(ofdev, 0);
+ if (cdns_ctrl->irq < 0)
+ return cdns_ctrl->irq;
+
+ dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);
+
+ cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(cdns_ctrl->reg))
+ return PTR_ERR(cdns_ctrl->reg);
+
+ cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
+ if (IS_ERR(cdns_ctrl->io.virt))
+ return PTR_ERR(cdns_ctrl->io.virt);
+ cdns_ctrl->io.dma = res->start;
+
+ dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
+ if (IS_ERR(dt->clk))
+ return PTR_ERR(dt->clk);
+
+ cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk);
+
+ ret = of_property_read_u32(ofdev->dev.of_node,
+ "cdns,board-delay-ps", &val);
+ if (ret) {
+ val = 4830;
+ dev_info(cdns_ctrl->dev,
+ "missing cdns,board-delay-ps property, %d was set\n",
+ val);
+ }
+ cdns_ctrl->board_delay = val;
+
+ ret = cadence_nand_init(cdns_ctrl);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(ofdev, dt);
+ return 0;
+}
+
+static void cadence_nand_dt_remove(struct platform_device *ofdev)
+{
+ struct cadence_nand_dt *dt = platform_get_drvdata(ofdev);
+
+ cadence_nand_remove(&dt->cdns_ctrl);
+}
+
+static struct platform_driver cadence_nand_dt_driver = {
+ .probe = cadence_nand_dt_probe,
+ .remove_new = cadence_nand_dt_remove,
+ .driver = {
+ .name = "cadence-nand-controller",
+ .of_match_table = cadence_nand_dt_ids,
+ },
+};
+
+module_platform_driver(cadence_nand_dt_driver);
+
+MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Driver for Cadence NAND flash controller");
+
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
new file mode 100644
index 0000000000..66385c4fb9
--- /dev/null
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -0,0 +1,892 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for One Laptop Per Child ‘CAFÉ’ controller, aka Marvell 88ALP01
+ *
+ * The data sheet for this device can be found at:
+ * http://wiki.laptop.org/go/Datasheets
+ *
+ * Copyright © 2006 Red Hat, Inc.
+ * Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
+ */
+
+#define DEBUG
+
+#include <linux/device.h>
+#undef DEBUG
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/rslib.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+#define CAFE_NAND_CTRL1 0x00
+#define CAFE_NAND_CTRL2 0x04
+#define CAFE_NAND_CTRL3 0x08
+#define CAFE_NAND_STATUS 0x0c
+#define CAFE_NAND_IRQ 0x10
+#define CAFE_NAND_IRQ_MASK 0x14
+#define CAFE_NAND_DATA_LEN 0x18
+#define CAFE_NAND_ADDR1 0x1c
+#define CAFE_NAND_ADDR2 0x20
+#define CAFE_NAND_TIMING1 0x24
+#define CAFE_NAND_TIMING2 0x28
+#define CAFE_NAND_TIMING3 0x2c
+#define CAFE_NAND_NONMEM 0x30
+#define CAFE_NAND_ECC_RESULT 0x3C
+#define CAFE_NAND_DMA_CTRL 0x40
+#define CAFE_NAND_DMA_ADDR0 0x44
+#define CAFE_NAND_DMA_ADDR1 0x48
+#define CAFE_NAND_ECC_SYN01 0x50
+#define CAFE_NAND_ECC_SYN23 0x54
+#define CAFE_NAND_ECC_SYN45 0x58
+#define CAFE_NAND_ECC_SYN67 0x5c
+#define CAFE_NAND_READ_DATA 0x1000
+#define CAFE_NAND_WRITE_DATA 0x2000
+
+#define CAFE_GLOBAL_CTRL 0x3004
+#define CAFE_GLOBAL_IRQ 0x3008
+#define CAFE_GLOBAL_IRQ_MASK 0x300c
+#define CAFE_NAND_RESET 0x3034
+
+/* Missing from the datasheet: bit 19 of CTRL1 sets CE0 vs. CE1 */
+#define CTRL1_CHIPSELECT (1<<19)
+
+struct cafe_priv {
+ struct nand_chip nand;
+ struct pci_dev *pdev;
+ void __iomem *mmio;
+ struct rs_control *rs;
+ uint32_t ctl1;
+ uint32_t ctl2;
+ int datalen;
+ int nr_data;
+ int data_pos;
+ int page_addr;
+ bool usedma;
+ dma_addr_t dmaaddr;
+ unsigned char *dmabuf;
+};
+
+static int usedma = 1;
+module_param(usedma, int, 0644);
+
+static int skipbbt = 0;
+module_param(skipbbt, int, 0644);
+
+static int debug = 0;
+module_param(debug, int, 0644);
+
+static int regdebug = 0;
+module_param(regdebug, int, 0644);
+
+static int checkecc = 1;
+module_param(checkecc, int, 0644);
+
+static unsigned int numtimings;
+static int timing[3];
+module_param_array(timing, int, &numtimings, 0644);
+
+static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
+
+/* Hrm. Why isn't this already conditional on something in the struct device? */
+#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
+
+/* Make it easier to switch to PIO if we need to */
+#define cafe_readl(cafe, addr) readl((cafe)->mmio + CAFE_##addr)
+#define cafe_writel(cafe, datum, addr) writel(datum, (cafe)->mmio + CAFE_##addr)
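+
+/*
+ * For instance, cafe_readl(cafe, NAND_IRQ) expands to
+ * readl((cafe)->mmio + CAFE_NAND_IRQ), i.e. a 32-bit MMIO read at
+ * register offset 0x10.
+ */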
+
+static int cafe_device_ready(struct nand_chip *chip)
+{
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
+ uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
+
+ cafe_writel(cafe, irqs, NAND_IRQ);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "NAND device is%s ready, IRQ %x (%x) (%x,%x)\n",
+ result?"":" not", irqs, cafe_readl(cafe, NAND_IRQ),
+ cafe_readl(cafe, GLOBAL_IRQ), cafe_readl(cafe, GLOBAL_IRQ_MASK));
+
+ return result;
+}
+
+
+static void cafe_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
+{
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+ if (cafe->usedma)
+ memcpy(cafe->dmabuf + cafe->datalen, buf, len);
+ else
+ memcpy_toio(cafe->mmio + CAFE_NAND_WRITE_DATA + cafe->datalen, buf, len);
+
+ cafe->datalen += len;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes to write buffer. datalen 0x%x\n",
+ len, cafe->datalen);
+}
+
+static void cafe_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+ if (cafe->usedma)
+ memcpy(buf, cafe->dmabuf + cafe->datalen, len);
+ else
+ memcpy_fromio(buf, cafe->mmio + CAFE_NAND_READ_DATA + cafe->datalen, len);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes from position 0x%x in read buffer.\n",
+ len, cafe->datalen);
+ cafe->datalen += len;
+}
+
+static uint8_t cafe_read_byte(struct nand_chip *chip)
+{
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ uint8_t d;
+
+ cafe_read_buf(chip, &d, 1);
+ cafe_dev_dbg(&cafe->pdev->dev, "Read %02x\n", d);
+
+ return d;
+}
+
+static void cafe_nand_cmdfunc(struct nand_chip *chip, unsigned command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ int adrbytes = 0;
+ uint32_t ctl1;
+ uint32_t doneint = 0x80000000;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "cmdfunc %02x, 0x%x, 0x%x\n",
+ command, column, page_addr);
+
+ if (command == NAND_CMD_ERASE2 || command == NAND_CMD_PAGEPROG) {
+ /* Second half of a command we already calculated */
+ cafe_writel(cafe, cafe->ctl2 | 0x100 | command, NAND_CTRL2);
+ ctl1 = cafe->ctl1;
+ cafe->ctl2 &= ~(1<<30);
+ cafe_dev_dbg(&cafe->pdev->dev, "Continue command, ctl1 %08x, #data %d\n",
+ cafe->ctl1, cafe->nr_data);
+ goto do_command;
+ }
+ /* Reset ECC engine */
+ cafe_writel(cafe, 0, NAND_CTRL2);
+
+ /* Emulate NAND_CMD_READOOB on large-page chips */
+ if (mtd->writesize > 512 &&
+ command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ /* FIXME: Do we need to send read command before sending data
+ for small-page chips, to position the buffer correctly? */
+
+ if (column != -1) {
+ cafe_writel(cafe, column, NAND_ADDR1);
+ adrbytes = 2;
+ if (page_addr != -1)
+ goto write_adr2;
+ } else if (page_addr != -1) {
+ cafe_writel(cafe, page_addr & 0xffff, NAND_ADDR1);
+ page_addr >>= 16;
+ write_adr2:
+ cafe_writel(cafe, page_addr, NAND_ADDR2);
+ adrbytes += 2;
+ if (mtd->size > mtd->writesize << 16)
+ adrbytes++;
+ }
+
+ cafe->data_pos = cafe->datalen = 0;
+
+ /* Set command valid bit, mask in the chip select bit */
+ ctl1 = 0x80000000 | command | (cafe->ctl1 & CTRL1_CHIPSELECT);
+
+ /* Set RD or WR bits as appropriate */
+ if (command == NAND_CMD_READID || command == NAND_CMD_STATUS) {
+ ctl1 |= (1<<26); /* rd */
+ /* Always 4 bytes, for now */
+ cafe->datalen = 4;
+ /* And one address cycle -- even for STATUS, since the controller doesn't work without one */
+ adrbytes = 1;
+ } else if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
+ command == NAND_CMD_READOOB || command == NAND_CMD_RNDOUT) {
+ ctl1 |= 1<<26; /* rd */
+ /* For now, assume just read to end of page */
+ cafe->datalen = mtd->writesize + mtd->oobsize - column;
+ } else if (command == NAND_CMD_SEQIN)
+ ctl1 |= 1<<25; /* wr */
+
+ /* Set number of address bytes */
+ if (adrbytes)
+ ctl1 |= ((adrbytes-1)|8) << 27;
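+ /*
+ * E.g. adrbytes == 2 gives ((2-1)|8) << 27 == 9 << 27: the cycle
+ * count (adrbytes - 1) lands in bits 29:27 and the constant 8 sets
+ * bit 30, presumably an address-valid flag.
+ */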
+
+ if (command == NAND_CMD_SEQIN || command == NAND_CMD_ERASE1) {
+ /* Ignore the first command of a pair; the hardware
+ deals with them both at once, later */
+ cafe->ctl1 = ctl1;
+ cafe_dev_dbg(&cafe->pdev->dev, "Setup for delayed command, ctl1 %08x, dlen %x\n",
+ cafe->ctl1, cafe->datalen);
+ return;
+ }
+ /* RNDOUT and READ0 commands need a following byte */
+ if (command == NAND_CMD_RNDOUT)
+ cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_RNDOUTSTART, NAND_CTRL2);
+ else if (command == NAND_CMD_READ0 && mtd->writesize > 512)
+ cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_READSTART, NAND_CTRL2);
+
+ do_command:
+ cafe_dev_dbg(&cafe->pdev->dev, "dlen %x, ctl1 %x, ctl2 %x\n",
+ cafe->datalen, ctl1, cafe_readl(cafe, NAND_CTRL2));
+
+ /* NB: The datasheet lies -- we really should be subtracting 1 here */
+ cafe_writel(cafe, cafe->datalen, NAND_DATA_LEN);
+ cafe_writel(cafe, 0x90000000, NAND_IRQ);
+ if (cafe->usedma && (ctl1 & (3<<25))) {
+ uint32_t dmactl = 0xc0000000 + cafe->datalen;
+ /* If WR or RD bits set, set up DMA */
+ if (ctl1 & (1<<26)) {
+ /* It's a read */
+ dmactl |= (1<<29);
+ /* ... so it's done when the DMA is done, not just
+ the command. */
+ doneint = 0x10000000;
+ }
+ cafe_writel(cafe, dmactl, NAND_DMA_CTRL);
+ }
+ cafe->datalen = 0;
+
+ if (unlikely(regdebug)) {
+ int i;
+ printk("About to write command %08x to register 0\n", ctl1);
+ for (i=4; i< 0x5c; i+=4)
+ printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
+ }
+
+ cafe_writel(cafe, ctl1, NAND_CTRL1);
+ /* Always apply this short delay to ensure that we wait tWB in
+ * any case, on any machine. */
+ ndelay(100);
+
+ if (1) {
+ int c;
+ uint32_t irqs;
+
+ for (c = 500000; c != 0; c--) {
+ irqs = cafe_readl(cafe, NAND_IRQ);
+ if (irqs & doneint)
+ break;
+ udelay(1);
+ if (!(c % 100000))
+ cafe_dev_dbg(&cafe->pdev->dev, "Wait for ready, IRQ %x\n", irqs);
+ cpu_relax();
+ }
+ cafe_writel(cafe, doneint, NAND_IRQ);
+ cafe_dev_dbg(&cafe->pdev->dev, "Command %x completed after %d usec, irqs %x (%x)\n",
+ command, 500000-c, irqs, cafe_readl(cafe, NAND_IRQ));
+ }
+
+ WARN_ON(cafe->ctl2 & (1<<30));
+
+ switch (command) {
+
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_RNDOUT:
+ cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
+ return;
+ }
+ nand_wait_ready(chip);
+ cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
+}
+
+static void cafe_select_chip(struct nand_chip *chip, int chipnr)
+{
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "select_chip %d\n", chipnr);
+
+ /* Mask the appropriate bit into the stored value of ctl1
+ which will be used by cafe_nand_cmdfunc() */
+ if (chipnr)
+ cafe->ctl1 |= CTRL1_CHIPSELECT;
+ else
+ cafe->ctl1 &= ~CTRL1_CHIPSELECT;
+}
+
+static irqreturn_t cafe_nand_interrupt(int irq, void *id)
+{
+ struct mtd_info *mtd = id;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
+ cafe_writel(cafe, irqs & ~0x90000000, NAND_IRQ);
+ if (!irqs)
+ return IRQ_NONE;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "irq, bits %x (%x)\n", irqs, cafe_readl(cafe, NAND_IRQ));
+ return IRQ_HANDLED;
+}
+
+static int cafe_nand_write_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
+}
+
+/* Don't use -- use nand_read_oob_std for now */
+static int cafe_nand_read_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+}
+/**
+ * cafe_nand_read_page - [REPLACEABLE] hardware ecc syndrome based page read
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller expects OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore
+ * we need a special oob layout and handling.
+ */
+static int cafe_nand_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ unsigned int max_bitflips = 0;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
+ cafe_readl(cafe, NAND_ECC_RESULT),
+ cafe_readl(cafe, NAND_ECC_SYN01));
+
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
+ unsigned short syn[8], pat[4];
+ int pos[4];
+ u8 *oob = chip->oob_poi;
+ int i, n;
+
+ for (i=0; i<8; i+=2) {
+ uint32_t tmp = cafe_readl(cafe, NAND_ECC_SYN01 + (i*2));
+
+ syn[i] = cafe->rs->codec->index_of[tmp & 0xfff];
+ syn[i+1] = cafe->rs->codec->index_of[(tmp >> 16) & 0xfff];
+ }
+
+ n = decode_rs16(cafe->rs, NULL, NULL, 1367, syn, 0, pos, 0,
+ pat);
+
+ for (i = 0; i < n; i++) {
+ int p = pos[i];
+
+ /* The 12-bit symbols are mapped to bytes here */
+
+ if (p > 1374) {
+ /* out of range */
+ n = -1374;
+ } else if (p == 0) {
+ /* high four bits do not correspond to data */
+ if (pat[i] > 0xff)
+ n = -2048;
+ else
+ buf[0] ^= pat[i];
+ } else if (p == 1365) {
+ buf[2047] ^= pat[i] >> 4;
+ oob[0] ^= pat[i] << 4;
+ } else if (p > 1365) {
+ if ((p & 1) == 1) {
+ oob[3*p/2 - 2048] ^= pat[i] >> 4;
+ oob[3*p/2 - 2047] ^= pat[i] << 4;
+ } else {
+ oob[3*p/2 - 2049] ^= pat[i] >> 8;
+ oob[3*p/2 - 2048] ^= pat[i];
+ }
+ } else if ((p & 1) == 1) {
+ buf[3*p/2] ^= pat[i] >> 4;
+ buf[3*p/2 + 1] ^= pat[i] << 4;
+ } else {
+ buf[3*p/2 - 1] ^= pat[i] >> 8;
+ buf[3*p/2] ^= pat[i];
+ }
+ }
+
+ if (n < 0) {
+ dev_dbg(&cafe->pdev->dev, "Failed to correct ECC at %08x\n",
+ cafe_readl(cafe, NAND_ADDR2) * 2048);
+ for (i = 0; i < 0x5c; i += 4)
+ printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
+ mtd->ecc_stats.failed++;
+ } else {
+ dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
+ mtd->ecc_stats.corrected += n;
+ max_bitflips = max_t(unsigned int, max_bitflips, n);
+ }
+ }
+
+ return max_bitflips;
+}
+
+static int cafe_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 0;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int cafe_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = chip->ecc.total;
+ oobregion->length = mtd->oobsize - chip->ecc.total;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops cafe_ooblayout_ops = {
+ .ecc = cafe_ooblayout_ecc,
+ .free = cafe_ooblayout_free,
+};
+
+/* Ick. The BBT code really ought to be able to work this bit out
+ for itself from the above, at least for the 2KiB case */
+static uint8_t cafe_bbt_pattern_2048[] = { 'B', 'b', 't', '0' };
+static uint8_t cafe_mirror_pattern_2048[] = { '1', 't', 'b', 'B' };
+
+static uint8_t cafe_bbt_pattern_512[] = { 0xBB };
+static uint8_t cafe_mirror_pattern_512[] = { 0xBC };
+
+
+static struct nand_bbt_descr cafe_bbt_main_descr_2048 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 4,
+ .veroffs = 18,
+ .maxblocks = 4,
+ .pattern = cafe_bbt_pattern_2048
+};
+
+static struct nand_bbt_descr cafe_bbt_mirror_descr_2048 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 4,
+ .veroffs = 18,
+ .maxblocks = 4,
+ .pattern = cafe_mirror_pattern_2048
+};
+
+static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 1,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = cafe_bbt_pattern_512
+};
+
+static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 14,
+ .len = 1,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = cafe_mirror_pattern_512
+};
+
+
+static int cafe_nand_write_page_lowlevel(struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ /* Set up ECC autogeneration */
+ cafe->ctl2 |= (1<<30);
+
+ return nand_prog_page_end_op(chip);
+}
+
+/* F_2[X]/(X**6+X+1) */
+static unsigned short gf64_mul(u8 a, u8 b)
+{
+ u8 c;
+ unsigned int i;
+
+ c = 0;
+ for (i = 0; i < 6; i++) {
+ if (a & 1)
+ c ^= b;
+ a >>= 1;
+ b <<= 1;
+ if ((b & 0x40) != 0)
+ b ^= 0x43;
+ }
+
+ return c;
+}
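+
+/*
+ * Example (illustrative): gf64_mul(0x02, 0x02) == 0x04, while
+ * gf64_mul(0x20, 0x02) overflows bit 6 and is reduced by the
+ * polynomial X**6 + X + 1 (0x43), yielding 0x03.
+ */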
+
+/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */
+static u16 gf4096_mul(u16 a, u16 b)
+{
+ u8 ah, al, bh, bl, ch, cl;
+
+ ah = a >> 6;
+ al = a & 0x3f;
+ bh = b >> 6;
+ bl = b & 0x3f;
+
+ ch = gf64_mul(ah ^ al, bh ^ bl) ^ gf64_mul(al, bl);
+ cl = gf64_mul(gf64_mul(ah, bh), 0x21) ^ gf64_mul(al, bl);
+
+ return (ch << 6) ^ cl;
+}
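+
+/*
+ * The high half uses the Karatsuba identity over GF(64),
+ * ah*bh ^ ah*bl ^ al*bh == (ah ^ al)*(bh ^ bl) ^ al*bl,
+ * while 0x21 plays the role of A**-1 when reducing X**2 = X + A**-1.
+ */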
+
+static int cafe_mul(int x)
+{
+ if (x == 0)
+ return 1;
+ return gf4096_mul(x, 0xe01);
+}
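+
+/*
+ * Passed to init_rs_non_canonical() below as the field-stepping
+ * callback: starting from cafe_mul(0) == 1, repeated multiplication by
+ * 0xe01 is assumed to enumerate the non-canonical GF(2^12)
+ * representation used by the CAFÉ ECC hardware.
+ */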
+
+static int cafe_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ int err = 0;
+
+ cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112,
+ &cafe->dmaaddr, GFP_KERNEL);
+ if (!cafe->dmabuf)
+ return -ENOMEM;
+
+ /* Set up DMA address */
+ cafe_writel(cafe, lower_32_bits(cafe->dmaaddr), NAND_DMA_ADDR0);
+ cafe_writel(cafe, upper_32_bits(cafe->dmaaddr), NAND_DMA_ADDR1);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
+ cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
+
+ /* Restore the DMA flag */
+ cafe->usedma = usedma;
+
+ cafe->ctl2 = BIT(27); /* Reed-Solomon ECC */
+ if (mtd->writesize == 2048)
+ cafe->ctl2 |= BIT(29); /* 2KiB page size */
+
+ /* Set up ECC according to the type of chip we found */
+ mtd_set_ooblayout(mtd, &cafe_ooblayout_ops);
+ if (mtd->writesize == 2048) {
+ cafe->nand.bbt_td = &cafe_bbt_main_descr_2048;
+ cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048;
+ } else if (mtd->writesize == 512) {
+ cafe->nand.bbt_td = &cafe_bbt_main_descr_512;
+ cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512;
+ } else {
+ dev_warn(&cafe->pdev->dev,
+ "Unexpected NAND flash writesize %d. Aborting\n",
+ mtd->writesize);
+ err = -ENOTSUPP;
+ goto out_free_dma;
+ }
+
+ cafe->nand.ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ cafe->nand.ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
+ cafe->nand.ecc.size = mtd->writesize;
+ cafe->nand.ecc.bytes = 14;
+ cafe->nand.ecc.strength = 4;
+ cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel;
+ cafe->nand.ecc.write_oob = cafe_nand_write_oob;
+ cafe->nand.ecc.read_page = cafe_nand_read_page;
+ cafe->nand.ecc.read_oob = cafe_nand_read_oob;
+
+ return 0;
+
+ out_free_dma:
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
+
+ return err;
+}
+
+static void cafe_nand_detach_chip(struct nand_chip *chip)
+{
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
+}
+
+static const struct nand_controller_ops cafe_nand_controller_ops = {
+ .attach_chip = cafe_nand_attach_chip,
+ .detach_chip = cafe_nand_detach_chip,
+};
+
+static int cafe_nand_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct mtd_info *mtd;
+ struct cafe_priv *cafe;
+ uint32_t ctrl;
+ int err = 0;
+
+ /* Very old versions shared the same PCI ident for all three
+ functions on the chip. Verify the class too... */
+ if ((pdev->class >> 8) != PCI_CLASS_MEMORY_FLASH)
+ return -ENODEV;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_master(pdev);
+
+ cafe = kzalloc(sizeof(*cafe), GFP_KERNEL);
+ if (!cafe) {
+ err = -ENOMEM;
+ goto out_disable_device;
+ }
+
+ mtd = nand_to_mtd(&cafe->nand);
+ mtd->dev.parent = &pdev->dev;
+ nand_set_controller_data(&cafe->nand, cafe);
+
+ cafe->pdev = pdev;
+ cafe->mmio = pci_iomap(pdev, 0, 0);
+ if (!cafe->mmio) {
+ dev_warn(&pdev->dev, "failed to iomap\n");
+ err = -ENOMEM;
+ goto out_free_mtd;
+ }
+
+ cafe->rs = init_rs_non_canonical(12, &cafe_mul, 0, 1, 8);
+ if (!cafe->rs) {
+ err = -ENOMEM;
+ goto out_ior;
+ }
+
+ cafe->nand.legacy.cmdfunc = cafe_nand_cmdfunc;
+ cafe->nand.legacy.dev_ready = cafe_device_ready;
+ cafe->nand.legacy.read_byte = cafe_read_byte;
+ cafe->nand.legacy.read_buf = cafe_read_buf;
+ cafe->nand.legacy.write_buf = cafe_write_buf;
+ cafe->nand.legacy.select_chip = cafe_select_chip;
+ cafe->nand.legacy.set_features = nand_get_set_features_notsupp;
+ cafe->nand.legacy.get_features = nand_get_set_features_notsupp;
+
+ cafe->nand.legacy.chip_delay = 0;
+
+ /* Enable the following for a flash based bad block table */
+ cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
+
+ if (skipbbt)
+ cafe->nand.options |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK;
+
+ if (numtimings && numtimings != 3) {
+ dev_warn(&cafe->pdev->dev, "%d timing register values ignored; precisely three are required\n", numtimings);
+ }
+
+ if (numtimings == 3) {
+ cafe_dev_dbg(&cafe->pdev->dev, "Using provided timings (%08x %08x %08x)\n",
+ timing[0], timing[1], timing[2]);
+ } else {
+ timing[0] = cafe_readl(cafe, NAND_TIMING1);
+ timing[1] = cafe_readl(cafe, NAND_TIMING2);
+ timing[2] = cafe_readl(cafe, NAND_TIMING3);
+
+ if (timing[0] | timing[1] | timing[2]) {
+ cafe_dev_dbg(&cafe->pdev->dev, "Timing registers already set (%08x %08x %08x)\n",
+ timing[0], timing[1], timing[2]);
+ } else {
+ dev_warn(&cafe->pdev->dev, "Timing registers unset; using most conservative defaults\n");
+ timing[0] = timing[1] = timing[2] = 0xffffffff;
+ }
+ }
+
+ /* Start off by resetting the NAND controller completely */
+ cafe_writel(cafe, 1, NAND_RESET);
+ cafe_writel(cafe, 0, NAND_RESET);
+
+ cafe_writel(cafe, timing[0], NAND_TIMING1);
+ cafe_writel(cafe, timing[1], NAND_TIMING2);
+ cafe_writel(cafe, timing[2], NAND_TIMING3);
+
+ cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
+ err = request_irq(pdev->irq, &cafe_nand_interrupt, IRQF_SHARED,
+ "CAFE NAND", mtd);
+ if (err) {
+ dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
+ goto out_free_rs;
+ }
+
+ /* Disable master reset, enable NAND clock */
+ ctrl = cafe_readl(cafe, GLOBAL_CTRL);
+ ctrl &= 0xffffeff0;
+ ctrl |= 0x00007000;
+ cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
+ cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
+ cafe_writel(cafe, 0, NAND_DMA_CTRL);
+
+ cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
+ cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
+
+ /* Enable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
+ cafe_dev_dbg(&cafe->pdev->dev, "Control %x, IRQ mask %x\n",
+ cafe_readl(cafe, GLOBAL_CTRL),
+ cafe_readl(cafe, GLOBAL_IRQ_MASK));
+
+ /* Do not use the DMA during the NAND identification */
+ cafe->usedma = 0;
+
+ /* Scan to find existence of the device */
+ cafe->nand.legacy.dummy_controller.ops = &cafe_nand_controller_ops;
+ err = nand_scan(&cafe->nand, 2);
+ if (err)
+ goto out_irq;
+
+ pci_set_drvdata(pdev, mtd);
+
+ mtd->name = "cafe_nand";
+ err = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
+ if (err)
+ goto out_cleanup_nand;
+
+ goto out;
+
+ out_cleanup_nand:
+ nand_cleanup(&cafe->nand);
+ out_irq:
+ /* Disable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
+ free_irq(pdev->irq, mtd);
+ out_free_rs:
+ free_rs(cafe->rs);
+ out_ior:
+ pci_iounmap(pdev, cafe->mmio);
+ out_free_mtd:
+ kfree(cafe);
+ out_disable_device:
+ pci_disable_device(pdev);
+ out:
+ return err;
+}
+
+static void cafe_nand_remove(struct pci_dev *pdev)
+{
+ struct mtd_info *mtd = pci_get_drvdata(pdev);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ int ret;
+
+ /* Disable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
+ free_irq(pdev->irq, mtd);
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ free_rs(cafe->rs);
+ pci_iounmap(pdev, cafe->mmio);
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
+ kfree(cafe);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id cafe_nand_tbl[] = {
+ { PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, cafe_nand_tbl);
+
+static int cafe_nand_resume(struct pci_dev *pdev)
+{
+ uint32_t ctrl;
+ struct mtd_info *mtd = pci_get_drvdata(pdev);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+ /* Start off by resetting the NAND controller completely */
+ cafe_writel(cafe, 1, NAND_RESET);
+ cafe_writel(cafe, 0, NAND_RESET);
+ cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
+
+ /* Restore timing configuration */
+ cafe_writel(cafe, timing[0], NAND_TIMING1);
+ cafe_writel(cafe, timing[1], NAND_TIMING2);
+ cafe_writel(cafe, timing[2], NAND_TIMING3);
+
+ /* Disable master reset, enable NAND clock */
+ ctrl = cafe_readl(cafe, GLOBAL_CTRL);
+ ctrl &= 0xffffeff0;
+ ctrl |= 0x00007000;
+ cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
+ cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
+ cafe_writel(cafe, 0, NAND_DMA_CTRL);
+ cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
+ cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
+
+ /* Set up DMA address */
+ cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
+ if (sizeof(cafe->dmaaddr) > 4)
+ /* Shift in two parts to shut the compiler up */
+ cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
+ else
+ cafe_writel(cafe, 0, NAND_DMA_ADDR1);
+
+ /* Enable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
+ return 0;
+}
+
+static struct pci_driver cafe_nand_pci_driver = {
+ .name = "CAFÉ NAND",
+ .id_table = cafe_nand_tbl,
+ .probe = cafe_nand_probe,
+ .remove = cafe_nand_remove,
+ .resume = cafe_nand_resume,
+};
+
+module_pci_driver(cafe_nand_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("NAND flash driver for OLPC CAFÉ chip");
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
new file mode 100644
index 0000000000..f0a15717cf
--- /dev/null
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (C) 2005, 2006 Red Hat Inc.
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ * Tom Sylla <tom.sylla@amd.com>
+ *
+ * Overview:
+ * This is a device driver for the NAND flash controller found on
+ * the AMD CS5535/CS5536 companion chipsets for the Geode processor.
+ * mtd-id for command line partitioning is cs553x_nand_cs[0-3]
+ * where 0-3 reflects the chip select for NAND.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/iopoll.h>
+
+#include <asm/msr.h>
+
+#define NR_CS553X_CONTROLLERS 4
+
+#define MSR_DIVIL_GLD_CAP 0x51400000 /* DIVIL capabilities */
+#define CAP_CS5535 0x2df000ULL
+#define CAP_CS5536 0x5df500ULL
+
+/* NAND Timing MSRs */
+#define MSR_NANDF_DATA 0x5140001b /* NAND Flash Data Timing MSR */
+#define MSR_NANDF_CTL 0x5140001c /* NAND Flash Control Timing */
+#define MSR_NANDF_RSVD 0x5140001d /* Reserved */
+
+/* NAND BAR MSRs */
+#define MSR_DIVIL_LBAR_FLSH0 0x51400010 /* Flash Chip Select 0 */
+#define MSR_DIVIL_LBAR_FLSH1 0x51400011 /* Flash Chip Select 1 */
+#define MSR_DIVIL_LBAR_FLSH2 0x51400012 /* Flash Chip Select 2 */
+#define MSR_DIVIL_LBAR_FLSH3 0x51400013 /* Flash Chip Select 3 */
+ /* Each made up of... */
+#define FLSH_LBAR_EN (1ULL<<32)
+#define FLSH_NOR_NAND (1ULL<<33) /* 1 for NAND */
+#define FLSH_MEM_IO (1ULL<<34) /* 1 for MMIO */
+ /* I/O BARs have BASE_ADDR in bits 15:4, IO_MASK in 47:36 */
+ /* MMIO BARs have BASE_ADDR in bits 31:12, MEM_MASK in 63:44 */
+
+/* Pin function selection MSR (IDE vs. flash on the IDE pins) */
+#define MSR_DIVIL_BALL_OPTS 0x51400015
+#define PIN_OPT_IDE (1<<0) /* 0 for flash, 1 for IDE */
+
+/* Registers within the NAND flash controller BAR -- memory mapped */
+#define MM_NAND_DATA 0x00 /* 0 to 0x7ff, in fact */
+#define MM_NAND_CTL 0x800 /* Any even address 0x800-0x80e */
+#define MM_NAND_IO 0x801 /* Any odd address 0x801-0x80f */
+#define MM_NAND_STS 0x810
+#define MM_NAND_ECC_LSB 0x811
+#define MM_NAND_ECC_MSB 0x812
+#define MM_NAND_ECC_COL 0x813
+#define MM_NAND_LAC 0x814
+#define MM_NAND_ECC_CTL 0x815
+
+/* Registers within the NAND flash controller BAR -- I/O mapped */
+#define IO_NAND_DATA 0x00 /* 0 to 3, in fact */
+#define IO_NAND_CTL 0x04
+#define IO_NAND_IO 0x05
+#define IO_NAND_STS 0x06
+#define IO_NAND_ECC_CTL 0x08
+#define IO_NAND_ECC_LSB 0x09
+#define IO_NAND_ECC_MSB 0x0a
+#define IO_NAND_ECC_COL 0x0b
+#define IO_NAND_LAC 0x0c
+
+#define CS_NAND_CTL_DIST_EN (1<<4) /* Enable NAND Distract interrupt */
+#define CS_NAND_CTL_RDY_INT_MASK (1<<3) /* Enable RDY/BUSY# interrupt */
+#define CS_NAND_CTL_ALE (1<<2)
+#define CS_NAND_CTL_CLE (1<<1)
+#define CS_NAND_CTL_CE (1<<0) /* Keep low; 1 to reset */
+
+#define CS_NAND_STS_FLASH_RDY (1<<3)
+#define CS_NAND_CTLR_BUSY (1<<2)
+#define CS_NAND_CMD_COMP (1<<1)
+#define CS_NAND_DIST_ST (1<<0)
+
+#define CS_NAND_ECC_PARITY (1<<2)
+#define CS_NAND_ECC_CLRECC (1<<1)
+#define CS_NAND_ECC_ENECC (1<<0)
+
+struct cs553x_nand_controller {
+ struct nand_controller base;
+ struct nand_chip chip;
+ void __iomem *mmio;
+};
+
+static struct cs553x_nand_controller *
+to_cs553x(struct nand_controller *controller)
+{
+ return container_of(controller, struct cs553x_nand_controller, base);
+}
+
+static int cs553x_write_ctrl_byte(struct cs553x_nand_controller *cs553x,
+ u32 ctl, u8 data)
+{
+ u8 status;
+
+ writeb(ctl, cs553x->mmio + MM_NAND_CTL);
+ writeb(data, cs553x->mmio + MM_NAND_IO);
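+ /*
+ * Poll the status register every microsecond for up to 100 ms;
+ * readb_poll_timeout_atomic() returns 0 once the controller leaves
+ * the busy state and -ETIMEDOUT otherwise.
+ */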
+ return readb_poll_timeout_atomic(cs553x->mmio + MM_NAND_STS, status,
+ !(status & CS_NAND_CTLR_BUSY), 1,
+ 100000);
+}
+
+static void cs553x_data_in(struct cs553x_nand_controller *cs553x, void *buf,
+ unsigned int len)
+{
+ writeb(0, cs553x->mmio + MM_NAND_CTL);
+ while (unlikely(len > 0x800)) {
+ memcpy_fromio(buf, cs553x->mmio, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_fromio(buf, cs553x->mmio, len);
+}
+
+static void cs553x_data_out(struct cs553x_nand_controller *cs553x,
+ const void *buf, unsigned int len)
+{
+ writeb(0, cs553x->mmio + MM_NAND_CTL);
+ while (unlikely(len > 0x800)) {
+ memcpy_toio(cs553x->mmio, buf, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_toio(cs553x->mmio, buf, len);
+}
+
+static int cs553x_wait_ready(struct cs553x_nand_controller *cs553x,
+ unsigned int timeout_ms)
+{
+ u8 mask = CS_NAND_CTLR_BUSY | CS_NAND_STS_FLASH_RDY;
+ u8 status;
+
+ return readb_poll_timeout(cs553x->mmio + MM_NAND_STS, status,
+ (status & mask) == CS_NAND_STS_FLASH_RDY, 100,
+ timeout_ms * 1000);
+}
+
+static int cs553x_exec_instr(struct cs553x_nand_controller *cs553x,
+ const struct nand_op_instr *instr)
+{
+ unsigned int i;
+ int ret = 0;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_CLE,
+ instr->ctx.cmd.opcode);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_ALE,
+ instr->ctx.addr.addrs[i]);
+ if (ret)
+ break;
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ cs553x_data_in(cs553x, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ cs553x_data_out(cs553x, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = cs553x_wait_ready(cs553x, instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+
+ return ret;
+}
+
+static int cs553x_exec_op(struct nand_chip *this,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
+ unsigned int i;
+ int ret;
+
+ if (check_only)
+ return 0;
+
+ /* De-assert the CE pin */
+ writeb(0, cs553x->mmio + MM_NAND_CTL);
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = cs553x_exec_instr(cs553x, &op->instrs[i]);
+ if (ret)
+ break;
+ }
+
+ /* Re-assert the CE pin. */
+ writeb(CS_NAND_CTL_CE, cs553x->mmio + MM_NAND_CTL);
+
+ return ret;
+}
+
+static void cs_enable_hwecc(struct nand_chip *this, int mode)
+{
+ struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
+
+ writeb(0x07, cs553x->mmio + MM_NAND_ECC_CTL);
+}
+
+static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
+ uint32_t ecc;
+
+ ecc = readl(cs553x->mmio + MM_NAND_STS);
+
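+ /*
+ * The three ECC bytes live in bits 31:8 of the status dword; note the
+ * byte order: ecc_code[0] comes from bits 23:16 and ecc_code[1] from
+ * bits 15:8, presumably the order the software Hamming correction
+ * helper expects.
+ */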
+ ecc_code[1] = ecc >> 8;
+ ecc_code[0] = ecc >> 16;
+ ecc_code[2] = ecc >> 24;
+ return 0;
+}
+
+static struct cs553x_nand_controller *controllers[4];
+
+static int cs553x_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.hwctl = cs_enable_hwecc;
+ chip->ecc.calculate = cs_calculate_ecc;
+ chip->ecc.correct = rawnand_sw_hamming_correct;
+ chip->ecc.strength = 1;
+
+ return 0;
+}
+
+static const struct nand_controller_ops cs553x_nand_controller_ops = {
+ .exec_op = cs553x_exec_op,
+ .attach_chip = cs553x_attach_chip,
+};
+
+static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
+{
+ struct cs553x_nand_controller *controller;
+ int err = 0;
+ struct nand_chip *this;
+ struct mtd_info *new_mtd;
+
+ pr_notice("Probing CS553x NAND controller CS#%d at %sIO 0x%08lx\n",
+ cs, mmio ? "MM" : "P", adr);
+
+ if (!mmio) {
+ pr_notice("PIO mode not yet implemented for CS553X NAND controller\n");
+ return -ENXIO;
+ }
+
+ /* Allocate memory for MTD device structure and private data */
+ controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+ if (!controller) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ this = &controller->chip;
+ nand_controller_init(&controller->base);
+ controller->base.ops = &cs553x_nand_controller_ops;
+ this->controller = &controller->base;
+ new_mtd = nand_to_mtd(this);
+
+ /* Link the private data with the MTD structure */
+ new_mtd->owner = THIS_MODULE;
+
+ /* map physical address */
+ controller->mmio = ioremap(adr, 4096);
+ if (!controller->mmio) {
+ pr_warn("ioremap cs553x NAND @0x%08lx failed\n", adr);
+ err = -EIO;
+ goto out_mtd;
+ }
+
+ /* Enable the following for a flash based bad block table */
+ this->bbt_options = NAND_BBT_USE_FLASH;
+
+ new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
+ if (!new_mtd->name) {
+ err = -ENOMEM;
+ goto out_ior;
+ }
+
+ /* Scan to find existence of the device */
+ err = nand_scan(this, 1);
+ if (err)
+ goto out_free;
+
+ controllers[cs] = controller;
+ goto out;
+
+out_free:
+ kfree(new_mtd->name);
+out_ior:
+ iounmap(controller->mmio);
+out_mtd:
+ kfree(controller);
+out:
+ return err;
+}
+
+static int is_geode(void)
+{
+ /* These are the CPUs which will have a CS553[56] companion chip */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 == 5 &&
+ boot_cpu_data.x86_model == 10)
+ return 1; /* Geode LX */
+
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX) &&
+ boot_cpu_data.x86 == 5 &&
+ boot_cpu_data.x86_model == 5)
+ return 1; /* Geode GX (née GX2) */
+
+ return 0;
+}
+
+static int __init cs553x_init(void)
+{
+ int err = -ENXIO;
+ int i;
+ uint64_t val;
+
+ /* If the CPU isn't a Geode GX or LX, abort */
+ if (!is_geode())
+ return -ENXIO;
+
+ /* If it doesn't have the CS553[56], abort */
+ rdmsrl(MSR_DIVIL_GLD_CAP, val);
+ val &= ~0xFFULL;
+ if (val != CAP_CS5535 && val != CAP_CS5536)
+ return -ENXIO;
+
+ /* If it doesn't have the NAND controller enabled, abort */
+ rdmsrl(MSR_DIVIL_BALL_OPTS, val);
+ if (val & PIN_OPT_IDE) {
+ pr_info("CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n");
+ return -ENXIO;
+ }
+
+ for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
+ rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val);
+
+ if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND))
+ err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF);
+ }
+
+ /* Register all devices together here. This means we can easily hack it to
+ do mtdconcat etc. if we want to. */
+ for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
+ if (controllers[i]) {
+ /* If any devices registered, return success. Else the last error. */
+ mtd_device_register(nand_to_mtd(&controllers[i]->chip),
+ NULL, 0);
+ err = 0;
+ }
+ }
+
+ return err;
+}
+
+module_init(cs553x_init);
+
+static void __exit cs553x_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
+ struct cs553x_nand_controller *controller = controllers[i];
+ struct nand_chip *this;
+ struct mtd_info *mtd;
+ int ret;
+
+ if (!controller)
+ continue;
+
+ this = &controller->chip;
+ mtd = nand_to_mtd(this);
+
+ /* Release resources, unregister device */
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(this);
+ kfree(mtd->name);
+ controllers[i] = NULL;
+
+ /* unmap physical address */
+ iounmap(controller->mmio);
+
+ /* Free the MTD device structure */
+ kfree(controller);
+ }
+}
+
+module_exit(cs553x_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("NAND controller driver for AMD CS5535/CS5536 companion chip");
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
new file mode 100644
index 0000000000..e75d81cf8c
--- /dev/null
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -0,0 +1,854 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * davinci_nand.c - NAND Flash Driver for DaVinci family chips
+ *
+ * Copyright © 2006 Texas Instruments.
+ *
+ * Port to 2.6.23 Copyright © 2008 by:
+ * Sander Huijsen <Shuijsen@optelecom-nkf.com>
+ * Troy Kisky <troy.kisky@boundarydevices.com>
+ * Dirk Behme <Dirk.Behme@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include <linux/platform_data/mtd-davinci.h>
+#include <linux/platform_data/mtd-davinci-aemif.h>
+
+/*
+ * This is a device driver for the NAND flash controller found on the
+ * various DaVinci family chips. It handles up to four SoC chipselects,
+ * and some flavors of secondary chipselect (e.g. based on A12) as used
+ * with multichip packages.
+ *
+ * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
+ * available on chips like the DM355 and OMAP-L137 and needed with the
+ * more error-prone MLC NAND chips.
+ *
+ * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
+ * outputs in a "wire-AND" configuration, with no per-chip signals.
+ */
+struct davinci_nand_info {
+ struct nand_controller controller;
+ struct nand_chip chip;
+
+ struct platform_device *pdev;
+
+ bool is_readmode;
+
+ void __iomem *base;
+ void __iomem *vaddr;
+
+ void __iomem *current_cs;
+
+ uint32_t mask_chipsel;
+ uint32_t mask_ale;
+ uint32_t mask_cle;
+
+ uint32_t core_chipsel;
+
+ struct davinci_aemif_timing *timing;
+};
+
+static DEFINE_SPINLOCK(davinci_nand_lock);
+static bool ecc4_busy;
+
+static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
+}
+
+static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
+ int offset)
+{
+ return __raw_readl(info->base + offset);
+}
+
+static inline void davinci_nand_writel(struct davinci_nand_info *info,
+ int offset, unsigned long value)
+{
+ __raw_writel(value, info->base + offset);
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * 1-bit hardware ECC ... context maintained for each core chipselect
+ */
+
+static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+
+ return davinci_nand_readl(info, NANDF1ECC_OFFSET
+ + 4 * info->core_chipsel);
+}
+
+static void nand_davinci_hwctl_1bit(struct nand_chip *chip, int mode)
+{
+ struct davinci_nand_info *info;
+ uint32_t nandcfr;
+ unsigned long flags;
+
+ info = to_davinci_nand(nand_to_mtd(chip));
+
+ /* Reset ECC hardware */
+ nand_davinci_readecc_1bit(nand_to_mtd(chip));
+
+ spin_lock_irqsave(&davinci_nand_lock, flags);
+
+ /* Restart ECC hardware */
+ nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
+ nandcfr |= BIT(8 + info->core_chipsel);
+ davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);
+
+ spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/*
+ * Read hardware ECC value and pack into three bytes
+ */
+static int nand_davinci_calculate_1bit(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
+{
+ unsigned int ecc_val = nand_davinci_readecc_1bit(nand_to_mtd(chip));
+ unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);
+
+ /* invert so that erased block ecc is correct */
+ ecc24 = ~ecc24;
+ ecc_code[0] = (u_char)(ecc24);
+ ecc_code[1] = (u_char)(ecc24 >> 8);
+ ecc_code[2] = (u_char)(ecc24 >> 16);
+
+ return 0;
+}
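+
+/*
+ * Worked example (illustrative, assuming the engine's parity of an
+ * all-0xff page is zero, as is usual for this kind of Hamming hardware):
+ * on an erased page the parity registers read back 0, so ecc24 = ~0 =
+ * 0xffffff and the stored code is ff ff ff -- matching the all-0xff OOB
+ * of freshly erased flash, which is why the inversion above is needed.
+ */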
+
+static int nand_davinci_correct_1bit(struct nand_chip *chip, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
+ (read_ecc[2] << 16);
+ uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
+ (calc_ecc[2] << 16);
+ uint32_t diff = eccCalc ^ eccNand;
+
+ if (diff) {
+ if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
+ /* Correctable error */
+ if ((diff >> (12 + 3)) < chip->ecc.size) {
+ dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
+ return 1;
+ } else {
+ return -EBADMSG;
+ }
+ } else if (!(diff & (diff - 1))) {
+ /* Single bit ECC error in the ECC itself,
+ * nothing to fix */
+ return 1;
+ } else {
+ /* Uncorrectable error */
+ return -EBADMSG;
+ }
+
+ }
+ return 0;
+}
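+
+/*
+ * Worked example of the check above (hypothetical numbers): a single
+ * flipped bit at byte 100, bit 3 corresponds to bit position
+ * 100 * 8 + 3 = 803, so diff >> 12 == 803 while the low 12 bits hold
+ * its complement; ((diff >> 12) ^ diff) & 0xfff == 0xfff then holds,
+ * and the code repairs dat[803 >> 3] bit (803 & 7), i.e. byte 100,
+ * bit 3.
+ */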
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * 4-bit hardware ECC ... context maintained over entire AEMIF
+ *
+ * This is a syndrome engine, but we avoid NAND_ECC_PLACEMENT_INTERLEAVED
+ * since that forces use of a problematic "infix OOB" layout.
+ * Among other things, it trashes manufacturer bad block markers.
+ * Also, and specific to this hardware, it ECC-protects the "prepad"
+ * in the OOB ... while having ECC protection for parts of OOB would
+ * seem useful, the current MTD stack sometimes wants to update the
+ * OOB without recomputing ECC.
+ */
+
+static void nand_davinci_hwctl_4bit(struct nand_chip *chip, int mode)
+{
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+ unsigned long flags;
+ u32 val;
+
+ /* Reset ECC hardware */
+ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+
+ spin_lock_irqsave(&davinci_nand_lock, flags);
+
+ /* Start 4-bit ECC calculation for read/write */
+ val = davinci_nand_readl(info, NANDFCR_OFFSET);
+ val &= ~(0x03 << 4);
+ val |= (info->core_chipsel << 4) | BIT(12);
+ davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+ info->is_readmode = (mode == NAND_ECC_READ);
+
+ spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/* Read raw ECC code after writing to NAND. */
+static void
+nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
+{
+ const u32 mask = 0x03ff03ff;
+
+ code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
+ code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
+ code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
+ code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
+}
+
+/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
+static int nand_davinci_calculate_4bit(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+ u32 raw_ecc[4], *p;
+ unsigned i;
+
+ /* After a read, terminate ECC calculation by a dummy read
+ * of some 4-bit ECC register. ECC covers everything that
+ * was read; correct() just uses the hardware state, so
+ * ecc_code is not needed.
+ */
+ if (info->is_readmode) {
+ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+ return 0;
+ }
+
+ /* Pack eight raw 10-bit ecc values into ten bytes, making
+ * two passes which each convert four values (in upper and
+ * lower halves of two 32-bit words) into five bytes. The
+ * ROM boot loader uses this same packing scheme.
+ */
+ nand_davinci_readecc_4bit(info, raw_ecc);
+ for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
+ *ecc_code++ = p[0] & 0xff;
+ *ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
+ *ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
+ *ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
+ *ecc_code++ = (p[1] >> 18) & 0xff;
+ }
+
+ return 0;
+}
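+
+/*
+ * Packing sanity check (illustrative values): if the first 10-bit value
+ * is 0x3ff and the remaining seven are 0 (p[0] = 0x000003ff, p[1] = 0),
+ * the first pass above emits ff 03 00 00 00 -- i.e. the ten bytes form
+ * a little-endian stream of eight 10-bit values, the same layout the
+ * ROM boot loader expects.
+ */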
+
+/* Correct up to 4 bits in data we just read, using state left in the
+ * hardware plus the ecc_code computed when it was first written.
+ */
+static int nand_davinci_correct_4bit(struct nand_chip *chip, u_char *data,
+ u_char *ecc_code, u_char *null)
+{
+ int i;
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+ unsigned short ecc10[8];
+ unsigned short *ecc16;
+ u32 syndrome[4];
+ u32 ecc_state;
+ unsigned num_errors, corrected;
+ unsigned long timeo;
+
+ /* Unpack ten bytes into eight 10 bit values. We know we're
+ * little-endian, and use type punning for less shifting/masking.
+ */
+ if (WARN_ON(0x01 & (uintptr_t)ecc_code))
+ return -EINVAL;
+ ecc16 = (unsigned short *)ecc_code;
+
+ ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
+ ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
+ ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
+ ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
+ ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
+ ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
+ ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
+ ecc10[7] = (ecc16[4] >> 6) & 0x3ff;
+
+ /* Tell ECC controller about the expected ECC codes. */
+ for (i = 7; i >= 0; i--)
+ davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
+
+ /* Allow time for syndrome calculation ... then read it.
+ * A syndrome of all zeroes 0 means no detected errors.
+ */
+ davinci_nand_readl(info, NANDFSR_OFFSET);
+ nand_davinci_readecc_4bit(info, syndrome);
+ if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
+ return 0;
+
+ /*
+ * Clear any previous address calculation by doing a dummy read of an
+ * error address register.
+ */
+ davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);
+
+ /* Start address calculation, and wait for it to complete.
+ * We _could_ start reading more data while this is working,
+ * to speed up the overall page read.
+ */
+ davinci_nand_writel(info, NANDFCR_OFFSET,
+ davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
+
+ /*
+ * ECC_STATE field reads 0x3 (Error correction complete) immediately
+ * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
+ * begin trying to poll for the state, you may fall right out of your
+ * loop without any of the correction calculations having taken place.
+ * The recommendation from the hardware team is to initially delay as
+ * long as ECC_STATE reads less than 4. After that, ECC HW has entered
+ * correction state.
+ */
+ timeo = jiffies + usecs_to_jiffies(100);
+ do {
+ ecc_state = (davinci_nand_readl(info,
+ NANDFSR_OFFSET) >> 8) & 0x0f;
+ cpu_relax();
+ } while ((ecc_state < 4) && time_before(jiffies, timeo));
+
+ for (;;) {
+ u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
+
+ switch ((fsr >> 8) & 0x0f) {
+ case 0: /* no error, should not happen */
+ davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
+ return 0;
+ case 1: /* five or more errors detected */
+ davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
+ return -EBADMSG;
+ case 2: /* error addresses computed */
+ case 3:
+ num_errors = 1 + ((fsr >> 16) & 0x03);
+ goto correct;
+ default: /* still working on it */
+ cpu_relax();
+ continue;
+ }
+ }
+
+correct:
+ /* correct each error */
+ for (i = 0, corrected = 0; i < num_errors; i++) {
+ int error_address, error_value;
+
+ if (i > 1) {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD2_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL2_OFFSET);
+ } else {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD1_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL1_OFFSET);
+ }
+
+ if (i & 1) {
+ error_address >>= 16;
+ error_value >>= 16;
+ }
+ error_address &= 0x3ff;
+ error_address = (512 + 7) - error_address;
+
+ if (error_address < 512) {
+ data[error_address] ^= error_value;
+ corrected++;
+ }
+ }
+
+ return corrected;
+}
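+
+/*
+ * Address math example (assuming the hardware counts byte offsets from
+ * the end of the codeword): a reported error_address of 519 maps to
+ * (512 + 7) - 519 = 0, i.e. data[0]; a reported value of 7 maps to
+ * offset 512, which lands in the ECC bytes themselves, so the data
+ * buffer is left untouched.
+ */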
+
+/*----------------------------------------------------------------------*/
+
+/* An ECC layout for using 4-bit ECC with small-page flash, storing
+ * ten ECC bytes plus the manufacturer's bad block marker byte,
+ * and not overlapping the default BBT markers.
+ */
+static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 2)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 0;
+ oobregion->length = 5;
+ } else if (section == 1) {
+ oobregion->offset = 6;
+ oobregion->length = 2;
+ } else {
+ oobregion->offset = 13;
+ oobregion->length = 3;
+ }
+
+ return 0;
+}
+
+static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 8;
+ oobregion->length = 5;
+ } else {
+ oobregion->offset = 16;
+ oobregion->length = mtd->oobsize - 16;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
+ .ecc = hwecc4_ooblayout_small_ecc,
+ .free = hwecc4_ooblayout_small_free,
+};
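+
+/*
+ * For a 512-byte page with a 16-byte OOB, the layout above works out
+ * to: ECC in bytes 0-4, 6-7 and 13-15 (ten bytes total), the
+ * manufacturer's bad block marker preserved in byte 5, and bytes 8-12
+ * left free (enough for a flash BBT marker). The second free region
+ * (offset 16) is empty on such chips.
+ */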
+
+#if defined(CONFIG_OF)
+static const struct of_device_id davinci_nand_of_match[] = {
+ {.compatible = "ti,davinci-nand", },
+ {.compatible = "ti,keystone-nand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
+
+static struct davinci_nand_pdata
+ *nand_davinci_get_pdata(struct platform_device *pdev)
+{
+ if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
+ struct davinci_nand_pdata *pdata;
+ const char *mode;
+ u32 prop;
+
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct davinci_nand_pdata),
+ GFP_KERNEL);
+ pdev->dev.platform_data = pdata;
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-chipselect", &prop))
+ pdata->core_chipsel = prop;
+ else
+ return ERR_PTR(-EINVAL);
+
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-ale", &prop))
+ pdata->mask_ale = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-cle", &prop))
+ pdata->mask_cle = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-chipsel", &prop))
+ pdata->mask_chipsel = prop;
+ if (!of_property_read_string(pdev->dev.of_node,
+ "ti,davinci-ecc-mode", &mode)) {
+ if (!strncmp("none", mode, 4))
+ pdata->engine_type = NAND_ECC_ENGINE_TYPE_NONE;
+ if (!strncmp("soft", mode, 4))
+ pdata->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ if (!strncmp("hw", mode, 2))
+ pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ }
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-ecc-bits", &prop))
+ pdata->ecc_bits = prop;
+
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-nand-buswidth", &prop) && prop == 16)
+ pdata->options |= NAND_BUSWIDTH_16;
+
+ if (of_property_read_bool(pdev->dev.of_node,
+ "ti,davinci-nand-use-bbt"))
+ pdata->bbt_options = NAND_BBT_USE_FLASH;
+
+ /*
+ * Since kernel v4.8, this driver has been fixed to enable
+ * use of 4-bit hardware ECC with subpages and verified on
+ * TI's keystone EVMs (K2L, K2HK and K2E).
+ * However, in the interest of not breaking systems using
+ * existing UBI partitions, sub-page writes are not being
+ * (re)enabled. If you want to use subpage writes on Keystone
+ * platforms (i.e. do not have any existing UBI partitions),
+ * then use "ti,davinci-nand" as the compatible in your
+ * device-tree file.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "ti,keystone-nand")) {
+ pdata->options |= NAND_NO_SUBPAGE_WRITE;
+ }
+ }
+
+ return dev_get_platdata(&pdev->dev);
+}
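+
+/*
+ * Illustrative device-tree fragment for the properties parsed above;
+ * the node name, unit address and values are hypothetical, not taken
+ * from a real board:
+ *
+ *	nand@2000000 {
+ *		compatible = "ti,davinci-nand";
+ *		ti,davinci-chipselect = <1>;
+ *		ti,davinci-mask-ale = <0>;
+ *		ti,davinci-mask-cle = <0>;
+ *		ti,davinci-mask-chipsel = <0>;
+ *		ti,davinci-ecc-mode = "hw";
+ *		ti,davinci-ecc-bits = <4>;
+ *		ti,davinci-nand-use-bbt;
+ *	};
+ */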
+#else
+static struct davinci_nand_pdata
+ *nand_davinci_get_pdata(struct platform_device *pdev)
+{
+ return dev_get_platdata(&pdev->dev);
+}
+#endif
+
+static int davinci_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ struct davinci_nand_pdata *pdata = nand_davinci_get_pdata(info->pdev);
+ int ret = 0;
+
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ /* Use board-specific ECC config */
+ chip->ecc.engine_type = pdata->engine_type;
+ chip->ecc.placement = pdata->ecc_placement;
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ pdata->ecc_bits = 0;
+ break;
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ pdata->ecc_bits = 0;
+ /*
+ * This driver expects Hamming based ECC when engine_type is set
+ * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
+ * NAND_ECC_ALGO_HAMMING to avoid adding an extra ->ecc_algo
+ * field to davinci_nand_pdata.
+ */
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ break;
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ if (pdata->ecc_bits == 4) {
+ int chunks = mtd->writesize / 512;
+
+ if (!chunks || mtd->oobsize < 16) {
+ dev_dbg(&info->pdev->dev, "too small\n");
+ return -EINVAL;
+ }
+
+ /*
+ * No sanity checks: CPUs must support this,
+ * and the chips may not use NAND_BUSWIDTH_16.
+ */
+
+ /* No sharing 4-bit hardware between chipselects yet */
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc4_busy)
+ ret = -EBUSY;
+ else
+ ecc4_busy = true;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ if (ret == -EBUSY)
+ return ret;
+
+ chip->ecc.calculate = nand_davinci_calculate_4bit;
+ chip->ecc.correct = nand_davinci_correct_4bit;
+ chip->ecc.hwctl = nand_davinci_hwctl_4bit;
+ chip->ecc.bytes = 10;
+ chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
+
+ /*
+ * Update ECC layout if needed ... for 1-bit HW ECC, the
+ * default is OK, but it allocates 6 bytes when only 3
+ * are needed (for each 512 bytes). For 4-bit HW ECC,
+ * the default is not usable: 10 bytes needed, not 6.
+ *
+ * For small page chips, preserve the manufacturer's
+			 * badblock marking data ... and make sure a flash
+			 * BBT marker fits in the free bytes.
+ */
+ if (chunks == 1) {
+ mtd_set_ooblayout(mtd,
+ &hwecc4_small_ooblayout_ops);
+ } else if (chunks == 4 || chunks == 8) {
+ mtd_set_ooblayout(mtd,
+ nand_get_large_page_ooblayout());
+ chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+ } else {
+ return -EIO;
+ }
+ } else {
+ /* 1bit ecc hamming */
+ chip->ecc.calculate = nand_davinci_calculate_1bit;
+ chip->ecc.correct = nand_davinci_correct_1bit;
+ chip->ecc.hwctl = nand_davinci_hwctl_1bit;
+ chip->ecc.bytes = 3;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ }
+ chip->ecc.size = 512;
+ chip->ecc.strength = pdata->ecc_bits;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void nand_davinci_data_in(struct davinci_nand_info *info, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ u32 alignment = ((uintptr_t)buf | len) & 3;
+
+ if (force_8bit || (alignment & 1))
+ ioread8_rep(info->current_cs, buf, len);
+ else if (alignment & 3)
+ ioread16_rep(info->current_cs, buf, len >> 1);
+ else
+ ioread32_rep(info->current_cs, buf, len >> 2);
+}
+
+static void nand_davinci_data_out(struct davinci_nand_info *info,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ u32 alignment = ((uintptr_t)buf | len) & 3;
+
+ if (force_8bit || (alignment & 1))
+ iowrite8_rep(info->current_cs, buf, len);
+ else if (alignment & 3)
+ iowrite16_rep(info->current_cs, buf, len >> 1);
+ else
+ iowrite32_rep(info->current_cs, buf, len >> 2);
+}
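+
+/*
+ * The widest access that both the buffer address and the length allow
+ * is picked above. For example (hypothetical values): buf % 4 == 2 and
+ * len == 6 give alignment == 2, so three 16-bit accesses are used;
+ * len == 5 would set bit 0 and force five 8-bit accesses instead.
+ */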
+
+static int davinci_nand_exec_instr(struct davinci_nand_info *info,
+ const struct nand_op_instr *instr)
+{
+ unsigned int i, timeout_us;
+ u32 status;
+ int ret;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ iowrite8(instr->ctx.cmd.opcode,
+ info->current_cs + info->mask_cle);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ iowrite8(instr->ctx.addr.addrs[i],
+ info->current_cs + info->mask_ale);
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ nand_davinci_data_in(info, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ nand_davinci_data_out(info, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
+ ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET,
+ status, status & BIT(0), 100,
+ timeout_us);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+
+ return 0;
+}
+
+static int davinci_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+ unsigned int i;
+
+ if (check_only)
+ return 0;
+
+ info->current_cs = info->vaddr + (op->cs * info->mask_chipsel);
+
+ for (i = 0; i < op->ninstrs; i++) {
+ int ret;
+
+ ret = davinci_nand_exec_instr(info, &op->instrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops davinci_nand_controller_ops = {
+ .attach_chip = davinci_nand_attach_chip,
+ .exec_op = davinci_nand_exec_op,
+};
+
+static int nand_davinci_probe(struct platform_device *pdev)
+{
+ struct davinci_nand_pdata *pdata;
+ struct davinci_nand_info *info;
+ struct resource *res1;
+ struct resource *res2;
+ void __iomem *vaddr;
+ void __iomem *base;
+ int ret;
+ uint32_t val;
+ struct mtd_info *mtd;
+
+ pdata = nand_davinci_get_pdata(pdev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ /* insist on board-specific configuration */
+ if (!pdata)
+ return -ENODEV;
+
+ /* which external chipselect will we be managing? */
+ if (pdata->core_chipsel > 3)
+ return -ENODEV;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, info);
+
+ res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res1 || !res2) {
+ dev_err(&pdev->dev, "resource missing\n");
+ return -EINVAL;
+ }
+
+ vaddr = devm_ioremap_resource(&pdev->dev, res1);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ /*
+	 * This register range is used to set up the NAND controller. When the
+	 * TI AEMIF driver is in use, the same memory address range has already
+	 * been requested by AEMIF, so we cannot request it twice; just ioremap.
+	 * The AEMIF and NAND drivers do not touch the same registers in this
+	 * range.
+ */
+ base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
+ if (!base) {
+ dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
+ return -EADDRNOTAVAIL;
+ }
+
+ info->pdev = pdev;
+ info->base = base;
+ info->vaddr = vaddr;
+
+ mtd = nand_to_mtd(&info->chip);
+ mtd->dev.parent = &pdev->dev;
+ nand_set_flash_node(&info->chip, pdev->dev.of_node);
+
+ /* options such as NAND_BBT_USE_FLASH */
+ info->chip.bbt_options = pdata->bbt_options;
+ /* options such as 16-bit widths */
+ info->chip.options = pdata->options;
+ info->chip.bbt_td = pdata->bbt_td;
+ info->chip.bbt_md = pdata->bbt_md;
+ info->timing = pdata->timing;
+
+ info->current_cs = info->vaddr;
+ info->core_chipsel = pdata->core_chipsel;
+ info->mask_chipsel = pdata->mask_chipsel;
+
+ /* use nandboot-capable ALE/CLE masks by default */
+ info->mask_ale = pdata->mask_ale ? : MASK_ALE;
+ info->mask_cle = pdata->mask_cle ? : MASK_CLE;
+
+ spin_lock_irq(&davinci_nand_lock);
+
+ /* put CSxNAND into NAND mode */
+ val = davinci_nand_readl(info, NANDFCR_OFFSET);
+ val |= BIT(info->core_chipsel);
+ davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+ spin_unlock_irq(&davinci_nand_lock);
+
+ /* Scan to find existence of the device(s) */
+ nand_controller_init(&info->controller);
+ info->controller.ops = &davinci_nand_controller_ops;
+ info->chip.controller = &info->controller;
+ ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
+ return ret;
+ }
+
+ if (pdata->parts)
+ ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+ else
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret < 0)
+ goto err_cleanup_nand;
+
+ val = davinci_nand_readl(info, NRCSR_OFFSET);
+ dev_info(&pdev->dev, "controller rev. %d.%d\n",
+ (val >> 8) & 0xff, val & 0xff);
+
+ return 0;
+
+err_cleanup_nand:
+ nand_cleanup(&info->chip);
+
+ return ret;
+}
+
+static void nand_davinci_remove(struct platform_device *pdev)
+{
+ struct davinci_nand_info *info = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &info->chip;
+ int ret;
+
+ spin_lock_irq(&davinci_nand_lock);
+ if (chip->ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+}
+
+static struct platform_driver nand_davinci_driver = {
+ .probe = nand_davinci_probe,
+ .remove_new = nand_davinci_remove,
+ .driver = {
+ .name = "davinci_nand",
+ .of_match_table = of_match_ptr(davinci_nand_of_match),
+ },
+};
+MODULE_ALIAS("platform:davinci_nand");
+
+module_platform_driver(nand_davinci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("Davinci NAND flash driver");
+
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
new file mode 100644
index 0000000000..fa2439cb4d
--- /dev/null
+++ b/drivers/mtd/nand/raw/denali.c
@@ -0,0 +1,1381 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ *
+ * Copyright (c) 2017-2019 Socionext Inc.
+ * Reworked by Masahiro Yamada <yamada.masahiro@socionext.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "denali.h"
+
+#define DENALI_NAND_NAME "denali-nand"
+
+/* for Indexed Addressing */
+#define DENALI_INDEXED_CTRL 0x00
+#define DENALI_INDEXED_DATA 0x10
+
+#define DENALI_MAP00 (0 << 26) /* direct access to buffer */
+#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
+#define DENALI_MAP10 (2 << 26) /* high-level control plane */
+#define DENALI_MAP11 (3 << 26) /* direct controller access */
+
+/* MAP11 access cycle type */
+#define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */
+#define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */
+#define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */
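+
+/*
+ * For example, issuing a NAND RESET opcode (0xff) on the active bank
+ * takes a single MAP11 command cycle:
+ *
+ *	denali->host_write(denali, DENALI_MAP11_CMD | DENALI_BANK(denali),
+ *			   0xff);
+ *
+ * which is what denali_exec_out8() below performs for each opcode byte.
+ */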
+
+#define DENALI_BANK(denali) ((denali)->active_bank << 24)
+
+#define DENALI_INVALID_BANK -1
+
+static struct denali_chip *to_denali_chip(struct nand_chip *chip)
+{
+ return container_of(chip, struct denali_chip, chip);
+}
+
+static struct denali_controller *to_denali_controller(struct nand_chip *chip)
+{
+ return container_of(chip->controller, struct denali_controller,
+ controller);
+}
+
+/*
+ * Direct Addressing - the slave address forms the control information (command
+ * type, bank, block, and page address). The slave data is the actual data to
+ * be transferred. This mode requires a 28-bit address region to be allocated.
+ */
+static u32 denali_direct_read(struct denali_controller *denali, u32 addr)
+{
+ return ioread32(denali->host + addr);
+}
+
+static void denali_direct_write(struct denali_controller *denali, u32 addr,
+ u32 data)
+{
+ iowrite32(data, denali->host + addr);
+}
+
+/*
+ * Indexed Addressing - an address translation module intervenes in passing
+ * the control information. This mode reduces the required address range. The
+ * control information and transferred data are latched by the registers in
+ * the translation module.
+ */
+static u32 denali_indexed_read(struct denali_controller *denali, u32 addr)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ return ioread32(denali->host + DENALI_INDEXED_DATA);
+}
+
+static void denali_indexed_write(struct denali_controller *denali, u32 addr,
+ u32 data)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ iowrite32(data, denali->host + DENALI_INDEXED_DATA);
+}
+
+static void denali_enable_irq(struct denali_controller *denali)
+{
+ int i;
+
+ for (i = 0; i < denali->nbanks; i++)
+ iowrite32(U32_MAX, denali->reg + INTR_EN(i));
+ iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
+}
+
+static void denali_disable_irq(struct denali_controller *denali)
+{
+ int i;
+
+ for (i = 0; i < denali->nbanks; i++)
+ iowrite32(0, denali->reg + INTR_EN(i));
+ iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
+}
+
+static void denali_clear_irq(struct denali_controller *denali,
+ int bank, u32 irq_status)
+{
+ /* write one to clear bits */
+ iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
+}
+
+static void denali_clear_irq_all(struct denali_controller *denali)
+{
+ int i;
+
+ for (i = 0; i < denali->nbanks; i++)
+ denali_clear_irq(denali, i, U32_MAX);
+}
+
+static irqreturn_t denali_isr(int irq, void *dev_id)
+{
+ struct denali_controller *denali = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ u32 irq_status;
+ int i;
+
+ spin_lock(&denali->irq_lock);
+
+ for (i = 0; i < denali->nbanks; i++) {
+ irq_status = ioread32(denali->reg + INTR_STATUS(i));
+ if (irq_status)
+ ret = IRQ_HANDLED;
+
+ denali_clear_irq(denali, i, irq_status);
+
+ if (i != denali->active_bank)
+ continue;
+
+ denali->irq_status |= irq_status;
+
+ if (denali->irq_status & denali->irq_mask)
+ complete(&denali->complete);
+ }
+
+ spin_unlock(&denali->irq_lock);
+
+ return ret;
+}
+
+static void denali_reset_irq(struct denali_controller *denali)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&denali->irq_lock, flags);
+ denali->irq_status = 0;
+ denali->irq_mask = 0;
+ spin_unlock_irqrestore(&denali->irq_lock, flags);
+}
+
+static u32 denali_wait_for_irq(struct denali_controller *denali, u32 irq_mask)
+{
+ unsigned long time_left, flags;
+ u32 irq_status;
+
+ spin_lock_irqsave(&denali->irq_lock, flags);
+
+ irq_status = denali->irq_status;
+
+ if (irq_mask & irq_status) {
+ /* return immediately if the IRQ has already happened. */
+ spin_unlock_irqrestore(&denali->irq_lock, flags);
+ return irq_status;
+ }
+
+ denali->irq_mask = irq_mask;
+ reinit_completion(&denali->complete);
+ spin_unlock_irqrestore(&denali->irq_lock, flags);
+
+ time_left = wait_for_completion_timeout(&denali->complete,
+ msecs_to_jiffies(1000));
+ if (!time_left) {
+ dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
+ irq_mask);
+ return 0;
+ }
+
+ return denali->irq_status;
+}
+
+static void denali_select_target(struct nand_chip *chip, int cs)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct denali_chip_sel *sel = &to_denali_chip(chip)->sels[cs];
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ denali->active_bank = sel->bank;
+
+ iowrite32(1 << (chip->phys_erase_shift - chip->page_shift),
+ denali->reg + PAGES_PER_BLOCK);
+ iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
+ denali->reg + DEVICE_WIDTH);
+ iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
+ iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
+ iowrite32(chip->options & NAND_ROW_ADDR_3 ?
+ 0 : TWO_ROW_ADDR_CYCLES__FLAG,
+ denali->reg + TWO_ROW_ADDR_CYCLES);
+ iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
+ FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
+ denali->reg + ECC_CORRECTION);
+ iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
+ iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
+ iowrite32(chip->ecc.steps, denali->reg + CFG_NUM_DATA_BLOCKS);
+
+ if (chip->options & NAND_KEEP_TIMINGS)
+ return;
+
+ /* update timing registers unless NAND_KEEP_TIMINGS is set */
+ iowrite32(sel->hwhr2_and_we_2_re, denali->reg + TWHR2_AND_WE_2_RE);
+ iowrite32(sel->tcwaw_and_addr_2_data,
+ denali->reg + TCWAW_AND_ADDR_2_DATA);
+ iowrite32(sel->re_2_we, denali->reg + RE_2_WE);
+ iowrite32(sel->acc_clks, denali->reg + ACC_CLKS);
+ iowrite32(sel->rdwr_en_lo_cnt, denali->reg + RDWR_EN_LO_CNT);
+ iowrite32(sel->rdwr_en_hi_cnt, denali->reg + RDWR_EN_HI_CNT);
+ iowrite32(sel->cs_setup_cnt, denali->reg + CS_SETUP_CNT);
+ iowrite32(sel->re_2_re, denali->reg + RE_2_RE);
+}
+
+static int denali_change_column(struct nand_chip *chip, unsigned int offset,
+ void *buf, unsigned int len, bool write)
+{
+ if (write)
+ return nand_change_write_column_op(chip, offset, buf, len,
+ false);
+ else
+ return nand_change_read_column_op(chip, offset, buf, len,
+ false);
+}
+
+static int denali_payload_xfer(struct nand_chip *chip, void *buf, bool write)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int writesize = mtd->writesize;
+ int oob_skip = denali->oob_skip_bytes;
+ int ret, i, pos, len;
+
+ for (i = 0; i < ecc->steps; i++) {
+ pos = i * (ecc->size + ecc->bytes);
+ len = ecc->size;
+
+ if (pos >= writesize) {
+ pos += oob_skip;
+ } else if (pos + len > writesize) {
+			/* This chunk overlaps the BBM area. Must be split */
+ ret = denali_change_column(chip, pos, buf,
+ writesize - pos, write);
+ if (ret)
+ return ret;
+
+ buf += writesize - pos;
+ len -= writesize - pos;
+ pos = writesize + oob_skip;
+ }
+
+ ret = denali_change_column(chip, pos, buf, len, write);
+ if (ret)
+ return ret;
+
+ buf += len;
+ }
+
+ return 0;
+}
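+
+/*
+ * Worked example with assumed geometry (writesize = 2048, ecc.size =
+ * 512, ecc.bytes = 14, oob_skip = 8): chunk positions are 0, 526, 1052
+ * and 1578. The last chunk would end at 1578 + 512 = 2090 > 2048, so
+ * it is split: 470 bytes at 1578, then the remaining 42 bytes at
+ * 2048 + 8 = 2056, skipping the BBM area.
+ */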
+
+static int denali_oob_xfer(struct nand_chip *chip, void *buf, bool write)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int writesize = mtd->writesize;
+ int oobsize = mtd->oobsize;
+ int oob_skip = denali->oob_skip_bytes;
+ int ret, i, pos, len;
+
+ /* BBM at the beginning of the OOB area */
+ ret = denali_change_column(chip, writesize, buf, oob_skip, write);
+ if (ret)
+ return ret;
+
+ buf += oob_skip;
+
+ for (i = 0; i < ecc->steps; i++) {
+ pos = ecc->size + i * (ecc->size + ecc->bytes);
+
+ if (i == ecc->steps - 1)
+ /* The last chunk includes OOB free */
+ len = writesize + oobsize - pos - oob_skip;
+ else
+ len = ecc->bytes;
+
+ if (pos >= writesize) {
+ pos += oob_skip;
+ } else if (pos + len > writesize) {
+			/* This chunk overlaps the BBM area. Must be split */
+ ret = denali_change_column(chip, pos, buf,
+ writesize - pos, write);
+ if (ret)
+ return ret;
+
+ buf += writesize - pos;
+ len -= writesize - pos;
+ pos = writesize + oob_skip;
+ }
+
+ ret = denali_change_column(chip, pos, buf, len, write);
+ if (ret)
+ return ret;
+
+ buf += len;
+ }
+
+ return 0;
+}
+
+static int denali_read_raw(struct nand_chip *chip, void *buf, void *oob_buf,
+ int page)
+{
+ int ret;
+
+ if (!buf && !oob_buf)
+ return -EINVAL;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ if (buf) {
+ ret = denali_payload_xfer(chip, buf, false);
+ if (ret)
+ return ret;
+ }
+
+ if (oob_buf) {
+ ret = denali_oob_xfer(chip, oob_buf, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int denali_write_raw(struct nand_chip *chip, const void *buf,
+ const void *oob_buf, int page)
+{
+ int ret;
+
+ if (!buf && !oob_buf)
+ return -EINVAL;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ if (buf) {
+ ret = denali_payload_xfer(chip, (void *)buf, true);
+ if (ret)
+ return ret;
+ }
+
+ if (oob_buf) {
+ ret = denali_oob_xfer(chip, (void *)oob_buf, true);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int denali_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ return denali_read_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
+ page);
+}
+
+static int denali_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ return denali_write_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
+ page);
+}
+
+static int denali_read_oob(struct nand_chip *chip, int page)
+{
+ return denali_read_raw(chip, NULL, chip->oob_poi, page);
+}
+
+static int denali_write_oob(struct nand_chip *chip, int page)
+{
+ return denali_write_raw(chip, NULL, chip->oob_poi, page);
+}
+
+static int denali_check_erased_page(struct nand_chip *chip, u8 *buf,
+ unsigned long uncor_ecc_flags,
+ unsigned int max_bitflips)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
+ int i, stat;
+
+ for (i = 0; i < ecc->steps; i++) {
+ if (!(uncor_ecc_flags & BIT(i)))
+ continue;
+
+ stat = nand_check_erased_ecc_chunk(buf, ecc->size, ecc_code,
+ ecc->bytes, NULL, 0,
+ ecc->strength);
+ if (stat < 0) {
+ ecc_stats->failed++;
+ } else {
+ ecc_stats->corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+
+ buf += ecc->size;
+ ecc_code += ecc->bytes;
+ }
+
+ return max_bitflips;
+}
+
+static int denali_hw_ecc_fixup(struct nand_chip *chip,
+ unsigned long *uncor_ecc_flags)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
+ int bank = denali->active_bank;
+ u32 ecc_cor;
+ unsigned int max_bitflips;
+
+ ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
+ ecc_cor >>= ECC_COR_INFO__SHIFT(bank);
+
+ if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
+ /*
+		 * This flag is set when an uncorrectable error occurs in at
+		 * least one ECC sector. We cannot know how many sectors, or
+		 * which sector(s), so the erased-page check must run on all
+		 * sectors.
+ */
+ *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
+ return 0;
+ }
+
+ max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);
+
+ /*
+ * The register holds the maximum of per-sector corrected bitflips.
+ * This is suitable for the return value of the ->read_page() callback.
+	 * Unfortunately, we cannot know the total number of corrected bits in
+	 * the page. Increase the stats by max_bitflips as a compromise.
+ */
+ ecc_stats->corrected += max_bitflips;
+
+ return max_bitflips;
+}
+
+static int denali_sw_ecc_fixup(struct nand_chip *chip,
+ unsigned long *uncor_ecc_flags, u8 *buf)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
+ unsigned int ecc_size = chip->ecc.size;
+ unsigned int bitflips = 0;
+ unsigned int max_bitflips = 0;
+ u32 err_addr, err_cor_info;
+ unsigned int err_byte, err_sector, err_device;
+ u8 err_cor_value;
+ unsigned int prev_sector = 0;
+ u32 irq_status;
+
+ denali_reset_irq(denali);
+
+ do {
+ err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
+ err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
+ err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);
+
+ err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
+ err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
+ err_cor_info);
+ err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
+ err_cor_info);
+
+ /* reset the bitflip counter when crossing ECC sector */
+ if (err_sector != prev_sector)
+ bitflips = 0;
+
+ if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
+ /*
+ * Check later if this is a real ECC error, or
+ * an erased sector.
+ */
+ *uncor_ecc_flags |= BIT(err_sector);
+ } else if (err_byte < ecc_size) {
+ /*
+			 * If err_byte is larger than ecc_size, the error
+			 * happened in the OOB area, so we ignore it; there is
+			 * no need to correct it. err_device identifies which
+			 * NAND device the error bits occurred in when more
+			 * than one NAND chip is connected.
+ */
+ int offset;
+ unsigned int flips_in_byte;
+
+ offset = (err_sector * ecc_size + err_byte) *
+ denali->devs_per_cs + err_device;
+
+ /* correct the ECC error */
+ flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
+ buf[offset] ^= err_cor_value;
+ ecc_stats->corrected += flips_in_byte;
+ bitflips += flips_in_byte;
+
+ max_bitflips = max(max_bitflips, bitflips);
+ }
+
+ prev_sector = err_sector;
+ } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));
+
+ /*
+	 * Once all ECC errors have been handled, the controller triggers an
+	 * ECC_TRANSACTION_DONE interrupt.
+ */
+ irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
+ if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
+ return -EIO;
+
+ return max_bitflips;
+}
+
+static void denali_setup_dma64(struct denali_controller *denali,
+ dma_addr_t dma_addr, int page, bool write)
+{
+ u32 mode;
+ const int page_count = 1;
+
+ mode = DENALI_MAP10 | DENALI_BANK(denali) | page;
+
+ /* DMA is a three step process */
+
+ /*
+ * 1. setup transfer type, interrupt when complete,
+ * burst len = 64 bytes, the number of pages
+ */
+ denali->host_write(denali, mode,
+ 0x01002000 | (64 << 16) |
+ (write ? BIT(8) : 0) | page_count);
+
+ /* 2. set memory low address */
+ denali->host_write(denali, mode, lower_32_bits(dma_addr));
+
+ /* 3. set memory high address */
+ denali->host_write(denali, mode, upper_32_bits(dma_addr));
+}
+
+static void denali_setup_dma32(struct denali_controller *denali,
+ dma_addr_t dma_addr, int page, bool write)
+{
+ u32 mode;
+ const int page_count = 1;
+
+ mode = DENALI_MAP10 | DENALI_BANK(denali);
+
+ /* DMA is a four step process */
+
+ /* 1. setup transfer type and # of pages */
+ denali->host_write(denali, mode | page,
+ 0x2000 | (write ? BIT(8) : 0) | page_count);
+
+ /* 2. set memory high address bits 23:8 */
+ denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
+
+ /* 3. set memory low address bits 23:8 */
+ denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
+
+ /* 4. interrupt when complete, burst len = 64 bytes */
+ denali->host_write(denali, mode | 0x14000, 0x2400);
+}
+
+static int denali_pio_read(struct denali_controller *denali, u32 *buf,
+ size_t size, int page)
+{
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
+ u32 irq_status, ecc_err_mask;
+ int i;
+
+ if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
+ ecc_err_mask = INTR__ECC_UNCOR_ERR;
+ else
+ ecc_err_mask = INTR__ECC_ERR;
+
+ denali_reset_irq(denali);
+
+ for (i = 0; i < size / 4; i++)
+ buf[i] = denali->host_read(denali, addr);
+
+ irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
+ if (!(irq_status & INTR__PAGE_XFER_INC))
+ return -EIO;
+
+ if (irq_status & INTR__ERASED_PAGE)
+ memset(buf, 0xff, size);
+
+ return irq_status & ecc_err_mask ? -EBADMSG : 0;
+}
+
+static int denali_pio_write(struct denali_controller *denali, const u32 *buf,
+ size_t size, int page)
+{
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
+ u32 irq_status;
+ int i;
+
+ denali_reset_irq(denali);
+
+ for (i = 0; i < size / 4; i++)
+ denali->host_write(denali, addr, buf[i]);
+
+ irq_status = denali_wait_for_irq(denali,
+ INTR__PROGRAM_COMP |
+ INTR__PROGRAM_FAIL);
+ if (!(irq_status & INTR__PROGRAM_COMP))
+ return -EIO;
+
+ return 0;
+}
+
+static int denali_pio_xfer(struct denali_controller *denali, void *buf,
+ size_t size, int page, bool write)
+{
+ if (write)
+ return denali_pio_write(denali, buf, size, page);
+ else
+ return denali_pio_read(denali, buf, size, page);
+}
+
+static int denali_dma_xfer(struct denali_controller *denali, void *buf,
+ size_t size, int page, bool write)
+{
+ dma_addr_t dma_addr;
+ u32 irq_mask, irq_status, ecc_err_mask;
+ enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ int ret = 0;
+
+ dma_addr = dma_map_single(denali->dev, buf, size, dir);
+ if (dma_mapping_error(denali->dev, dma_addr)) {
+ dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
+ return denali_pio_xfer(denali, buf, size, page, write);
+ }
+
+ if (write) {
+ /*
+ * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
+ * We can use INTR__DMA_CMD_COMP instead. This flag is asserted
+ * when the page program is completed.
+ */
+ irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
+ ecc_err_mask = 0;
+ } else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
+ irq_mask = INTR__DMA_CMD_COMP;
+ ecc_err_mask = INTR__ECC_UNCOR_ERR;
+ } else {
+ irq_mask = INTR__DMA_CMD_COMP;
+ ecc_err_mask = INTR__ECC_ERR;
+ }
+
+ iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
+ /*
+ * The ->setup_dma() hook kicks DMA by using the data/command
+ * interface, which belongs to a different AXI port from the
+ * register interface. Read back the register to avoid a race.
+ */
+ ioread32(denali->reg + DMA_ENABLE);
+
+ denali_reset_irq(denali);
+ denali->setup_dma(denali, dma_addr, page, write);
+
+ irq_status = denali_wait_for_irq(denali, irq_mask);
+ if (!(irq_status & INTR__DMA_CMD_COMP))
+ ret = -EIO;
+ else if (irq_status & ecc_err_mask)
+ ret = -EBADMSG;
+
+ iowrite32(0, denali->reg + DMA_ENABLE);
+
+ dma_unmap_single(denali->dev, dma_addr, size, dir);
+
+ if (irq_status & INTR__ERASED_PAGE)
+ memset(buf, 0xff, size);
+
+ return ret;
+}
+
+static int denali_page_xfer(struct nand_chip *chip, void *buf, size_t size,
+ int page, bool write)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+
+ denali_select_target(chip, chip->cur_cs);
+
+ if (denali->dma_avail)
+ return denali_dma_xfer(denali, buf, size, page, write);
+ else
+ return denali_pio_xfer(denali, buf, size, page, write);
+}
+
+static int denali_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned long uncor_ecc_flags = 0;
+ int stat = 0;
+ int ret;
+
+ ret = denali_page_xfer(chip, buf, mtd->writesize, page, false);
+ if (ret && ret != -EBADMSG)
+ return ret;
+
+ if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
+ stat = denali_hw_ecc_fixup(chip, &uncor_ecc_flags);
+ else if (ret == -EBADMSG)
+ stat = denali_sw_ecc_fixup(chip, &uncor_ecc_flags, buf);
+
+ if (stat < 0)
+ return stat;
+
+ if (uncor_ecc_flags) {
+ ret = denali_read_oob(chip, page);
+ if (ret)
+ return ret;
+
+ stat = denali_check_erased_page(chip, buf,
+ uncor_ecc_flags, stat);
+ }
+
+ return stat;
+}
+
+static int denali_write_page(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true);
+}
+
+static int denali_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ static const unsigned int data_setup_on_host = 10000;
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct denali_chip_sel *sel;
+ const struct nand_sdr_timings *timings;
+ unsigned long t_x, mult_x;
+ int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
+ int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
+ int addr_2_data_mask;
+ u32 tmp;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ /* clk_x period in picoseconds */
+ t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
+ if (!t_x)
+ return -EINVAL;
+
+ /*
+ * The bus interface clock, clk_x, is phase aligned with the core clock.
+	 * The clk_x is an integer multiple N of the core clock. The value N is
+	 * configured at IP delivery time, and its allowed values are 4, 5, 6.
+ */
+ mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
+ if (mult_x < 4 || mult_x > 6)
+ return -EINVAL;
+
+ if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ sel = &to_denali_chip(chip)->sels[chipnr];
+
+ /* tRWH -> RE_2_WE */
+ re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
+ re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);
+
+ tmp = ioread32(denali->reg + RE_2_WE);
+ tmp &= ~RE_2_WE__VALUE;
+ tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
+ sel->re_2_we = tmp;
+
+ /* tRHZ -> RE_2_RE */
+ re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
+ re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);
+
+ tmp = ioread32(denali->reg + RE_2_RE);
+ tmp &= ~RE_2_RE__VALUE;
+ tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
+ sel->re_2_re = tmp;
+
+ /*
+ * tCCS, tWHR -> WE_2_RE
+ *
+ * With WE_2_RE properly set, the Denali controller automatically takes
+ * care of the delay; the driver need not set NAND_WAIT_TCCS.
+ */
+ we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
+ we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
+
+ tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
+ tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
+ tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
+ sel->hwhr2_and_we_2_re = tmp;
+
+ /* tADL -> ADDR_2_DATA */
+
+	/* for older versions, ADDR_2_DATA is only 6 bits wide */
+ addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
+ if (denali->revision < 0x0501)
+ addr_2_data_mask >>= 1;
+
+ addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
+ addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
+
+ tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
+ tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
+ tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
+ sel->tcwaw_and_addr_2_data = tmp;
+
+ /* tREH, tWH -> RDWR_EN_HI_CNT */
+ rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
+ t_x);
+ rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);
+
+ tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
+ tmp &= ~RDWR_EN_HI_CNT__VALUE;
+ tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
+ sel->rdwr_en_hi_cnt = tmp;
+
+ /*
+ * tREA -> ACC_CLKS
+ * tRP, tWP, tRHOH, tRC, tWC -> RDWR_EN_LO_CNT
+ */
+
+ /*
+	 * Determine the minimum acc_clks needed to meet the setup timing when
+	 * capturing the incoming data.
+	 *
+	 * The delay on the chip side is well-defined as tREA, but we need to
+	 * take additional delay into account. This includes a certain degree
+	 * of uncertainty, such as signal propagation delays on the PCB and
+	 * in the SoC, the load capacitance of the I/O pins, etc.
+ */
+ acc_clks = DIV_ROUND_UP(timings->tREA_max + data_setup_on_host, t_x);
+
+	/* Determine the minimum rdwr_en_lo_cnt from the RE#/WE# pulse width */
+ rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
+
+ /* Extend rdwr_en_lo to meet the data hold timing */
+ rdwr_en_lo = max_t(int, rdwr_en_lo,
+ acc_clks - timings->tRHOH_min / t_x);
+
+ /* Extend rdwr_en_lo to meet the requirement for RE#/WE# cycle time */
+ rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
+ t_x);
+ rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
+ rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);
+
+ /* Center the data latch timing for extra safety */
+ acc_clks = (acc_clks + rdwr_en_lo +
+ DIV_ROUND_UP(timings->tRHOH_min, t_x)) / 2;
+ acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
+
+ tmp = ioread32(denali->reg + ACC_CLKS);
+ tmp &= ~ACC_CLKS__VALUE;
+ tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
+ sel->acc_clks = tmp;
+
+ tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
+ tmp &= ~RDWR_EN_LO_CNT__VALUE;
+ tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
+ sel->rdwr_en_lo_cnt = tmp;
+
+ /* tCS, tCEA -> CS_SETUP_CNT */
+ cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
+ (int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
+ 0);
+ cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);
+
+ tmp = ioread32(denali->reg + CS_SETUP_CNT);
+ tmp &= ~CS_SETUP_CNT__VALUE;
+ tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
+ sel->cs_setup_cnt = tmp;
+
+ return 0;
+}
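+
+/*
+ * Numeric illustration (values assumed, not from a datasheet): with
+ * clk_x at 200 MHz, t_x = 5000 ps. For tREA_max = 16 ns plus the 10 ns
+ * host-side data setup, acc_clks starts at DIV_ROUND_UP(26000, 5000) =
+ * 6 cycles before the centering adjustment above.
+ */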
+
+int denali_calc_ecc_bytes(int step_size, int strength)
+{
+	/* BCH code. Denali requires ecc.bytes to be a multiple of 2 */
+ return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
+}
+EXPORT_SYMBOL(denali_calc_ecc_bytes);
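+
+/*
+ * For example, a 512-byte step with strength 8 needs
+ * DIV_ROUND_UP(8 * fls(4096), 16) * 2 = DIV_ROUND_UP(104, 16) * 2 = 14
+ * bytes; a 1024-byte step with strength 24 needs 42.
+ */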
+
+static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct denali_controller *denali = to_denali_controller(chip);
+
+ if (section > 0)
+ return -ERANGE;
+
+ oobregion->offset = denali->oob_skip_bytes;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int denali_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct denali_controller *denali = to_denali_controller(chip);
+
+ if (section > 0)
+ return -ERANGE;
+
+ oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
+ .ecc = denali_ooblayout_ecc,
+ .free = denali_ooblayout_free,
+};
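+
+/*
+ * With assumed numbers (oob_skip_bytes = 8, ecc.total = 56, oobsize =
+ * 128), this layout places the ECC in OOB bytes 8-63 and leaves bytes
+ * 64-127 free, with the first 8 bytes reserved for the BBM area that
+ * the controller skips.
+ */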
+
+static int denali_multidev_fixup(struct nand_chip *chip)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /*
+	 * Support for multiple devices:
+	 * When the IP configuration is x16 capable and two x8 chips are
+	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
+	 * The core framework knows nothing about this fact, so we must
+	 * tell it the _logical_ pagesize and related geometry.
+ */
+ denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);
+
+ /*
+ * On some SoCs, DEVICES_CONNECTED is not auto-detected.
+	 * For those, DEVICES_CONNECTED is left at 0. Set it to 1 in that case.
+ */
+ if (denali->devs_per_cs == 0) {
+ denali->devs_per_cs = 1;
+ iowrite32(1, denali->reg + DEVICES_CONNECTED);
+ }
+
+ if (denali->devs_per_cs == 1)
+ return 0;
+
+ if (denali->devs_per_cs != 2) {
+ dev_err(denali->dev, "unsupported number of devices %d\n",
+ denali->devs_per_cs);
+ return -EINVAL;
+ }
+
+ /* 2 chips in parallel */
+ memorg->pagesize <<= 1;
+ memorg->oobsize <<= 1;
+ mtd->size <<= 1;
+ mtd->erasesize <<= 1;
+ mtd->writesize <<= 1;
+ mtd->oobsize <<= 1;
+ chip->page_shift += 1;
+ chip->phys_erase_shift += 1;
+ chip->bbt_erase_shift += 1;
+ chip->chip_shift += 1;
+ chip->pagemask <<= 1;
+ chip->ecc.size <<= 1;
+ chip->ecc.bytes <<= 1;
+ chip->ecc.strength <<= 1;
+ denali->oob_skip_bytes <<= 1;
+
+ return 0;
+}
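+
+/*
+ * Example: two x8 chips behind one chip select (DEVICES_CONNECTED = 2)
+ * with 2048-byte pages present themselves as one logical 4096-byte-page
+ * device; every size, shift and ECC parameter above is doubled so the
+ * core framework sees the combined geometry.
+ */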
+
+static int denali_attach_chip(struct nand_chip *chip)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
+ mtd->oobsize - denali->oob_skip_bytes);
+ if (ret) {
+ dev_err(denali->dev, "Failed to setup ECC settings.\n");
+ return ret;
+ }
+
+ dev_dbg(denali->dev,
+ "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
+ chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
+
+ ret = denali_multidev_fixup(chip);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void denali_exec_in8(struct denali_controller *denali, u32 type,
+ u8 *buf, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = denali->host_read(denali, type | DENALI_BANK(denali));
+}
+
+static void denali_exec_in16(struct denali_controller *denali, u32 type,
+ u8 *buf, unsigned int len)
+{
+ u32 data;
+ int i;
+
+ for (i = 0; i < len; i += 2) {
+ data = denali->host_read(denali, type | DENALI_BANK(denali));
+ /* bit 31:24 and 15:8 are used for DDR */
+ buf[i] = data;
+ buf[i + 1] = data >> 16;
+ }
+}
+
+static void denali_exec_in(struct denali_controller *denali, u32 type,
+ u8 *buf, unsigned int len, bool width16)
+{
+ if (width16)
+ denali_exec_in16(denali, type, buf, len);
+ else
+ denali_exec_in8(denali, type, buf, len);
+}
+
+static void denali_exec_out8(struct denali_controller *denali, u32 type,
+ const u8 *buf, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ denali->host_write(denali, type | DENALI_BANK(denali), buf[i]);
+}
+
+static void denali_exec_out16(struct denali_controller *denali, u32 type,
+ const u8 *buf, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i += 2)
+ denali->host_write(denali, type | DENALI_BANK(denali),
+ buf[i + 1] << 16 | buf[i]);
+}
+
+static void denali_exec_out(struct denali_controller *denali, u32 type,
+ const u8 *buf, unsigned int len, bool width16)
+{
+ if (width16)
+ denali_exec_out16(denali, type, buf, len);
+ else
+ denali_exec_out8(denali, type, buf, len);
+}
+
+static int denali_exec_waitrdy(struct denali_controller *denali)
+{
+ u32 irq_stat;
+
+ /* R/B# pin transitioned from low to high? */
+ irq_stat = denali_wait_for_irq(denali, INTR__INT_ACT);
+
+ /* Just in case nand_operation has multiple NAND_OP_WAITRDY_INSTR. */
+ denali_reset_irq(denali);
+
+ return irq_stat & INTR__INT_ACT ? 0 : -EIO;
+}
+
+static int denali_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct denali_controller *denali = to_denali_controller(chip);
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ denali_exec_out8(denali, DENALI_MAP11_CMD,
+ &instr->ctx.cmd.opcode, 1);
+ return 0;
+ case NAND_OP_ADDR_INSTR:
+ denali_exec_out8(denali, DENALI_MAP11_ADDR,
+ instr->ctx.addr.addrs,
+ instr->ctx.addr.naddrs);
+ return 0;
+ case NAND_OP_DATA_IN_INSTR:
+ denali_exec_in(denali, DENALI_MAP11_DATA,
+ instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ !instr->ctx.data.force_8bit &&
+ chip->options & NAND_BUSWIDTH_16);
+ return 0;
+ case NAND_OP_DATA_OUT_INSTR:
+ denali_exec_out(denali, DENALI_MAP11_DATA,
+ instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ !instr->ctx.data.force_8bit &&
+ chip->options & NAND_BUSWIDTH_16);
+ return 0;
+ case NAND_OP_WAITRDY_INSTR:
+ return denali_exec_waitrdy(denali);
+ default:
+ WARN_ONCE(1, "unsupported NAND instruction type: %d\n",
+ instr->type);
+
+ return -EINVAL;
+ }
+}
+
+static int denali_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ int i, ret;
+
+ if (check_only)
+ return 0;
+
+ denali_select_target(chip, op->cs);
+
+ /*
+ * Some commands contain NAND_OP_WAITRDY_INSTR.
+ * irq must be cleared here to catch the R/B# interrupt there.
+ */
+ denali_reset_irq(to_denali_controller(chip));
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = denali_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops denali_controller_ops = {
+ .attach_chip = denali_attach_chip,
+ .exec_op = denali_exec_op,
+ .setup_interface = denali_setup_interface,
+};
+
+int denali_chip_init(struct denali_controller *denali,
+ struct denali_chip *dchip)
+{
+ struct nand_chip *chip = &dchip->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct denali_chip *dchip2;
+ int i, j, ret;
+
+ chip->controller = &denali->controller;
+
+ /* sanity checks for bank numbers */
+ for (i = 0; i < dchip->nsels; i++) {
+ unsigned int bank = dchip->sels[i].bank;
+
+ if (bank >= denali->nbanks) {
+ dev_err(denali->dev, "unsupported bank %d\n", bank);
+ return -EINVAL;
+ }
+
+ for (j = 0; j < i; j++) {
+ if (bank == dchip->sels[j].bank) {
+ dev_err(denali->dev,
+ "bank %d is assigned twice in the same chip\n",
+ bank);
+ return -EINVAL;
+ }
+ }
+
+ list_for_each_entry(dchip2, &denali->chips, node) {
+ for (j = 0; j < dchip2->nsels; j++) {
+ if (bank == dchip2->sels[j].bank) {
+ dev_err(denali->dev,
+ "bank %d is already used\n",
+ bank);
+ return -EINVAL;
+ }
+ }
+ }
+ }
+
+ mtd->dev.parent = denali->dev;
+
+ /*
+	 * Fall back to the default name if DT did not provide a "label"
+	 * property. Only the first chip may use the default; the "label"
+	 * property must be used when multiple chips are connected.
+ */
+ if (!mtd->name && list_empty(&denali->chips))
+ mtd->name = "denali-nand";
+
+ if (denali->dma_avail) {
+ chip->options |= NAND_USES_DMA;
+ chip->buf_align = 16;
+ }
+
+ /* clk rate info is needed for setup_interface */
+ if (!denali->clk_rate || !denali->clk_x_rate)
+ chip->options |= NAND_KEEP_TIMINGS;
+
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
+ chip->ecc.read_page = denali_read_page;
+ chip->ecc.write_page = denali_write_page;
+ chip->ecc.read_page_raw = denali_read_page_raw;
+ chip->ecc.write_page_raw = denali_write_page_raw;
+ chip->ecc.read_oob = denali_read_oob;
+ chip->ecc.write_oob = denali_write_oob;
+
+ mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
+
+ ret = nand_scan(chip, dchip->nsels);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
+ goto cleanup_nand;
+ }
+
+ list_add_tail(&dchip->node, &denali->chips);
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(chip);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(denali_chip_init);
+
+int denali_init(struct denali_controller *denali)
+{
+ u32 features = ioread32(denali->reg + FEATURES);
+ int ret;
+
+ nand_controller_init(&denali->controller);
+ denali->controller.ops = &denali_controller_ops;
+ init_completion(&denali->complete);
+ spin_lock_init(&denali->irq_lock);
+ INIT_LIST_HEAD(&denali->chips);
+ denali->active_bank = DENALI_INVALID_BANK;
+
+ /*
+ * The REVISION register may not be reliable. Platforms are allowed to
+ * override it.
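+	 * The raw value is byte-swapped: e.g. a readout of 0x0105 yields
+	 * revision 0x0501 (v5.1).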
+ */
+ if (!denali->revision)
+ denali->revision = swab16(ioread32(denali->reg + REVISION));
+
+ denali->nbanks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
+
+ /* the encoding changed from rev 5.0 to 5.1 */
+ if (denali->revision < 0x0501)
+ denali->nbanks <<= 1;
+
+ if (features & FEATURES__DMA)
+ denali->dma_avail = true;
+
+ if (denali->dma_avail) {
+ int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;
+
+ ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
+ if (ret) {
+ dev_info(denali->dev,
+ "Failed to set DMA mask. Disabling DMA.\n");
+ denali->dma_avail = false;
+ }
+ }
+
+ if (denali->dma_avail) {
+ if (denali->caps & DENALI_CAP_DMA_64BIT)
+ denali->setup_dma = denali_setup_dma64;
+ else
+ denali->setup_dma = denali_setup_dma32;
+ }
+
+ if (features & FEATURES__INDEX_ADDR) {
+ denali->host_read = denali_indexed_read;
+ denali->host_write = denali_indexed_write;
+ } else {
+ denali->host_read = denali_direct_read;
+ denali->host_write = denali_direct_write;
+ }
+
+ /*
+ * Set how many bytes should be skipped before writing data in OOB.
+	 * If a platform requests a non-zero value, write it to the register.
+ * Otherwise, read the value out, expecting it has already been set up
+ * by firmware.
+ */
+ if (denali->oob_skip_bytes)
+ iowrite32(denali->oob_skip_bytes,
+ denali->reg + SPARE_AREA_SKIP_BYTES);
+ else
+ denali->oob_skip_bytes = ioread32(denali->reg +
+ SPARE_AREA_SKIP_BYTES);
+
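+	/*
+	 * Illustration of the initial register setup below: with nbanks = 4,
+	 * the RB_PIN_ENABLED write is GENMASK(3, 0) = 0xf, i.e. the R/B#
+	 * pin is enabled on banks 0-3.
+	 */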
+ iowrite32(0, denali->reg + TRANSFER_SPARE_REG);
+ iowrite32(GENMASK(denali->nbanks - 1, 0), denali->reg + RB_PIN_ENABLED);
+ iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
+ iowrite32(ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
+ iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
+ iowrite32(WRITE_PROTECT__FLAG, denali->reg + WRITE_PROTECT);
+
+ denali_clear_irq_all(denali);
+
+ ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
+ IRQF_SHARED, DENALI_NAND_NAME, denali);
+ if (ret) {
+ dev_err(denali->dev, "Unable to request IRQ\n");
+ return ret;
+ }
+
+ denali_enable_irq(denali);
+
+ return 0;
+}
+EXPORT_SYMBOL(denali_init);
+
+void denali_remove(struct denali_controller *denali)
+{
+ struct denali_chip *dchip, *tmp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(dchip, tmp, &denali->chips, node) {
+ chip = &dchip->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&dchip->node);
+ }
+
+ denali_disable_irq(denali);
+}
+EXPORT_SYMBOL(denali_remove);
+
+MODULE_DESCRIPTION("Driver core for Denali NAND controller");
+MODULE_AUTHOR("Intel Corporation and its suppliers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
new file mode 100644
index 0000000000..ac46eb7956
--- /dev/null
+++ b/drivers/mtd/nand/raw/denali.h
@@ -0,0 +1,398 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009 - 2010, Intel Corporation and its suppliers.
+ */
+
+#ifndef __DENALI_H__
+#define __DENALI_H__
+
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#define DEVICE_RESET 0x0
+#define DEVICE_RESET__BANK(bank) BIT(bank)
+
+#define TRANSFER_SPARE_REG 0x10
+#define TRANSFER_SPARE_REG__FLAG BIT(0)
+
+#define LOAD_WAIT_CNT 0x20
+#define LOAD_WAIT_CNT__VALUE GENMASK(15, 0)
+
+#define PROGRAM_WAIT_CNT 0x30
+#define PROGRAM_WAIT_CNT__VALUE GENMASK(15, 0)
+
+#define ERASE_WAIT_CNT 0x40
+#define ERASE_WAIT_CNT__VALUE GENMASK(15, 0)
+
+#define INT_MON_CYCCNT 0x50
+#define INT_MON_CYCCNT__VALUE GENMASK(15, 0)
+
+#define RB_PIN_ENABLED 0x60
+#define RB_PIN_ENABLED__BANK(bank) BIT(bank)
+
+#define MULTIPLANE_OPERATION 0x70
+#define MULTIPLANE_OPERATION__FLAG BIT(0)
+
+#define MULTIPLANE_READ_ENABLE 0x80
+#define MULTIPLANE_READ_ENABLE__FLAG BIT(0)
+
+#define COPYBACK_DISABLE 0x90
+#define COPYBACK_DISABLE__FLAG BIT(0)
+
+#define CACHE_WRITE_ENABLE 0xa0
+#define CACHE_WRITE_ENABLE__FLAG BIT(0)
+
+#define CACHE_READ_ENABLE 0xb0
+#define CACHE_READ_ENABLE__FLAG BIT(0)
+
+#define PREFETCH_MODE 0xc0
+#define PREFETCH_MODE__PREFETCH_EN BIT(0)
+#define PREFETCH_MODE__PREFETCH_BURST_LENGTH GENMASK(15, 4)
+
+#define CHIP_ENABLE_DONT_CARE 0xd0
+#define CHIP_EN_DONT_CARE__FLAG BIT(0)
+
+#define ECC_ENABLE 0xe0
+#define ECC_ENABLE__FLAG BIT(0)
+
+#define GLOBAL_INT_ENABLE 0xf0
+#define GLOBAL_INT_EN_FLAG BIT(0)
+
+#define TWHR2_AND_WE_2_RE 0x100
+#define TWHR2_AND_WE_2_RE__WE_2_RE GENMASK(5, 0)
+#define TWHR2_AND_WE_2_RE__TWHR2 GENMASK(13, 8)
+
+#define TCWAW_AND_ADDR_2_DATA 0x110
+/* The width of ADDR_2_DATA is 6 bit for old IP, 7 bit for new IP */
+#define TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA GENMASK(6, 0)
+#define TCWAW_AND_ADDR_2_DATA__TCWAW GENMASK(13, 8)
+
+#define RE_2_WE 0x120
+#define RE_2_WE__VALUE GENMASK(5, 0)
+
+#define ACC_CLKS 0x130
+#define ACC_CLKS__VALUE GENMASK(3, 0)
+
+#define NUMBER_OF_PLANES 0x140
+#define NUMBER_OF_PLANES__VALUE GENMASK(2, 0)
+
+#define PAGES_PER_BLOCK 0x150
+#define PAGES_PER_BLOCK__VALUE GENMASK(15, 0)
+
+#define DEVICE_WIDTH 0x160
+#define DEVICE_WIDTH__VALUE GENMASK(1, 0)
+
+#define DEVICE_MAIN_AREA_SIZE 0x170
+#define DEVICE_MAIN_AREA_SIZE__VALUE GENMASK(15, 0)
+
+#define DEVICE_SPARE_AREA_SIZE 0x180
+#define DEVICE_SPARE_AREA_SIZE__VALUE GENMASK(15, 0)
+
+#define TWO_ROW_ADDR_CYCLES 0x190
+#define TWO_ROW_ADDR_CYCLES__FLAG BIT(0)
+
+#define MULTIPLANE_ADDR_RESTRICT 0x1a0
+#define MULTIPLANE_ADDR_RESTRICT__FLAG BIT(0)
+
+#define ECC_CORRECTION 0x1b0
+#define ECC_CORRECTION__VALUE GENMASK(4, 0)
+#define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16)
+
+#define READ_MODE 0x1c0
+#define READ_MODE__VALUE GENMASK(3, 0)
+
+#define WRITE_MODE 0x1d0
+#define WRITE_MODE__VALUE GENMASK(3, 0)
+
+#define COPYBACK_MODE 0x1e0
+#define COPYBACK_MODE__VALUE GENMASK(3, 0)
+
+#define RDWR_EN_LO_CNT 0x1f0
+#define RDWR_EN_LO_CNT__VALUE GENMASK(4, 0)
+
+#define RDWR_EN_HI_CNT 0x200
+#define RDWR_EN_HI_CNT__VALUE GENMASK(4, 0)
+
+#define MAX_RD_DELAY 0x210
+#define MAX_RD_DELAY__VALUE GENMASK(3, 0)
+
+#define CS_SETUP_CNT 0x220
+#define CS_SETUP_CNT__VALUE GENMASK(4, 0)
+#define CS_SETUP_CNT__TWB GENMASK(17, 12)
+
+#define SPARE_AREA_SKIP_BYTES 0x230
+#define SPARE_AREA_SKIP_BYTES__VALUE GENMASK(5, 0)
+
+#define SPARE_AREA_MARKER 0x240
+#define SPARE_AREA_MARKER__VALUE GENMASK(15, 0)
+
+#define DEVICES_CONNECTED 0x250
+#define DEVICES_CONNECTED__VALUE GENMASK(2, 0)
+
+#define DIE_MASK 0x260
+#define DIE_MASK__VALUE GENMASK(7, 0)
+
+#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
+#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE GENMASK(15, 0)
+
+#define WRITE_PROTECT 0x280
+#define WRITE_PROTECT__FLAG BIT(0)
+
+#define RE_2_RE 0x290
+#define RE_2_RE__VALUE GENMASK(5, 0)
+
+#define MANUFACTURER_ID 0x300
+#define MANUFACTURER_ID__VALUE GENMASK(7, 0)
+
+#define DEVICE_ID 0x310
+#define DEVICE_ID__VALUE GENMASK(7, 0)
+
+#define DEVICE_PARAM_0 0x320
+#define DEVICE_PARAM_0__VALUE GENMASK(7, 0)
+
+#define DEVICE_PARAM_1 0x330
+#define DEVICE_PARAM_1__VALUE GENMASK(7, 0)
+
+#define DEVICE_PARAM_2 0x340
+#define DEVICE_PARAM_2__VALUE GENMASK(7, 0)
+
+#define LOGICAL_PAGE_DATA_SIZE 0x350
+#define LOGICAL_PAGE_DATA_SIZE__VALUE GENMASK(15, 0)
+
+#define LOGICAL_PAGE_SPARE_SIZE 0x360
+#define LOGICAL_PAGE_SPARE_SIZE__VALUE GENMASK(15, 0)
+
+#define REVISION 0x370
+#define REVISION__VALUE GENMASK(15, 0)
+
+#define ONFI_DEVICE_FEATURES 0x380
+#define ONFI_DEVICE_FEATURES__VALUE GENMASK(5, 0)
+
+#define ONFI_OPTIONAL_COMMANDS 0x390
+#define ONFI_OPTIONAL_COMMANDS__VALUE GENMASK(5, 0)
+
+#define ONFI_TIMING_MODE 0x3a0
+#define ONFI_TIMING_MODE__VALUE GENMASK(5, 0)
+
+#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
+#define ONFI_PGM_CACHE_TIMING_MODE__VALUE GENMASK(5, 0)
+
+#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
+#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS GENMASK(7, 0)
+#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE BIT(8)
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE GENMASK(15, 0)
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE GENMASK(15, 0)
+
+#define FEATURES 0x3f0
+#define FEATURES__N_BANKS GENMASK(1, 0)
+#define FEATURES__ECC_MAX_ERR GENMASK(5, 2)
+#define FEATURES__DMA BIT(6)
+#define FEATURES__CMD_DMA BIT(7)
+#define FEATURES__PARTITION BIT(8)
+#define FEATURES__XDMA_SIDEBAND BIT(9)
+#define FEATURES__GPREG BIT(10)
+#define FEATURES__INDEX_ADDR BIT(11)
+
+#define TRANSFER_MODE 0x400
+#define TRANSFER_MODE__VALUE GENMASK(1, 0)
+
+#define INTR_STATUS(bank) (0x410 + (bank) * 0x50)
+#define INTR_EN(bank) (0x420 + (bank) * 0x50)
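+/* per-bank register stride is 0x50, e.g. INTR_STATUS(2) = 0x410 + 0xa0 = 0x4b0 */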
+/* bit[1:0] is used differently depending on IP version */
+#define INTR__ECC_UNCOR_ERR BIT(0) /* new IP */
+#define INTR__ECC_TRANSACTION_DONE BIT(0) /* old IP */
+#define INTR__ECC_ERR BIT(1) /* old IP */
+#define INTR__DMA_CMD_COMP BIT(2)
+#define INTR__TIME_OUT BIT(3)
+#define INTR__PROGRAM_FAIL BIT(4)
+#define INTR__ERASE_FAIL BIT(5)
+#define INTR__LOAD_COMP BIT(6)
+#define INTR__PROGRAM_COMP BIT(7)
+#define INTR__ERASE_COMP BIT(8)
+#define INTR__PIPE_CPYBCK_CMD_COMP BIT(9)
+#define INTR__LOCKED_BLK BIT(10)
+#define INTR__UNSUP_CMD BIT(11)
+#define INTR__INT_ACT BIT(12)
+#define INTR__RST_COMP BIT(13)
+#define INTR__PIPE_CMD_ERR BIT(14)
+#define INTR__PAGE_XFER_INC BIT(15)
+#define INTR__ERASED_PAGE BIT(16)
+
+#define PAGE_CNT(bank) (0x430 + (bank) * 0x50)
+#define ERR_PAGE_ADDR(bank) (0x440 + (bank) * 0x50)
+#define ERR_BLOCK_ADDR(bank) (0x450 + (bank) * 0x50)
+
+#define ECC_THRESHOLD 0x600
+#define ECC_THRESHOLD__VALUE GENMASK(9, 0)
+
+#define ECC_ERROR_BLOCK_ADDRESS 0x610
+#define ECC_ERROR_BLOCK_ADDRESS__VALUE GENMASK(15, 0)
+
+#define ECC_ERROR_PAGE_ADDRESS 0x620
+#define ECC_ERROR_PAGE_ADDRESS__VALUE GENMASK(11, 0)
+#define ECC_ERROR_PAGE_ADDRESS__BANK GENMASK(15, 12)
+
+#define ECC_ERROR_ADDRESS 0x630
+#define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0)
+#define ECC_ERROR_ADDRESS__SECTOR GENMASK(15, 12)
+
+#define ERR_CORRECTION_INFO 0x640
+#define ERR_CORRECTION_INFO__BYTE GENMASK(7, 0)
+#define ERR_CORRECTION_INFO__DEVICE GENMASK(11, 8)
+#define ERR_CORRECTION_INFO__UNCOR BIT(14)
+#define ERR_CORRECTION_INFO__LAST_ERR BIT(15)
+
+#define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10)
+#define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8)
+#define ECC_COR_INFO__MAX_ERRORS GENMASK(6, 0)
+#define ECC_COR_INFO__UNCOR_ERR BIT(7)
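+/*
+ * Two banks share one ECC_COR_INFO register, one byte each: e.g. bank 3
+ * is at 0x650 + 3 / 2 * 0x10 = 0x660, shifted by 3 % 2 * 8 = 8 bits.
+ */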
+
+#define CFG_DATA_BLOCK_SIZE 0x6b0
+
+#define CFG_LAST_DATA_BLOCK_SIZE 0x6c0
+
+#define CFG_NUM_DATA_BLOCKS 0x6d0
+
+#define CFG_META_DATA_SIZE 0x6e0
+
+#define DMA_ENABLE 0x700
+#define DMA_ENABLE__FLAG BIT(0)
+
+#define IGNORE_ECC_DONE 0x710
+#define IGNORE_ECC_DONE__FLAG BIT(0)
+
+#define DMA_INTR 0x720
+#define DMA_INTR_EN 0x730
+#define DMA_INTR__TARGET_ERROR BIT(0)
+#define DMA_INTR__DESC_COMP_CHANNEL0 BIT(1)
+#define DMA_INTR__DESC_COMP_CHANNEL1 BIT(2)
+#define DMA_INTR__DESC_COMP_CHANNEL2 BIT(3)
+#define DMA_INTR__DESC_COMP_CHANNEL3 BIT(4)
+#define DMA_INTR__MEMCOPY_DESC_COMP BIT(5)
+
+#define TARGET_ERR_ADDR_LO 0x740
+#define TARGET_ERR_ADDR_LO__VALUE GENMASK(15, 0)
+
+#define TARGET_ERR_ADDR_HI 0x750
+#define TARGET_ERR_ADDR_HI__VALUE GENMASK(15, 0)
+
+#define CHNL_ACTIVE 0x760
+#define CHNL_ACTIVE__CHANNEL0 BIT(0)
+#define CHNL_ACTIVE__CHANNEL1 BIT(1)
+#define CHNL_ACTIVE__CHANNEL2 BIT(2)
+#define CHNL_ACTIVE__CHANNEL3 BIT(3)
+
+/**
+ * struct denali_chip_sel - per-CS data of Denali NAND
+ *
+ * @bank: bank id of the controller this CS is connected to
+ * @hwhr2_and_we_2_re: value of timing register HWHR2_AND_WE_2_RE
+ * @tcwaw_and_addr_2_data: value of timing register TCWAW_AND_ADDR_2_DATA
+ * @re_2_we: value of timing register RE_2_WE
+ * @acc_clks: value of timing register ACC_CLKS
+ * @rdwr_en_lo_cnt: value of timing register RDWR_EN_LO_CNT
+ * @rdwr_en_hi_cnt: value of timing register RDWR_EN_HI_CNT
+ * @cs_setup_cnt: value of timing register CS_SETUP_CNT
+ * @re_2_re: value of timing register RE_2_RE
+ */
+struct denali_chip_sel {
+ int bank;
+ u32 hwhr2_and_we_2_re;
+ u32 tcwaw_and_addr_2_data;
+ u32 re_2_we;
+ u32 acc_clks;
+ u32 rdwr_en_lo_cnt;
+ u32 rdwr_en_hi_cnt;
+ u32 cs_setup_cnt;
+ u32 re_2_re;
+};
+
+/**
+ * struct denali_chip - per-chip data of Denali NAND
+ *
+ * @chip: base NAND chip structure
+ * @node: node to be used to associate this chip with the controller
+ * @nsels: the number of CS lines of this chip
+ * @sels: the array of per-cs data
+ */
+struct denali_chip {
+ struct nand_chip chip;
+ struct list_head node;
+ unsigned int nsels;
+ struct denali_chip_sel sels[];
+};
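+
+/*
+ * sels[] is a flexible array, so a denali_chip instance is expected to be
+ * allocated with struct_size(dchip, sels, nsels), as the DT and PCI front
+ * ends do.
+ */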
+
+/**
+ * struct denali_controller - Denali NAND controller data
+ *
+ * @controller: base NAND controller structure
+ * @dev: device
+ * @chips: the list of chips attached to this controller
+ * @clk_rate: frequency of core clock
+ * @clk_x_rate: frequency of bus interface clock
+ * @reg: base of Register Interface
+ * @host: base of Host Data/Command interface
+ * @complete: completion used to wait for interrupts
+ * @irq: interrupt number
+ * @irq_mask: interrupt bits the controller is waiting for
+ * @irq_status: interrupt bits of events that have happened
+ * @irq_lock: lock to protect @irq_mask and @irq_status
+ * @dma_avail: set if DMA engine is available
+ * @devs_per_cs: number of devices connected in parallel
+ * @oob_skip_bytes: number of bytes in OOB skipped by the ECC engine
+ * @active_bank: active bank id
+ * @nbanks: the number of banks supported by this controller
+ * @revision: IP revision
+ * @caps: controller capabilities that cannot be detected run-time
+ * @ecc_caps: ECC engine capabilities
+ * @host_read: callback for read access of Host Data/Command Interface
+ * @host_write: callback for write access of Host Data/Command Interface
+ * @setup_dma: callback for setup of the Data DMA
+ */
+struct denali_controller {
+ struct nand_controller controller;
+ struct device *dev;
+ struct list_head chips;
+ unsigned long clk_rate;
+ unsigned long clk_x_rate;
+ void __iomem *reg;
+ void __iomem *host;
+ struct completion complete;
+ int irq;
+ u32 irq_mask;
+ u32 irq_status;
+ spinlock_t irq_lock;
+ bool dma_avail;
+ int devs_per_cs;
+ int oob_skip_bytes;
+ int active_bank;
+ int nbanks;
+ unsigned int revision;
+ unsigned int caps;
+ const struct nand_ecc_caps *ecc_caps;
+ u32 (*host_read)(struct denali_controller *denali, u32 addr);
+ void (*host_write)(struct denali_controller *denali, u32 addr,
+ u32 data);
+ void (*setup_dma)(struct denali_controller *denali, dma_addr_t dma_addr,
+ int page, bool write);
+};
+
+#define DENALI_CAP_HW_ECC_FIXUP BIT(0)
+#define DENALI_CAP_DMA_64BIT BIT(1)
+
+int denali_calc_ecc_bytes(int step_size, int strength);
+int denali_chip_init(struct denali_controller *denali,
+ struct denali_chip *dchip);
+int denali_init(struct denali_controller *denali);
+void denali_remove(struct denali_controller *denali);
+
+#endif /* __DENALI_H__ */
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
new file mode 100644
index 0000000000..edac8749bb
--- /dev/null
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NAND Flash Controller Device Driver for DT
+ *
+ * Copyright © 2011, Picochip.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "denali.h"
+
+struct denali_dt {
+ struct denali_controller controller;
+ struct clk *clk; /* core clock */
+ struct clk *clk_x; /* bus interface clock */
+ struct clk *clk_ecc; /* ECC circuit clock */
+ struct reset_control *rst; /* core reset */
+ struct reset_control *rst_reg; /* register reset */
+};
+
+struct denali_dt_data {
+ unsigned int revision;
+ unsigned int caps;
+ unsigned int oob_skip_bytes;
+ const struct nand_ecc_caps *ecc_caps;
+};
+
+NAND_ECC_CAPS_SINGLE(denali_socfpga_ecc_caps, denali_calc_ecc_bytes,
+ 512, 8, 15);
+static const struct denali_dt_data denali_socfpga_data = {
+ .caps = DENALI_CAP_HW_ECC_FIXUP,
+ .oob_skip_bytes = 2,
+ .ecc_caps = &denali_socfpga_ecc_caps,
+};
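+
+/*
+ * NAND_ECC_CAPS_SINGLE() declares a nand_ecc_caps with a single ECC step
+ * size (here 512 bytes) and the selectable strengths listed after it
+ * (8 or 15 bits per step), using denali_calc_ecc_bytes() to compute the
+ * number of ECC bytes per step.
+ */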
+
+NAND_ECC_CAPS_SINGLE(denali_uniphier_v5a_ecc_caps, denali_calc_ecc_bytes,
+ 1024, 8, 16, 24);
+static const struct denali_dt_data denali_uniphier_v5a_data = {
+ .caps = DENALI_CAP_HW_ECC_FIXUP |
+ DENALI_CAP_DMA_64BIT,
+ .oob_skip_bytes = 8,
+ .ecc_caps = &denali_uniphier_v5a_ecc_caps,
+};
+
+NAND_ECC_CAPS_SINGLE(denali_uniphier_v5b_ecc_caps, denali_calc_ecc_bytes,
+ 1024, 8, 16);
+static const struct denali_dt_data denali_uniphier_v5b_data = {
+ .revision = 0x0501,
+ .caps = DENALI_CAP_HW_ECC_FIXUP |
+ DENALI_CAP_DMA_64BIT,
+ .oob_skip_bytes = 8,
+ .ecc_caps = &denali_uniphier_v5b_ecc_caps,
+};
+
+static const struct of_device_id denali_nand_dt_ids[] = {
+ {
+ .compatible = "altr,socfpga-denali-nand",
+ .data = &denali_socfpga_data,
+ },
+ {
+ .compatible = "socionext,uniphier-denali-nand-v5a",
+ .data = &denali_uniphier_v5a_data,
+ },
+ {
+ .compatible = "socionext,uniphier-denali-nand-v5b",
+ .data = &denali_uniphier_v5b_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
+
+static int denali_dt_chip_init(struct denali_controller *denali,
+ struct device_node *chip_np)
+{
+ struct denali_chip *dchip;
+ u32 bank;
+ int nsels, i, ret;
+
+ nsels = of_property_count_u32_elems(chip_np, "reg");
+ if (nsels < 0)
+ return nsels;
+
+ dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
+ GFP_KERNEL);
+ if (!dchip)
+ return -ENOMEM;
+
+ dchip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(chip_np, "reg", i, &bank);
+ if (ret)
+ return ret;
+
+ dchip->sels[i].bank = bank;
+
+ nand_set_flash_node(&dchip->chip, chip_np);
+ }
+
+ return denali_chip_init(denali, dchip);
+}
+
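+/*
+ * Each chip is a child node of the controller. A minimal, illustrative DT
+ * fragment (property values are examples, not taken from a real board):
+ *
+ *   nand-controller {
+ *           compatible = "altr,socfpga-denali-nand";
+ *           nand@0 {
+ *                   reg = <0>;      (bank 0)
+ *                   label = "system";
+ *           };
+ *   };
+ */
+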
+static int denali_dt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct denali_dt *dt;
+ const struct denali_dt_data *data;
+ struct denali_controller *denali;
+ struct device_node *np;
+ int ret;
+
+ dt = devm_kzalloc(dev, sizeof(*dt), GFP_KERNEL);
+ if (!dt)
+ return -ENOMEM;
+ denali = &dt->controller;
+
+ data = of_device_get_match_data(dev);
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+ denali->revision = data->revision;
+ denali->caps = data->caps;
+ denali->oob_skip_bytes = data->oob_skip_bytes;
+ denali->ecc_caps = data->ecc_caps;
+
+ denali->dev = dev;
+ denali->irq = platform_get_irq(pdev, 0);
+ if (denali->irq < 0)
+ return denali->irq;
+
+ denali->reg = devm_platform_ioremap_resource_byname(pdev, "denali_reg");
+ if (IS_ERR(denali->reg))
+ return PTR_ERR(denali->reg);
+
+ denali->host = devm_platform_ioremap_resource_byname(pdev, "nand_data");
+ if (IS_ERR(denali->host))
+ return PTR_ERR(denali->host);
+
+ dt->clk = devm_clk_get(dev, "nand");
+ if (IS_ERR(dt->clk))
+ return PTR_ERR(dt->clk);
+
+ dt->clk_x = devm_clk_get(dev, "nand_x");
+ if (IS_ERR(dt->clk_x))
+ return PTR_ERR(dt->clk_x);
+
+ dt->clk_ecc = devm_clk_get(dev, "ecc");
+ if (IS_ERR(dt->clk_ecc))
+ return PTR_ERR(dt->clk_ecc);
+
+ dt->rst = devm_reset_control_get_optional_shared(dev, "nand");
+ if (IS_ERR(dt->rst))
+ return PTR_ERR(dt->rst);
+
+ dt->rst_reg = devm_reset_control_get_optional_shared(dev, "reg");
+ if (IS_ERR(dt->rst_reg))
+ return PTR_ERR(dt->rst_reg);
+
+ ret = clk_prepare_enable(dt->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dt->clk_x);
+ if (ret)
+ goto out_disable_clk;
+
+ ret = clk_prepare_enable(dt->clk_ecc);
+ if (ret)
+ goto out_disable_clk_x;
+
+ denali->clk_rate = clk_get_rate(dt->clk);
+ denali->clk_x_rate = clk_get_rate(dt->clk_x);
+
+ /*
+ * Deassert the register reset, and the core reset in this order.
+ * Deasserting the core reset while the register reset is asserted
+ * will cause unpredictable behavior in the controller.
+ */
+ ret = reset_control_deassert(dt->rst_reg);
+ if (ret)
+ goto out_disable_clk_ecc;
+
+ ret = reset_control_deassert(dt->rst);
+ if (ret)
+ goto out_assert_rst_reg;
+
+ /*
+ * When the reset is deasserted, the initialization sequence is kicked
+ * (bootstrap process). The driver must wait until it finished.
+ * Otherwise, it will result in unpredictable behavior.
+ */
+ usleep_range(200, 1000);
+
+ ret = denali_init(denali);
+ if (ret)
+ goto out_assert_rst;
+
+ for_each_child_of_node(dev->of_node, np) {
+ ret = denali_dt_chip_init(denali, np);
+ if (ret) {
+ of_node_put(np);
+ goto out_remove_denali;
+ }
+ }
+
+ platform_set_drvdata(pdev, dt);
+
+ return 0;
+
+out_remove_denali:
+ denali_remove(denali);
+out_assert_rst:
+ reset_control_assert(dt->rst);
+out_assert_rst_reg:
+ reset_control_assert(dt->rst_reg);
+out_disable_clk_ecc:
+ clk_disable_unprepare(dt->clk_ecc);
+out_disable_clk_x:
+ clk_disable_unprepare(dt->clk_x);
+out_disable_clk:
+ clk_disable_unprepare(dt->clk);
+
+ return ret;
+}
+
+static void denali_dt_remove(struct platform_device *pdev)
+{
+ struct denali_dt *dt = platform_get_drvdata(pdev);
+
+ denali_remove(&dt->controller);
+ reset_control_assert(dt->rst);
+ reset_control_assert(dt->rst_reg);
+ clk_disable_unprepare(dt->clk_ecc);
+ clk_disable_unprepare(dt->clk_x);
+ clk_disable_unprepare(dt->clk);
+}
+
+static struct platform_driver denali_dt_driver = {
+ .probe = denali_dt_probe,
+ .remove_new = denali_dt_remove,
+ .driver = {
+ .name = "denali-nand-dt",
+ .of_match_table = denali_nand_dt_ids,
+ },
+};
+module_platform_driver(denali_dt_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jamie Iles");
+MODULE_DESCRIPTION("DT driver for Denali NAND controller");
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
new file mode 100644
index 0000000000..de7e722d38
--- /dev/null
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "denali.h"
+
+#define DENALI_NAND_NAME "denali-nand-pci"
+
+#define INTEL_CE4100 1
+#define INTEL_MRST 2
+
+/* List of platforms this NAND controller has been integrated into */
+static const struct pci_device_id denali_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
+ { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
+ { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, denali_pci_ids);
+
+NAND_ECC_CAPS_SINGLE(denali_pci_ecc_caps, denali_calc_ecc_bytes, 512, 8, 15);
+
+static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ resource_size_t csr_base, mem_base;
+ unsigned long csr_len, mem_len;
+ struct denali_controller *denali;
+ struct denali_chip *dchip;
+ int nsels, ret, i;
+
+ denali = devm_kzalloc(&dev->dev, sizeof(*denali), GFP_KERNEL);
+ if (!denali)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(dev);
+ if (ret) {
+ dev_err(&dev->dev, "Spectra: pci_enable_device failed.\n");
+ return ret;
+ }
+
+ if (id->driver_data == INTEL_CE4100) {
+ mem_base = pci_resource_start(dev, 0);
+ mem_len = pci_resource_len(dev, 1);
+ csr_base = pci_resource_start(dev, 1);
+ csr_len = pci_resource_len(dev, 1);
+ } else {
+ csr_base = pci_resource_start(dev, 0);
+ csr_len = pci_resource_len(dev, 0);
+ mem_base = pci_resource_start(dev, 1);
+ mem_len = pci_resource_len(dev, 1);
+ if (!mem_len) {
+ mem_base = csr_base + csr_len;
+ mem_len = csr_len;
+ }
+ }
+
+ pci_set_master(dev);
+ denali->dev = &dev->dev;
+ denali->irq = dev->irq;
+ denali->ecc_caps = &denali_pci_ecc_caps;
+ denali->clk_rate = 50000000; /* 50 MHz */
+ denali->clk_x_rate = 200000000; /* 200 MHz */
+
+ ret = pci_request_regions(dev, DENALI_NAND_NAME);
+ if (ret) {
+ dev_err(&dev->dev, "Spectra: Unable to request memory regions\n");
+ return ret;
+ }
+
+ denali->reg = devm_ioremap(denali->dev, csr_base, csr_len);
+ if (!denali->reg) {
+ dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
+ return -ENOMEM;
+ }
+
+ denali->host = devm_ioremap(denali->dev, mem_base, mem_len);
+ if (!denali->host) {
+ dev_err(&dev->dev, "Spectra: ioremap failed!");
+ return -ENOMEM;
+ }
+
+ ret = denali_init(denali);
+ if (ret)
+ return ret;
+
+ nsels = denali->nbanks;
+
+ dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
+ GFP_KERNEL);
+ if (!dchip) {
+ ret = -ENOMEM;
+ goto out_remove_denali;
+ }
+
+ dchip->chip.base.ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
+
+ dchip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++)
+ dchip->sels[i].bank = i;
+
+ ret = denali_chip_init(denali, dchip);
+ if (ret)
+ goto out_remove_denali;
+
+ pci_set_drvdata(dev, denali);
+
+ return 0;
+
+out_remove_denali:
+ denali_remove(denali);
+ return ret;
+}
+
+static void denali_pci_remove(struct pci_dev *dev)
+{
+ struct denali_controller *denali = pci_get_drvdata(dev);
+
+ denali_remove(denali);
+}
+
+static struct pci_driver denali_pci_driver = {
+ .name = DENALI_NAND_NAME,
+ .id_table = denali_pci_ids,
+ .probe = denali_pci_probe,
+ .remove = denali_pci_remove,
+};
+module_pci_driver(denali_pci_driver);
+
+MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
+MODULE_AUTHOR("Intel Corporation and its suppliers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
new file mode 100644
index 0000000000..5d2ddb037a
--- /dev/null
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -0,0 +1,1579 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (C) 2003 Red Hat, Inc.
+ * (C) 2004 Dan Brown <dan_brown@ieee.org>
+ * (C) 2004 Kalev Lember <kalev@smartlink.ee>
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ * Additional Diskonchip 2000 and Millennium support by Dan Brown <dan_brown@ieee.org>
+ * Diskonchip Millennium Plus support by Kalev Lember <kalev@smartlink.ee>
+ *
+ * Error correction code lifted from the old docecc code
+ * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
+ * Copyright (C) 2000 Netgem S.A.
+ * converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Interface to generic NAND code for M-Systems DiskOnChip devices
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/rslib.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/doc2000.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/inftl.h>
+#include <linux/module.h>
+
+/* Where to look for the devices? */
+#ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS
+#define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0
+#endif
+
+static unsigned long doc_locations[] __initdata = {
+#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH
+ 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
+ 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
+ 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
+ 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
+ 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
+#else
+ 0xc8000, 0xca000, 0xcc000, 0xce000,
+ 0xd0000, 0xd2000, 0xd4000, 0xd6000,
+ 0xd8000, 0xda000, 0xdc000, 0xde000,
+ 0xe0000, 0xe2000, 0xe4000, 0xe6000,
+ 0xe8000, 0xea000, 0xec000, 0xee000,
+#endif
+#endif
+ 0xffffffff };
+
+static struct mtd_info *doclist = NULL;
+
+struct doc_priv {
+ struct nand_controller base;
+ void __iomem *virtadr;
+ unsigned long physadr;
+ u_char ChipID;
+ u_char CDSNControl;
+ int chips_per_floor; /* The number of chips detected on each floor */
+ int curfloor;
+ int curchip;
+ int mh0_page;
+ int mh1_page;
+ struct rs_control *rs_decoder;
+ struct mtd_info *nextdoc;
+ bool supports_32b_reads;
+
+ /* Handle the last stage of initialization (BBT scan, partitioning) */
+ int (*late_init)(struct mtd_info *mtd);
+};
+
+/*
+ * This is the ECC value computed by the HW ECC generator upon writing an
+ * empty page, i.e. one with all 0xff data.
+ */
+static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
+
+#define INFTL_BBT_RESERVED_BLOCKS 4
+
+#define DoC_is_MillenniumPlus(doc) ((doc)->ChipID == DOC_ChipID_DocMilPlus16 || (doc)->ChipID == DOC_ChipID_DocMilPlus32)
+#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
+#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
+
+static int debug = 0;
+module_param(debug, int, 0);
+
+static int try_dword = 1;
+module_param(try_dword, int, 0);
+
+static int no_ecc_failures = 0;
+module_param(no_ecc_failures, int, 0);
+
+static int no_autopart = 0;
+module_param(no_autopart, int, 0);
+
+static int show_firmware_partition = 0;
+module_param(show_firmware_partition, int, 0);
+
+#ifdef CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE
+static int inftl_bbt_write = 1;
+#else
+static int inftl_bbt_write = 0;
+#endif
+module_param(inftl_bbt_write, int, 0);
+
+static unsigned long doc_config_location = CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS;
+module_param(doc_config_location, ulong, 0);
+MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
+
+/* Sector size for HW ECC */
+#define SECTOR_SIZE 512
+/* The sector bytes are packed into NB_DATA 10 bit words */
+#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / 10)
+/* Number of roots */
+#define NROOTS 4
+/* First consecutive root */
+#define FCR 510
+/* Number of symbols */
+#define NN 1023
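+/* i.e. NB_DATA = ((512 + 1) * 8 + 6) / 10 = 411 ten-bit symbols per sector */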
+
+/*
+ * The HW decoder in the DoC ASICs provides us an error syndrome,
+ * which we must convert to a standard syndrome usable by the generic
+ * Reed-Solomon library code.
+ *
+ * Fabrice Bellard figured this out in the old docecc code. I added
+ * some comments, made minor improvements and converted it to make use
+ * of the generic Reed-Solomon library. tglx
+ */
+static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
+{
+ int i, j, nerr, errpos[8];
+ uint8_t parity;
+ uint16_t ds[4], s[5], tmp, errval[8], syn[4];
+ struct rs_codec *cd = rs->codec;
+
+ memset(syn, 0, sizeof(syn));
+ /* Convert the ecc bytes into words */
+ ds[0] = ((ecc[4] & 0xff) >> 0) | ((ecc[5] & 0x03) << 8);
+ ds[1] = ((ecc[5] & 0xfc) >> 2) | ((ecc[2] & 0x0f) << 6);
+ ds[2] = ((ecc[2] & 0xf0) >> 4) | ((ecc[3] & 0x3f) << 4);
+ ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
+ parity = ecc[1];
+
+ /* Initialize the syndrome buffer */
+ for (i = 0; i < NROOTS; i++)
+ s[i] = ds[0];
+ /*
+ * Evaluate
+ * s[i] = ds[3]x^3 + ds[2]x^2 + ds[1]x^1 + ds[0]
+ * where x = alpha^(FCR + i)
+ */
+ for (j = 1; j < NROOTS; j++) {
+ if (ds[j] == 0)
+ continue;
+ tmp = cd->index_of[ds[j]];
+ for (i = 0; i < NROOTS; i++)
+ s[i] ^= cd->alpha_to[rs_modnn(cd, tmp + (FCR + i) * j)];
+ }
+
+ /* Calc syn[i] = s[i] / alpha^(v + i) */
+ for (i = 0; i < NROOTS; i++) {
+ if (s[i])
+ syn[i] = rs_modnn(cd, cd->index_of[s[i]] + (NN - FCR - i));
+ }
+ /* Call the decoder library */
+ nerr = decode_rs16(rs, NULL, NULL, 1019, syn, 0, errpos, 0, errval);
+
+	/* Uncorrectable errors? */
+ if (nerr < 0)
+ return nerr;
+
+ /*
+ * Correct the errors. The bitpositions are a bit of magic,
+ * but they are given by the design of the de/encoder circuit
+ * in the DoC ASIC's.
+ */
+ for (i = 0; i < nerr; i++) {
+ int index, bitpos, pos = 1015 - errpos[i];
+ uint8_t val;
+ if (pos >= NB_DATA && pos < 1019)
+ continue;
+ if (pos < NB_DATA) {
+ /* extract bit position (MSB first) */
+ pos = 10 * (NB_DATA - 1 - pos) - 6;
+ /* now correct the following 10 bits. At most two bytes
+ can be modified since pos is even */
+ index = (pos >> 3) ^ 1;
+ bitpos = pos & 7;
+ if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
+ val = (uint8_t) (errval[i] >> (2 + bitpos));
+ parity ^= val;
+ if (index < SECTOR_SIZE)
+ data[index] ^= val;
+ }
+ index = ((pos >> 3) + 1) ^ 1;
+ bitpos = (bitpos + 10) & 7;
+ if (bitpos == 0)
+ bitpos = 8;
+ if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
+ val = (uint8_t) (errval[i] << (8 - bitpos));
+ parity ^= val;
+ if (index < SECTOR_SIZE)
+ data[index] ^= val;
+ }
+ }
+ }
+ /* If the parity is wrong, no rescue possible */
+ return parity ? -EBADMSG : nerr;
+}
+
+static void DoC_Delay(struct doc_priv *doc, unsigned short cycles)
+{
+ volatile char __always_unused dummy;
+ int i;
+
+ for (i = 0; i < cycles; i++) {
+ if (DoC_is_Millennium(doc))
+ dummy = ReadDOC(doc->virtadr, NOP);
+ else if (DoC_is_MillenniumPlus(doc))
+ dummy = ReadDOC(doc->virtadr, Mplus_NOP);
+ else
+ dummy = ReadDOC(doc->virtadr, DOCStatus);
+ }
+
+}
+
+#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
+
+/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
+static int _DoC_WaitReady(struct doc_priv *doc)
+{
+ void __iomem *docptr = doc->virtadr;
+ unsigned long timeo = jiffies + (HZ * 10);
+
+ if (debug)
+ printk("_DoC_WaitReady...\n");
+ /* Out-of-line routine to wait for chip response */
+ if (DoC_is_MillenniumPlus(doc)) {
+ while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
+ if (time_after(jiffies, timeo)) {
+ printk("_DoC_WaitReady timed out.\n");
+ return -EIO;
+ }
+ udelay(1);
+ cond_resched();
+ }
+ } else {
+ while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+ if (time_after(jiffies, timeo)) {
+ printk("_DoC_WaitReady timed out.\n");
+ return -EIO;
+ }
+ udelay(1);
+ cond_resched();
+ }
+ }
+
+ return 0;
+}
+
+static inline int DoC_WaitReady(struct doc_priv *doc)
+{
+ void __iomem *docptr = doc->virtadr;
+ int ret = 0;
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ DoC_Delay(doc, 4);
+
+ if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
+ /* Call the out-of-line routine to wait */
+ ret = _DoC_WaitReady(doc);
+ } else {
+ DoC_Delay(doc, 4);
+
+ if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
+ /* Call the out-of-line routine to wait */
+ ret = _DoC_WaitReady(doc);
+ DoC_Delay(doc, 2);
+ }
+
+ if (debug)
+ printk("DoC_WaitReady OK\n");
+ return ret;
+}
+
+static void doc2000_write_byte(struct nand_chip *this, u_char datum)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+
+ if (debug)
+ printk("write_byte %02x\n", datum);
+ WriteDOC(datum, docptr, CDSNSlowIO);
+ WriteDOC(datum, docptr, 2k_CDSN_IO);
+}
+
+static void doc2000_writebuf(struct nand_chip *this, const u_char *buf,
+ int len)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+ if (debug)
+ printk("writebuf of %d bytes: ", len);
+ for (i = 0; i < len; i++) {
+ WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+ if (debug)
+ printk("\n");
+}
+
+static void doc2000_readbuf(struct nand_chip *this, u_char *buf, int len)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ u32 *buf32 = (u32 *)buf;
+ int i;
+
+ if (debug)
+ printk("readbuf of %d bytes: ", len);
+
+ if (!doc->supports_32b_reads ||
+ ((((unsigned long)buf) | len) & 3)) {
+ for (i = 0; i < len; i++)
+ buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
+ } else {
+ for (i = 0; i < len / 4; i++)
+ buf32[i] = readl(docptr + DoC_2k_CDSN_IO + i);
+ }
+}
+
+/*
+ * We need our own readid() here because it's called before the NAND chip
+ * has been initialized, and calling nand_op_readid() would lead to a NULL
+ * pointer exception when dereferencing the NAND timings.
+ */
+static void doc200x_readid(struct nand_chip *this, unsigned int cs, u8 *id)
+{
+ u8 addr = 0;
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READID, 0),
+ NAND_OP_ADDR(1, &addr, 50),
+ NAND_OP_8BIT_DATA_IN(2, id, 0),
+ };
+
+ struct nand_operation op = NAND_OPERATION(cs, instrs);
+
+ if (!id)
+ op.ninstrs--;
+
+ this->controller->ops->exec_op(this, &op, false);
+}
+
+static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+ uint16_t ret;
+ u8 id[2];
+
+ doc200x_readid(this, nr, id);
+
+ ret = ((u16)id[0] << 8) | id[1];
+
+ if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
+ /* First chip probe. See if we get same results by 32-bit access */
+ union {
+ uint32_t dword;
+ uint8_t byte[4];
+ } ident;
+ void __iomem *docptr = doc->virtadr;
+
+ doc200x_readid(this, nr, NULL);
+
+ ident.dword = readl(docptr + DoC_2k_CDSN_IO);
+ if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
+ pr_info("DiskOnChip 2000 responds to DWORD access\n");
+ doc->supports_32b_reads = true;
+ }
+ }
+
+ return ret;
+}
+
+static void __init doc2000_count_chips(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+ uint16_t mfrid;
+ int i;
+
+ /* Max 4 chips per floor on DiskOnChip 2000 */
+ doc->chips_per_floor = 4;
+
+ /* Find out what the first chip is */
+ mfrid = doc200x_ident_chip(mtd, 0);
+
+ /* Find how many chips in each floor. */
+ for (i = 1; i < 4; i++) {
+ if (doc200x_ident_chip(mtd, i) != mfrid)
+ break;
+ }
+ doc->chips_per_floor = i;
+ pr_debug("Detected %d chips per floor.\n", i);
+}
+
+static void doc2001_write_byte(struct nand_chip *this, u_char datum)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+
+ WriteDOC(datum, docptr, CDSNSlowIO);
+ WriteDOC(datum, docptr, Mil_CDSN_IO);
+ WriteDOC(datum, docptr, WritePipeTerm);
+}
+
+static void doc2001_writebuf(struct nand_chip *this, const u_char *buf, int len)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ for (i = 0; i < len; i++)
+ WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
+ /* Terminate write pipeline */
+ WriteDOC(0x00, docptr, WritePipeTerm);
+}
+
+static void doc2001_readbuf(struct nand_chip *this, u_char *buf, int len)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ /* Start read pipeline */
+ ReadDOC(docptr, ReadPipeInit);
+
+ for (i = 0; i < len - 1; i++)
+ buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
+
+ /* Terminate read pipeline */
+ buf[i] = ReadDOC(docptr, LastDataRead);
+}
+
+static void doc2001plus_writebuf(struct nand_chip *this, const u_char *buf, int len)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("writebuf of %d bytes: ", len);
+ for (i = 0; i < len; i++) {
+ WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+ if (debug)
+ printk("\n");
+}
+
+static void doc2001plus_readbuf(struct nand_chip *this, u_char *buf, int len)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)
+ printk("readbuf of %d bytes: ", len);
+
+ /* Start read pipeline */
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+
+ for (i = 0; i < len - 2; i++) {
+ buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+
+ /* Terminate read pipeline */
+ if (len >= 2) {
+ buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug && i < 16)
+ printk("%02x ", buf[len - 2]);
+ }
+
+ buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug && i < 16)
+ printk("%02x ", buf[len - 1]);
+ if (debug)
+ printk("\n");
+}
+
+static void doc200x_write_control(struct doc_priv *doc, u8 value)
+{
+ WriteDOC(value, doc->virtadr, CDSNControl);
+	/* 11.4.3 -- 4 NOPs after CDSNControl write */
+ DoC_Delay(doc, 4);
+}
+
+static void doc200x_exec_instr(struct nand_chip *this,
+ const struct nand_op_instr *instr)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ unsigned int i;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_CLE);
+ doc2000_write_byte(this, instr->ctx.cmd.opcode);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_ALE);
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ u8 addr = instr->ctx.addr.addrs[i];
+
+ if (DoC_is_2000(doc))
+ doc2000_write_byte(this, addr);
+ else
+ doc2001_write_byte(this, addr);
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ doc200x_write_control(doc, CDSN_CTRL_CE);
+ if (DoC_is_2000(doc))
+ doc2000_readbuf(this, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ else
+ doc2001_readbuf(this, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ doc200x_write_control(doc, CDSN_CTRL_CE);
+ if (DoC_is_2000(doc))
+ doc2000_writebuf(this, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ else
+ doc2001_writebuf(this, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ DoC_WaitReady(doc);
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+}
+
+static int doc200x_exec_op(struct nand_chip *this,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ unsigned int i;
+
+ if (check_only)
+		return 0;
+
+ doc->curchip = op->cs % doc->chips_per_floor;
+ doc->curfloor = op->cs / doc->chips_per_floor;
+
+ WriteDOC(doc->curfloor, doc->virtadr, FloorSelect);
+ WriteDOC(doc->curchip, doc->virtadr, CDSNDeviceSelect);
+
+ /* Assert CE pin */
+ doc200x_write_control(doc, CDSN_CTRL_CE);
+
+ for (i = 0; i < op->ninstrs; i++)
+ doc200x_exec_instr(this, &op->instrs[i]);
+
+ /* De-assert CE pin */
+ doc200x_write_control(doc, 0);
+
+ return 0;
+}
+
+static void doc2001plus_write_pipe_term(struct doc_priv *doc)
+{
+ WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm);
+ WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm);
+}
+
+static void doc2001plus_exec_instr(struct nand_chip *this,
+ const struct nand_op_instr *instr)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ unsigned int i;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ WriteDOC(instr->ctx.cmd.opcode, doc->virtadr, Mplus_FlashCmd);
+ doc2001plus_write_pipe_term(doc);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ u8 addr = instr->ctx.addr.addrs[i];
+
+ WriteDOC(addr, doc->virtadr, Mplus_FlashAddress);
+ }
+ doc2001plus_write_pipe_term(doc);
+ /* deassert ALE */
+ WriteDOC(0, doc->virtadr, Mplus_FlashControl);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ doc2001plus_readbuf(this, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ doc2001plus_writebuf(this, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ doc2001plus_write_pipe_term(doc);
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ DoC_WaitReady(doc);
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+}
+
+static int doc2001plus_exec_op(struct nand_chip *this,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ unsigned int i;
+
+ if (check_only)
+		return 0;
+
+ doc->curchip = op->cs % doc->chips_per_floor;
+ doc->curfloor = op->cs / doc->chips_per_floor;
+
+ /* Assert ChipEnable and deassert WriteProtect */
+ WriteDOC(DOC_FLASH_CE, doc->virtadr, Mplus_FlashSelect);
+
+ for (i = 0; i < op->ninstrs; i++)
+ doc2001plus_exec_instr(this, &op->instrs[i]);
+
+ /* De-assert ChipEnable */
+ WriteDOC(0, doc->virtadr, Mplus_FlashSelect);
+
+ return 0;
+}
+
+static void doc200x_enable_hwecc(struct nand_chip *this, int mode)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+
+ /* Prime the ECC engine */
+ switch (mode) {
+ case NAND_ECC_READ:
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN, docptr, ECCConf);
+ break;
+ case NAND_ECC_WRITE:
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
+ break;
+ }
+}
+
+static void doc2001plus_enable_hwecc(struct nand_chip *this, int mode)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+
+ /* Prime the ECC engine */
+ switch (mode) {
+ case NAND_ECC_READ:
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+ WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
+ break;
+ case NAND_ECC_WRITE:
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+ WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
+ break;
+ }
+}
+
+/* This code is only called on write */
+static int doc200x_calculate_ecc(struct nand_chip *this, const u_char *dat,
+ unsigned char *ecc_code)
+{
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ int i;
+ int __always_unused emptymatch = 1;
+
+ /* flush the pipeline */
+ if (DoC_is_2000(doc)) {
+ WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(doc->CDSNControl, docptr, CDSNControl);
+ } else if (DoC_is_MillenniumPlus(doc)) {
+ WriteDOC(0, docptr, Mplus_NOP);
+ WriteDOC(0, docptr, Mplus_NOP);
+ WriteDOC(0, docptr, Mplus_NOP);
+ } else {
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ }
+
+ for (i = 0; i < 6; i++) {
+ if (DoC_is_MillenniumPlus(doc))
+ ecc_code[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
+ else
+ ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+ if (ecc_code[i] != empty_write_ecc[i])
+ emptymatch = 0;
+ }
+ if (DoC_is_MillenniumPlus(doc))
+ WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
+ else
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+#if 0
+ /* If emptymatch=1, we might have an all-0xff data buffer. Check. */
+ if (emptymatch) {
+ /* Note: this somewhat expensive test should not be triggered
+ often. It could be optimized away by examining the data in
+ the writebuf routine, and remembering the result. */
+ for (i = 0; i < 512; i++) {
+ if (dat[i] == 0xff)
+ continue;
+ emptymatch = 0;
+ break;
+ }
+ }
+ /* If emptymatch still =1, we do have an all-0xff data buffer.
+ Return all-0xff ecc value instead of the computed one, so
+ it'll look just like a freshly-erased page. */
+ if (emptymatch)
+ memset(ecc_code, 0xff, 6);
+#endif
+ return 0;
+}
+
+static int doc200x_correct_data(struct nand_chip *this, u_char *dat,
+ u_char *read_ecc, u_char *isnull)
+{
+ int i, ret = 0;
+ struct doc_priv *doc = nand_get_controller_data(this);
+ void __iomem *docptr = doc->virtadr;
+ uint8_t calc_ecc[6];
+ volatile u_char dummy;
+
+ /* flush the pipeline */
+ if (DoC_is_2000(doc)) {
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ } else if (DoC_is_MillenniumPlus(doc)) {
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ } else {
+ dummy = ReadDOC(docptr, ECCConf);
+ dummy = ReadDOC(docptr, ECCConf);
+ dummy = ReadDOC(docptr, ECCConf);
+ }
+
+	/* Error occurred? */
+ if (dummy & 0x80) {
+ for (i = 0; i < 6; i++) {
+ if (DoC_is_MillenniumPlus(doc))
+ calc_ecc[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
+ else
+ calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+ }
+
+ ret = doc_ecc_decode(doc->rs_decoder, dat, calc_ecc);
+ if (ret > 0)
+ pr_err("doc200x_correct_data corrected %d errors\n",
+ ret);
+ }
+ if (DoC_is_MillenniumPlus(doc))
+ WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
+ else
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+ if (no_ecc_failures && mtd_is_eccerr(ret)) {
+ pr_err("suppressing ECC failure\n");
+ ret = 0;
+ }
+ return ret;
+}
+
+static int doc200x_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 0;
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static int doc200x_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ /*
+ * The strange out-of-order free bytes definition is a (possibly
+ * unneeded) attempt to retain compatibility. It used to read:
+ * .oobfree = { {8, 8} }
+ * Since that leaves two bytes unusable, it was changed. But the
+ * following scheme might affect existing jffs2 installs by moving the
+ * cleanmarker:
+ * .oobfree = { {6, 10} }
+ * jffs2 seems to handle the above gracefully, but the current scheme
+ * seems safer. The only problem with it is that any code retrieving
+ * free bytes position must be able to handle out-of-order segments.
+ */
+ if (!section) {
+ oobregion->offset = 8;
+ oobregion->length = 8;
+ } else {
+ oobregion->offset = 6;
+ oobregion->length = 2;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops doc200x_ooblayout_ops = {
+ .ecc = doc200x_ooblayout_ecc,
+ .free = doc200x_ooblayout_free,
+};
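+
+/*
+ * Resulting 16-byte OOB map: bytes 0-5 hold the ECC, bytes 6-7 form the
+ * (out-of-order) second free region, and bytes 8-15 the first free region.
+ */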
+
+/*
+ * Find the (I)NFTL Media Header, and optionally also the mirror media
+ * header. On successful return, buf will contain a copy of the media header
+ * for further processing. id is the string to scan for, and will presumably
+ * be either "ANAND" or "BNAND". If findmirror=1, also look for the mirror
+ * media header. The page #s of the found media headers are placed in
+ * mh0_page and mh1_page in the DOC private structure.
+ */
+static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const char *id, int findmirror)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+ unsigned offs;
+ int ret;
+ size_t retlen;
+
+ for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
+ if (retlen != mtd->writesize)
+ continue;
+ if (ret) {
+ pr_warn("ECC error scanning DOC at 0x%x\n", offs);
+ }
+ if (memcmp(buf, id, 6))
+ continue;
+ pr_info("Found DiskOnChip %s Media Header at 0x%x\n", id, offs);
+ if (doc->mh0_page == -1) {
+ doc->mh0_page = offs >> this->page_shift;
+ if (!findmirror)
+ return 1;
+ continue;
+ }
+ doc->mh1_page = offs >> this->page_shift;
+ return 2;
+ }
+ if (doc->mh0_page == -1) {
+ pr_warn("DiskOnChip %s Media Header not found.\n", id);
+ return 0;
+ }
+	/*
+	 * Only one media header was found. We want buf to contain a media
+	 * header on return, so we'll have to re-read the one we found.
+	 */
+ offs = doc->mh0_page << this->page_shift;
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
+ if (retlen != mtd->writesize) {
+ /* Insanity. Give up. */
+ pr_err("Read DiskOnChip Media Header once, but can't reread it???\n");
+ return 0;
+ }
+ return 1;
+}
+
+static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+ struct nand_memory_organization *memorg;
+ int ret = 0;
+ u_char *buf;
+ struct NFTLMediaHeader *mh;
+ const unsigned psize = 1 << this->page_shift;
+ int numparts = 0;
+ unsigned blocks, maxblocks;
+ int offs, numheaders;
+
+ memorg = nanddev_get_memorg(&this->base);
+
+ buf = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!buf) {
+ return 0;
+ }
+ if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
+ goto out;
+ mh = (struct NFTLMediaHeader *)buf;
+
+ le16_to_cpus(&mh->NumEraseUnits);
+ le16_to_cpus(&mh->FirstPhysicalEUN);
+ le32_to_cpus(&mh->FormattedSize);
+
+ pr_info(" DataOrgID = %s\n"
+ " NumEraseUnits = %d\n"
+ " FirstPhysicalEUN = %d\n"
+ " FormattedSize = %d\n"
+ " UnitSizeFactor = %d\n",
+ mh->DataOrgID, mh->NumEraseUnits,
+ mh->FirstPhysicalEUN, mh->FormattedSize,
+ mh->UnitSizeFactor);
+
+ blocks = mtd->size >> this->phys_erase_shift;
+ maxblocks = min(32768U, mtd->erasesize - psize);
+
+ if (mh->UnitSizeFactor == 0x00) {
+		/*
+		 * Auto-determine UnitSizeFactor. The constraints are:
+		 * - There can be at most 32768 virtual blocks.
+		 * - There can be at most (virtual block size - page size)
+		 *   virtual blocks (because MediaHeader+BBT must fit in 1).
+		 */
+ mh->UnitSizeFactor = 0xff;
+ while (blocks > maxblocks) {
+ blocks >>= 1;
+ maxblocks = min(32768U, (maxblocks << 1) + psize);
+ mh->UnitSizeFactor--;
+ }
+ pr_warn("UnitSizeFactor=0x00 detected. Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
+ }
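+
+	/*
+	 * Illustrative numbers for the loop above: with psize = 512 and a
+	 * 16 KiB erase block, maxblocks starts at min(32768, 16384 - 512) =
+	 * 15872. A device with 24576 physical blocks halves once to
+	 * 12288 <= 32256, leaving UnitSizeFactor = 0xfe, i.e. one virtual
+	 * block per two physical ones.
+	 */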
+
+	/*
+	 * NOTE: The lines below modify internal variables of the NAND and MTD
+	 * layers; variables which have already been configured by nand_scan.
+	 * Unfortunately, we didn't know before this point what these values
+	 * should be. Thus, this code is somewhat dependent on the exact
+	 * implementation of the NAND layer.
+	 */
+ if (mh->UnitSizeFactor != 0xff) {
+ this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
+ memorg->pages_per_eraseblock <<= (0xff - mh->UnitSizeFactor);
+ mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
+ pr_info("Setting virtual erase size to %d\n", mtd->erasesize);
+ blocks = mtd->size >> this->bbt_erase_shift;
+ maxblocks = min(32768U, mtd->erasesize - psize);
+ }
+
+ if (blocks > maxblocks) {
+ pr_err("UnitSizeFactor of 0x%02x is inconsistent with device size. Aborting.\n", mh->UnitSizeFactor);
+ goto out;
+ }
+
+ /* Skip past the media headers. */
+ offs = max(doc->mh0_page, doc->mh1_page);
+ offs <<= this->page_shift;
+ offs += mtd->erasesize;
+
+ if (show_firmware_partition == 1) {
+ parts[0].name = " DiskOnChip Firmware / Media Header partition";
+ parts[0].offset = 0;
+ parts[0].size = offs;
+ numparts = 1;
+ }
+
+ parts[numparts].name = " DiskOnChip BDTL partition";
+ parts[numparts].offset = offs;
+ parts[numparts].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift;
+
+ offs += parts[numparts].size;
+ numparts++;
+
+ if (offs < mtd->size) {
+ parts[numparts].name = " DiskOnChip Remainder partition";
+ parts[numparts].offset = offs;
+ parts[numparts].size = mtd->size - offs;
+ numparts++;
+ }
+
+ ret = numparts;
+ out:
+ kfree(buf);
+ return ret;
+}
+
+/* This is a stripped-down copy of the code in inftlmount.c */
+static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+ int ret = 0;
+ u_char *buf;
+ struct INFTLMediaHeader *mh;
+ struct INFTLPartition *ip;
+ int numparts = 0;
+ int blocks;
+ int vshift, lastvunit = 0;
+ int i;
+ int end = mtd->size;
+
+ if (inftl_bbt_write)
+ end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);
+
+ buf = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!buf) {
+ return 0;
+ }
+
+ if (!find_media_headers(mtd, buf, "BNAND", 0))
+ goto out;
+ doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
+ mh = (struct INFTLMediaHeader *)buf;
+
+ le32_to_cpus(&mh->NoOfBootImageBlocks);
+ le32_to_cpus(&mh->NoOfBinaryPartitions);
+ le32_to_cpus(&mh->NoOfBDTLPartitions);
+ le32_to_cpus(&mh->BlockMultiplierBits);
+ le32_to_cpus(&mh->FormatFlags);
+ le32_to_cpus(&mh->PercentUsed);
+
+ pr_info(" bootRecordID = %s\n"
+ " NoOfBootImageBlocks = %d\n"
+ " NoOfBinaryPartitions = %d\n"
+ " NoOfBDTLPartitions = %d\n"
+ " BlockMultiplierBits = %d\n"
+ " FormatFlgs = %d\n"
+ " OsakVersion = %d.%d.%d.%d\n"
+ " PercentUsed = %d\n",
+ mh->bootRecordID, mh->NoOfBootImageBlocks,
+ mh->NoOfBinaryPartitions,
+ mh->NoOfBDTLPartitions,
+ mh->BlockMultiplierBits, mh->FormatFlags,
+ ((unsigned char *) &mh->OsakVersion)[0] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[1] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[2] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[3] & 0xf,
+ mh->PercentUsed);
+
+ vshift = this->phys_erase_shift + mh->BlockMultiplierBits;
+
+ blocks = mtd->size >> vshift;
+ if (blocks > 32768) {
+ pr_err("BlockMultiplierBits=%d is inconsistent with device size. Aborting.\n", mh->BlockMultiplierBits);
+ goto out;
+ }
+
+ blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift);
+ if (inftl_bbt_write && (blocks > mtd->erasesize)) {
+ pr_err("Writeable BBTs spanning more than one erase block are not yet supported. FIX ME!\n");
+ goto out;
+ }
+
+ /* Scan the partitions */
+ for (i = 0; (i < 4); i++) {
+ ip = &(mh->Partitions[i]);
+ le32_to_cpus(&ip->virtualUnits);
+ le32_to_cpus(&ip->firstUnit);
+ le32_to_cpus(&ip->lastUnit);
+ le32_to_cpus(&ip->flags);
+ le32_to_cpus(&ip->spareUnits);
+ le32_to_cpus(&ip->Reserved0);
+
+ pr_info(" PARTITION[%d] ->\n"
+ " virtualUnits = %d\n"
+ " firstUnit = %d\n"
+ " lastUnit = %d\n"
+ " flags = 0x%x\n"
+ " spareUnits = %d\n",
+ i, ip->virtualUnits, ip->firstUnit,
+ ip->lastUnit, ip->flags,
+ ip->spareUnits);
+
+ if ((show_firmware_partition == 1) &&
+ (i == 0) && (ip->firstUnit > 0)) {
+ parts[0].name = " DiskOnChip IPL / Media Header partition";
+ parts[0].offset = 0;
+ parts[0].size = mtd->erasesize * ip->firstUnit;
+ numparts = 1;
+ }
+
+ if (ip->flags & INFTL_BINARY)
+ parts[numparts].name = " DiskOnChip BDK partition";
+ else
+ parts[numparts].name = " DiskOnChip BDTL partition";
+ parts[numparts].offset = ip->firstUnit << vshift;
+ parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift;
+ numparts++;
+ if (ip->lastUnit > lastvunit)
+ lastvunit = ip->lastUnit;
+ if (ip->flags & INFTL_LAST)
+ break;
+ }
+ lastvunit++;
+ if ((lastvunit << vshift) < end) {
+ parts[numparts].name = " DiskOnChip Remainder partition";
+ parts[numparts].offset = lastvunit << vshift;
+ parts[numparts].size = end - parts[numparts].offset;
+ numparts++;
+ }
+ ret = numparts;
+ out:
+ kfree(buf);
+ return ret;
+}
+
+static int __init nftl_scan_bbt(struct mtd_info *mtd)
+{
+ int ret, numparts;
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+ struct mtd_partition parts[2];
+
+ memset(parts, 0, sizeof(parts));
+ /* On NFTL, we have to find the media headers before we can read the
+ BBTs, since they're stored in the media header eraseblocks. */
+ numparts = nftl_partscan(mtd, parts);
+ if (!numparts)
+ return -EIO;
+ this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
+ NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
+ NAND_BBT_VERSION;
+ this->bbt_td->veroffs = 7;
+ this->bbt_td->pages[0] = doc->mh0_page + 1;
+ if (doc->mh1_page != -1) {
+ this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
+ NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
+ NAND_BBT_VERSION;
+ this->bbt_md->veroffs = 7;
+ this->bbt_md->pages[0] = doc->mh1_page + 1;
+ } else {
+ this->bbt_md = NULL;
+ }
+
+ ret = nand_create_bbt(this);
+ if (ret)
+ return ret;
+
+ return mtd_device_register(mtd, parts, no_autopart ? 0 : numparts);
+}
+
+static int __init inftl_scan_bbt(struct mtd_info *mtd)
+{
+ int ret, numparts;
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+ struct mtd_partition parts[5];
+
+ if (nanddev_ntargets(&this->base) > doc->chips_per_floor) {
+ pr_err("Multi-floor INFTL devices not yet supported.\n");
+ return -EIO;
+ }
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ this->bbt_td->options = NAND_BBT_2BIT | NAND_BBT_ABSPAGE;
+ if (inftl_bbt_write)
+ this->bbt_td->options |= NAND_BBT_WRITE;
+ this->bbt_td->pages[0] = 2;
+ this->bbt_md = NULL;
+ } else {
+ this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
+ if (inftl_bbt_write)
+ this->bbt_td->options |= NAND_BBT_WRITE;
+ this->bbt_td->offs = 8;
+ this->bbt_td->len = 8;
+ this->bbt_td->veroffs = 7;
+ this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+ this->bbt_td->reserved_block_code = 0x01;
+ this->bbt_td->pattern = "MSYS_BBT";
+
+ this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
+ if (inftl_bbt_write)
+ this->bbt_md->options |= NAND_BBT_WRITE;
+ this->bbt_md->offs = 8;
+ this->bbt_md->len = 8;
+ this->bbt_md->veroffs = 7;
+ this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+ this->bbt_md->reserved_block_code = 0x01;
+ this->bbt_md->pattern = "TBB_SYSM";
+ }
+
+ ret = nand_create_bbt(this);
+ if (ret)
+ return ret;
+
+ memset(parts, 0, sizeof(parts));
+ numparts = inftl_partscan(mtd, parts);
+ /* At least for now, require the INFTL Media Header. We could probably
+ do without it for non-INFTL use, since all it gives us is
+ autopartitioning, but I want to give it more thought. */
+ if (!numparts)
+ return -EIO;
+ return mtd_device_register(mtd, parts, no_autopart ? 0 : numparts);
+}
+
+static inline int __init doc2000_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+
+ doc->late_init = nftl_scan_bbt;
+
+ doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
+ doc2000_count_chips(mtd);
+ mtd->name = "DiskOnChip 2000 (NFTL Model)";
+ return (4 * doc->chips_per_floor);
+}
+
+static inline int __init doc2001_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+
+ ReadDOC(doc->virtadr, ChipID);
+ ReadDOC(doc->virtadr, ChipID);
+ ReadDOC(doc->virtadr, ChipID);
+ if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) {
+ /* It's not a Millennium; it's one of the newer
+ DiskOnChip 2000 units with a similar ASIC.
+ Treat it like a Millennium, except that it
+ can have multiple chips. */
+ doc2000_count_chips(mtd);
+ mtd->name = "DiskOnChip 2000 (INFTL Model)";
+ doc->late_init = inftl_scan_bbt;
+ return (4 * doc->chips_per_floor);
+ } else {
+ /* Bog-standard Millennium */
+ doc->chips_per_floor = 1;
+ mtd->name = "DiskOnChip Millennium";
+ doc->late_init = nftl_scan_bbt;
+ return 1;
+ }
+}
+
+static inline int __init doc2001plus_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct doc_priv *doc = nand_get_controller_data(this);
+
+ doc->late_init = inftl_scan_bbt;
+ this->ecc.hwctl = doc2001plus_enable_hwecc;
+
+ doc->chips_per_floor = 1;
+ mtd->name = "DiskOnChip Millennium Plus";
+
+ return 1;
+}
+
+static int doc200x_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
+ chip->ecc.strength = 2;
+ chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
+ chip->ecc.hwctl = doc200x_enable_hwecc;
+ chip->ecc.calculate = doc200x_calculate_ecc;
+ chip->ecc.correct = doc200x_correct_data;
+
+ return 0;
+}
+
+static const struct nand_controller_ops doc200x_ops = {
+ .exec_op = doc200x_exec_op,
+ .attach_chip = doc200x_attach_chip,
+};
+
+static const struct nand_controller_ops doc2001plus_ops = {
+ .exec_op = doc2001plus_exec_op,
+ .attach_chip = doc200x_attach_chip,
+};
+
+static int __init doc_probe(unsigned long physadr)
+{
+ struct nand_chip *nand = NULL;
+ struct doc_priv *doc = NULL;
+ unsigned char ChipID;
+ struct mtd_info *mtd;
+ void __iomem *virtadr;
+ unsigned char save_control;
+ unsigned char tmp, tmpb, tmpc;
+ int reg, len, numchips;
+ int ret = 0;
+
+ if (!request_mem_region(physadr, DOC_IOREMAP_LEN, "DiskOnChip"))
+ return -EBUSY;
+ virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
+ if (!virtadr) {
+ pr_err("Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n",
+ DOC_IOREMAP_LEN, physadr);
+ ret = -EIO;
+ goto error_ioremap;
+ }
+
+ /* It's not possible to cleanly detect the DiskOnChip - the
+ * bootup procedure will put the device into reset mode, and
+ * it's not possible to talk to it without actually writing
+ * to the DOCControl register. So we store the current contents
+ * of the DOCControl register's location, in case we later decide
+ * that it's not a DiskOnChip, and want to put it back how we
+ * found it.
+ */
+ save_control = ReadDOC(virtadr, DOCControl);
+
+ /* Reset the DiskOnChip ASIC */
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
+
+ /* Enable the DiskOnChip ASIC */
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
+
+ ChipID = ReadDOC(virtadr, ChipID);
+
+ switch (ChipID) {
+ case DOC_ChipID_Doc2k:
+ reg = DoC_2k_ECCStatus;
+ break;
+ case DOC_ChipID_DocMil:
+ reg = DoC_ECCConf;
+ break;
+ case DOC_ChipID_DocMilPlus16:
+ case DOC_ChipID_DocMilPlus32:
+ case 0:
+ /* Possible Millennium Plus, need to do more checks */
+ /* Possibly release from power down mode */
+ for (tmp = 0; (tmp < 4); tmp++)
+ ReadDOC(virtadr, Mplus_Power);
+
+ /* Reset the Millennium Plus ASIC */
+ tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
+ WriteDOC(tmp, virtadr, Mplus_DOCControl);
+ WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
+
+ usleep_range(1000, 2000);
+ /* Enable the Millennium Plus ASIC */
+ tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
+ WriteDOC(tmp, virtadr, Mplus_DOCControl);
+ WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
+ usleep_range(1000, 2000);
+
+ ChipID = ReadDOC(virtadr, ChipID);
+
+ switch (ChipID) {
+ case DOC_ChipID_DocMilPlus16:
+ reg = DoC_Mplus_Toggle;
+ break;
+ case DOC_ChipID_DocMilPlus32:
+ pr_err("DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
+ fallthrough;
+ default:
+ ret = -ENODEV;
+ goto notfound;
+ }
+ break;
+
+ default:
+ ret = -ENODEV;
+ goto notfound;
+ }
+ /* Check the TOGGLE bit in the ECC register: it flips on each
+ successive read, so a genuine DiskOnChip yields tmp != tmpb
+ and tmp == tmpc */
+ tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ if ((tmp == tmpb) || (tmp != tmpc)) {
+ pr_warn("Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr);
+ ret = -ENODEV;
+ goto notfound;
+ }
+
+ for (mtd = doclist; mtd; mtd = doc->nextdoc) {
+ unsigned char oldval;
+ unsigned char newval;
+ nand = mtd_to_nand(mtd);
+ doc = nand_get_controller_data(nand);
+ /* Use the alias resolution register to determine if this is
+ in fact the same DOC aliased to a new address. If writes
+ to one chip's alias resolution register change the value on
+ the other chip, they're the same chip. */
+ if (ChipID == DOC_ChipID_DocMilPlus16) {
+ oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
+ newval = ReadDOC(virtadr, Mplus_AliasResolution);
+ } else {
+ oldval = ReadDOC(doc->virtadr, AliasResolution);
+ newval = ReadDOC(virtadr, AliasResolution);
+ }
+ if (oldval != newval)
+ continue;
+ if (ChipID == DOC_ChipID_DocMilPlus16) {
+ WriteDOC(~newval, virtadr, Mplus_AliasResolution);
+ oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
+ WriteDOC(newval, virtadr, Mplus_AliasResolution); // restore it
+ } else {
+ WriteDOC(~newval, virtadr, AliasResolution);
+ oldval = ReadDOC(doc->virtadr, AliasResolution);
+ WriteDOC(newval, virtadr, AliasResolution); // restore it
+ }
+ newval = ~newval;
+ if (oldval == newval) {
+ pr_debug("Found alias of DOC at 0x%lx to 0x%lx\n",
+ doc->physadr, physadr);
+ goto notfound;
+ }
+ }
+
+ pr_notice("DiskOnChip found at 0x%lx\n", physadr);
+
+ len = sizeof(struct nand_chip) + sizeof(struct doc_priv) +
+ (2 * sizeof(struct nand_bbt_descr));
+ nand = kzalloc(len, GFP_KERNEL);
+ if (!nand) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /*
+ * Allocate a RS codec instance
+ *
+ * Symbolsize is 10 (bits)
+ * Primitive polynomial is x^10+x^3+1
+ * First consecutive root is 510
+ * Primitive element to generate roots = 1
+ * Generator polynomial degree = 4
+ */
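+ /*
+ * A minimal decode sketch (assuming the kernel rslib API; the buffer
+ * names and SECTOR_SYMS length are hypothetical). With 4 roots the
+ * codec corrects up to 2 bad 10-bit symbols per protected sector:
+ *
+ * uint16_t data[SECTOR_SYMS], par[NROOTS];
+ * int nerr = decode_rs16(doc->rs_decoder, data, par, SECTOR_SYMS,
+ * NULL, 0, NULL, 0, NULL);
+ * // nerr < 0 means the sector is uncorrectable
+ */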
+ doc = (struct doc_priv *) (nand + 1);
+ doc->rs_decoder = init_rs(10, 0x409, FCR, 1, NROOTS);
+ if (!doc->rs_decoder) {
+ pr_err("DiskOnChip: Could not create a RS codec\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ nand_controller_init(&doc->base);
+ if (ChipID == DOC_ChipID_DocMilPlus16)
+ doc->base.ops = &doc2001plus_ops;
+ else
+ doc->base.ops = &doc200x_ops;
+
+ mtd = nand_to_mtd(nand);
+ nand->bbt_td = (struct nand_bbt_descr *) (doc + 1);
+ nand->bbt_md = nand->bbt_td + 1;
+
+ mtd->owner = THIS_MODULE;
+ mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops);
+
+ nand->controller = &doc->base;
+ nand_set_controller_data(nand, doc);
+ nand->bbt_options = NAND_BBT_USE_FLASH;
+ /* Skip the automatic BBT scan so we can run it manually */
+ nand->options |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK;
+
+ doc->physadr = physadr;
+ doc->virtadr = virtadr;
+ doc->ChipID = ChipID;
+ doc->curfloor = -1;
+ doc->curchip = -1;
+ doc->mh0_page = -1;
+ doc->mh1_page = -1;
+ doc->nextdoc = doclist;
+
+ if (ChipID == DOC_ChipID_Doc2k)
+ numchips = doc2000_init(mtd);
+ else if (ChipID == DOC_ChipID_DocMilPlus16)
+ numchips = doc2001plus_init(mtd);
+ else
+ numchips = doc2001_init(mtd);
+
+ ret = nand_scan(nand, numchips);
+ if (!ret)
+ ret = doc->late_init(mtd);
+ if (ret) {
+ /* DBB note: I believe nand_cleanup is necessary here, as
+ buffers may have been allocated in nand_base. Check with
+ Thomas. FIX ME! */
+ nand_cleanup(nand);
+ goto fail;
+ }
+
+ /* Success! */
+ doclist = mtd;
+ return 0;
+
+ notfound:
+ /* Put back the contents of the DOCControl register, in case it's not
+ actually a DiskOnChip. */
+ WriteDOC(save_control, virtadr, DOCControl);
+ fail:
+ if (doc)
+ free_rs(doc->rs_decoder);
+ kfree(nand);
+ iounmap(virtadr);
+
+error_ioremap:
+ release_mem_region(physadr, DOC_IOREMAP_LEN);
+
+ return ret;
+}
+
+static void release_nanddoc(void)
+{
+ struct mtd_info *mtd, *nextmtd;
+ struct nand_chip *nand;
+ struct doc_priv *doc;
+ int ret;
+
+ for (mtd = doclist; mtd; mtd = nextmtd) {
+ nand = mtd_to_nand(mtd);
+ doc = nand_get_controller_data(nand);
+
+ nextmtd = doc->nextdoc;
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(nand);
+ iounmap(doc->virtadr);
+ release_mem_region(doc->physadr, DOC_IOREMAP_LEN);
+ free_rs(doc->rs_decoder);
+ kfree(nand);
+ }
+}
+
+static int __init init_nanddoc(void)
+{
+ int i, ret = 0;
+
+ if (doc_config_location) {
+ pr_info("Using configured DiskOnChip probe address 0x%lx\n",
+ doc_config_location);
+ ret = doc_probe(doc_config_location);
+ if (ret < 0)
+ return ret;
+ } else {
+ for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
+ doc_probe(doc_locations[i]);
+ }
+ }
+ /* No banner message any more. Print a message if no DiskOnChip
+ found, so the user knows we at least tried. */
+ if (!doclist) {
+ pr_info("No valid DiskOnChip devices found\n");
+ ret = -ENODEV;
+ }
+ return ret;
+}
+
+static void __exit cleanup_nanddoc(void)
+{
+ /* Cleanup the nand/DoC resources */
+ release_nanddoc();
+}
+
+module_init(init_nanddoc);
+module_exit(cleanup_nanddoc);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("M-Systems DiskOnChip 2000, Millennium and Millennium Plus device driver");
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
new file mode 100644
index 0000000000..1e3a80f06f
--- /dev/null
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -0,0 +1,1008 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Freescale Enhanced Local Bus Controller NAND driver
+ *
+ * Copyright © 2006-2007, 2010 Freescale Semiconductor
+ *
+ * Authors: Nick Spence <nick.spence@freescale.com>,
+ * Scott Wood <scottwood@freescale.com>
+ * Jack Lan <jack.lan@freescale.com>
+ * Roy Zang <tie-fei.zang@freescale.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/fsl_lbc.h>
+
+#define MAX_BANKS 8
+#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
+#define FCM_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for FCM */
+
+/* mtd information per set */
+
+struct fsl_elbc_mtd {
+ struct nand_chip chip;
+ struct fsl_lbc_ctrl *ctrl;
+
+ struct device *dev;
+ int bank; /* Chip select bank number */
+ u8 __iomem *vbase; /* Chip select base virtual address */
+ int page_size; /* NAND page size (0=512, 1=2048) */
+ unsigned int fmr; /* FCM Flash Mode Register value */
+};
+
+/* Freescale eLBC FCM controller information */
+
+struct fsl_elbc_fcm_ctrl {
+ struct nand_controller controller;
+ struct fsl_elbc_mtd *chips[MAX_BANKS];
+
+ u8 __iomem *addr; /* Address of assigned FCM buffer */
+ unsigned int page; /* Last page written to / read from */
+ unsigned int read_bytes; /* Number of bytes read during command */
+ unsigned int column; /* Saved column from SEQIN */
+ unsigned int index; /* Pointer to next byte to 'read' */
+ unsigned int status; /* status read from LTESR after last op */
+ unsigned int mdr; /* UPM/FCM Data Register value */
+ unsigned int use_mdr; /* Non zero if the MDR is to be set */
+ unsigned int oob; /* Non zero if operating on OOB data */
+ unsigned int counter; /* counter for the initializations */
+ unsigned int max_bitflips; /* Saved during READ0 cmd */
+};
+
+/* These map to the positions used by the FCM hardware ECC generator */
+
+static int fsl_elbc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (16 * section) + 6;
+ if (priv->fmr & FMR_ECCM)
+ oobregion->offset += 2;
+
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int fsl_elbc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+
+ if (section > chip->ecc.steps)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 0;
+ if (mtd->writesize > 512)
+ oobregion->offset++;
+ oobregion->length = (priv->fmr & FMR_ECCM) ? 7 : 5;
+ } else {
+ oobregion->offset = (16 * section) -
+ ((priv->fmr & FMR_ECCM) ? 5 : 7);
+ if (section < chip->ecc.steps)
+ oobregion->length = 13;
+ else
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops fsl_elbc_ooblayout_ops = {
+ .ecc = fsl_elbc_ooblayout_ecc,
+ .free = fsl_elbc_ooblayout_free,
+};
+
+/*
+ * ELBC may use HW ECC, so that OOB offsets, that NAND core uses for bbt,
+ * interfere with ECC positions, that's why we implement our own descriptors.
+ * OOB {11, 5}, works for both SP and LP chips, with ECCM = 1 and ECCM = 0.
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 11,
+ .len = 4,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 11,
+ .len = 4,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
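+
+ /*
+ * With offs = 11 and len = 4 the pattern occupies OOB bytes 11-14 and
+ * the version byte sits at offset 15; per the ooblayout above, the ECC
+ * bytes live at offsets 6-8 (ECCM = 0) or 8-10 (ECCM = 1) of each
+ * 16-byte chunk, so the BBT marker never collides with them.
+ */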
+
+/*=================================*/
+
+/*
+ * Set up the FCM hardware block and page address fields, and the fcm
+ * structure addr field to point to the correct FCM buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ int buf_num;
+
+ elbc_fcm_ctrl->page = page_addr;
+
+ if (priv->page_size) {
+ /*
+ * Large page size chip: FPAR[PI] saves the lowest 6 bits,
+ * FBAR[BLK] saves the other bits.
+ */
+ out_be32(&lbc->fbar, page_addr >> 6);
+ out_be32(&lbc->fpar,
+ ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
+ (oob ? FPAR_LP_MS : 0) | column);
+ buf_num = (page_addr & 1) << 2;
+ } else {
+ /*
+ * Small page size chip: FPAR[PI] saves the lowest 5 bits,
+ * FBAR[BLK] saves the other bits.
+ */
+ out_be32(&lbc->fbar, page_addr >> 5);
+ out_be32(&lbc->fpar,
+ ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
+ (oob ? FPAR_SP_MS : 0) | column);
+ buf_num = page_addr & 7;
+ }
+
+ elbc_fcm_ctrl->addr = priv->vbase + buf_num * 1024;
+ elbc_fcm_ctrl->index = column;
+
+ /* for OOB data point to the second half of the buffer */
+ if (oob)
+ elbc_fcm_ctrl->index += priv->page_size ? 2048 : 512;
+
+ dev_vdbg(priv->dev, "set_addr: bank=%d, "
+ "elbc_fcm_ctrl->addr=0x%p (0x%p), "
+ "index %x, pes %d ps %d\n",
+ buf_num, elbc_fcm_ctrl->addr, priv->vbase,
+ elbc_fcm_ctrl->index,
+ chip->phys_erase_shift, chip->page_shift);
+}
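+
+ /*
+ * Worked example (hypothetical values): for a large-page chip and
+ * page_addr = 70, the code above programs FBAR = 70 >> 6 = 1 and
+ * FPAR[PI] = 70 & 0x3f = 6, and picks buf_num = (70 & 1) << 2 = 0,
+ * i.e. the FCM buffer at priv->vbase; an odd page would land at
+ * priv->vbase + 4096.
+ */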
+
+/*
+ * execute FCM command and wait for it to complete
+ */
+static int fsl_elbc_run_command(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ /* Setup the FMR[OP] to execute without write protection */
+ out_be32(&lbc->fmr, priv->fmr | 3);
+ if (elbc_fcm_ctrl->use_mdr)
+ out_be32(&lbc->mdr, elbc_fcm_ctrl->mdr);
+
+ dev_vdbg(priv->dev,
+ "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
+ in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
+ dev_vdbg(priv->dev,
+ "fsl_elbc_run_command: fbar=%08x fpar=%08x "
+ "fbcr=%08x bank=%d\n",
+ in_be32(&lbc->fbar), in_be32(&lbc->fpar),
+ in_be32(&lbc->fbcr), priv->bank);
+
+ ctrl->irq_status = 0;
+ /* execute special operation */
+ out_be32(&lbc->lsor, priv->bank);
+
+ /* wait for FCM complete flag or timeout */
+ wait_event_timeout(ctrl->irq_wait, ctrl->irq_status,
+ msecs_to_jiffies(FCM_TIMEOUT_MSECS));
+ elbc_fcm_ctrl->status = ctrl->irq_status;
+ /* store mdr value in case it was needed */
+ if (elbc_fcm_ctrl->use_mdr)
+ elbc_fcm_ctrl->mdr = in_be32(&lbc->mdr);
+
+ elbc_fcm_ctrl->use_mdr = 0;
+
+ if (elbc_fcm_ctrl->status != LTESR_CC) {
+ dev_info(priv->dev,
+ "command failed: fir %x fcr %x status %x mdr %x\n",
+ in_be32(&lbc->fir), in_be32(&lbc->fcr),
+ elbc_fcm_ctrl->status, elbc_fcm_ctrl->mdr);
+ return -EIO;
+ }
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ elbc_fcm_ctrl->max_bitflips = 0;
+
+ if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
+ uint32_t lteccr = in_be32(&lbc->lteccr);
+ /*
+ * If the command was a full page read and the ELBC
+ * has the LTECCR register, then bits 12-15 (ppc order) of
+ * LTECCR indicate which 512-byte sub-pages had corrected errors,
+ * while bits 28-31 flag uncorrectable errors (reported elsewhere).
+ * For small page NAND only 1 bit is used.
+ * If the ELBC doesn't have the LTECCR register, it reads as 0.
+ * FIXME: 4 bits can be corrected on NANDs with 2k pages, so
+ * count the number of sub-pages with bitflips and update
+ * ecc_stats.corrected accordingly.
+ */
+ if (lteccr & 0x000F000F)
+ out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
+ if (lteccr & 0x000F0000) {
+ mtd->ecc_stats.corrected++;
+ elbc_fcm_ctrl->max_bitflips = 1;
+ }
+ }
+
+ return 0;
+}
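+
+ /*
+ * Worked example for the LTECCR check above (hypothetical value): if
+ * LTECCR reads 0x00010000, at least one 512-byte sub-page had a fixed
+ * bitflip, so ecc_stats.corrected is bumped by one and max_bitflips is
+ * reported as 1, matching the ecc.strength = 1 this driver advertises.
+ */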
+
+static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
+{
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ if (priv->page_size) {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_CM1 << FIR_OP3_SHIFT) |
+ (FIR_OP_RBW << FIR_OP4_SHIFT));
+
+ out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << FCR_CMD1_SHIFT));
+ } else {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_RBW << FIR_OP3_SHIFT));
+
+ if (oob)
+ out_be32(&lbc->fcr, NAND_CMD_READOOB << FCR_CMD0_SHIFT);
+ else
+ out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT);
+ }
+}
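+
+ /*
+ * Reading the large-page sequence above (an interpretation based on
+ * the FCM opcode names): CM0 issues NAND_CMD_READ0, CA and PA drive
+ * the column and page address cycles, CM1 issues NAND_CMD_READSTART,
+ * and RBW waits for ready/busy before bursting the page into the FCM
+ * buffer.
+ */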
+
+ /* cmdfunc sends commands to the FCM */
+static void fsl_elbc_cmdfunc(struct nand_chip *chip, unsigned int command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ elbc_fcm_ctrl->use_mdr = 0;
+
+ /* clear the read buffer */
+ elbc_fcm_ctrl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ elbc_fcm_ctrl->index = 0;
+
+ switch (command) {
+ /* READ0 and READ1 read the entire buffer to use hardware ECC. */
+ case NAND_CMD_READ1:
+ column += 256;
+ fallthrough;
+ case NAND_CMD_READ0:
+ dev_dbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
+ " 0x%x, column: 0x%x.\n", page_addr, column);
+
+ out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */
+ set_addr(mtd, 0, page_addr, 0);
+
+ elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ elbc_fcm_ctrl->index += column;
+
+ fsl_elbc_do_read(chip, 0);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* RNDOUT moves the pointer inside the page */
+ case NAND_CMD_RNDOUT:
+ dev_dbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_RNDOUT, column: 0x%x.\n",
+ column);
+
+ elbc_fcm_ctrl->index = column;
+ return;
+
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:"
+ " 0x%x, column: 0x%x.\n", page_addr, column);
+
+ out_be32(&lbc->fbcr, mtd->oobsize - column);
+ set_addr(mtd, column, page_addr, 1);
+
+ elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+ fsl_elbc_do_read(chip, 1);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ case NAND_CMD_READID:
+ case NAND_CMD_PARAM:
+ dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command);
+
+ out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_UA << FIR_OP1_SHIFT) |
+ (FIR_OP_RBW << FIR_OP2_SHIFT));
+ out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT);
+ /*
+ * Although READID currently needs only 8 bytes, we always read
+ * the maximum of 256 bytes (for PARAM).
+ */
+ out_be32(&lbc->fbcr, 256);
+ elbc_fcm_ctrl->read_bytes = 256;
+ elbc_fcm_ctrl->use_mdr = 1;
+ elbc_fcm_ctrl->mdr = column;
+ set_addr(mtd, 0, 0, 0);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* ERASE1 stores the block and page address */
+ case NAND_CMD_ERASE1:
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_ERASE1, "
+ "page_addr: 0x%x.\n", page_addr);
+ set_addr(mtd, 0, page_addr, 0);
+ return;
+
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+ dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
+
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_PA << FIR_OP1_SHIFT) |
+ (FIR_OP_CM2 << FIR_OP2_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP3_SHIFT) |
+ (FIR_OP_RS << FIR_OP4_SHIFT));
+
+ out_be32(&lbc->fcr,
+ (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
+ (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
+ (NAND_CMD_ERASE2 << FCR_CMD2_SHIFT));
+
+ out_be32(&lbc->fbcr, 0);
+ elbc_fcm_ctrl->read_bytes = 0;
+ elbc_fcm_ctrl->use_mdr = 1;
+
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN: {
+ __be32 fcr;
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, "
+ "page_addr: 0x%x, column: 0x%x.\n",
+ page_addr, column);
+
+ elbc_fcm_ctrl->column = column;
+ elbc_fcm_ctrl->use_mdr = 1;
+
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ elbc_fcm_ctrl->oob = 1;
+ } else {
+ WARN_ON(column != 0);
+ elbc_fcm_ctrl->oob = 0;
+ }
+
+ fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
+ (NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
+ (NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
+
+ if (priv->page_size) {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM2 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_WB << FIR_OP3_SHIFT) |
+ (FIR_OP_CM3 << FIR_OP4_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP5_SHIFT) |
+ (FIR_OP_RS << FIR_OP6_SHIFT));
+ } else {
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CM2 << FIR_OP1_SHIFT) |
+ (FIR_OP_CA << FIR_OP2_SHIFT) |
+ (FIR_OP_PA << FIR_OP3_SHIFT) |
+ (FIR_OP_WB << FIR_OP4_SHIFT) |
+ (FIR_OP_CM3 << FIR_OP5_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP6_SHIFT) |
+ (FIR_OP_RS << FIR_OP7_SHIFT));
+
+ if (elbc_fcm_ctrl->oob)
+ /* OOB area --> READOOB */
+ fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
+ else
+ /* First 256 bytes --> READ0 */
+ fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
+ }
+
+ out_be32(&lbc->fcr, fcr);
+ set_addr(mtd, column, page_addr, elbc_fcm_ctrl->oob);
+ return;
+ }
+
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG: {
+ dev_vdbg(priv->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
+ "writing %d bytes.\n", elbc_fcm_ctrl->index);
+
+ /* if the write did not start at 0 or is not a full page
+ * then set the exact length, otherwise use a full page
+ * write so the HW generates the ECC.
+ */
+ if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
+ elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
+ out_be32(&lbc->fbcr,
+ elbc_fcm_ctrl->index - elbc_fcm_ctrl->column);
+ else
+ out_be32(&lbc->fbcr, 0);
+
+ fsl_elbc_run_command(mtd);
+ return;
+ }
+
+ /* CMD_STATUS must read the status byte while CEB is active */
+ /* Note - it does not wait for the ready line */
+ case NAND_CMD_STATUS:
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_RBW << FIR_OP1_SHIFT));
+ out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
+ out_be32(&lbc->fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ elbc_fcm_ctrl->read_bytes = 1;
+
+ fsl_elbc_run_command(mtd);
+
+ /* The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ setbits8(elbc_fcm_ctrl->addr, NAND_STATUS_WP);
+ return;
+
+ /* RESET without waiting for the ready line */
+ case NAND_CMD_RESET:
+ dev_dbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n");
+ out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT);
+ out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ default:
+ dev_err(priv->dev,
+ "fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n",
+ command);
+ }
+}
+
+static void fsl_elbc_select_chip(struct nand_chip *chip, int cs)
+{
+ /* The hardware does not seem to support multiple
+ * chips per bank.
+ */
+}
+
+/*
+ * Write buf to the FCM Controller Data Buffer
+ */
+static void fsl_elbc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+ unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+ if (len <= 0) {
+ dev_err(priv->dev, "write_buf of %d bytes", len);
+ elbc_fcm_ctrl->status = 0;
+ return;
+ }
+
+ if ((unsigned int)len > bufsize - elbc_fcm_ctrl->index) {
+ dev_err(priv->dev,
+ "write_buf beyond end of buffer "
+ "(%d requested, %u available)\n",
+ len, bufsize - elbc_fcm_ctrl->index);
+ len = bufsize - elbc_fcm_ctrl->index;
+ }
+
+ memcpy_toio(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], buf, len);
+ /*
+ * This is workaround for the weird elbc hangs during nand write,
+ * Scott Wood says: "...perhaps difference in how long it takes a
+ * write to make it through the localbus compared to a write to IMMR
+ * is causing problems, and sync isn't helping for some reason."
+ * Reading back the last byte helps though.
+ */
+ in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index] + len - 1);
+
+ elbc_fcm_ctrl->index += len;
+}
+
+/*
+ * read a byte from either the FCM hardware buffer if it has any data left
+ * otherwise issue a command to read a single byte.
+ */
+static u8 fsl_elbc_read_byte(struct nand_chip *chip)
+{
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+
+ /* If there are still bytes in the FCM, then use the next byte. */
+ if (elbc_fcm_ctrl->index < elbc_fcm_ctrl->read_bytes)
+ return in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index++]);
+
+ dev_err(priv->dev, "read_byte beyond end of buffer\n");
+ return ERR_BYTE;
+}
+
+/*
+ * Read from the FCM Controller Data Buffer
+ */
+static void fsl_elbc_read_buf(struct nand_chip *chip, u8 *buf, int len)
+{
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+ int avail;
+
+ if (len < 0)
+ return;
+
+ avail = min((unsigned int)len,
+ elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
+ memcpy_fromio(buf, &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], avail);
+ elbc_fcm_ctrl->index += avail;
+
+ if (len > avail)
+ dev_err(priv->dev,
+ "read_buf beyond end of buffer "
+ "(%d requested, %d available)\n",
+ len, avail);
+}
+
+/* This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_elbc_wait(struct nand_chip *chip)
+{
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+
+ if (elbc_fcm_ctrl->status != LTESR_CC)
+ return NAND_STATUS_FAIL;
+
+ /* The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ return (elbc_fcm_ctrl->mdr & 0xff) | NAND_STATUS_WP;
+}
+
+static int fsl_elbc_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (oob_required)
+ fsl_elbc_read_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ if (fsl_elbc_wait(chip) & NAND_STATUS_FAIL)
+ mtd->ecc_stats.failed++;
+
+ return elbc_fcm_ctrl->max_bitflips;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_elbc_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ fsl_elbc_write_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return nand_prog_page_end_op(chip);
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_elbc_write_subpage(struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ fsl_elbc_write_buf(chip, buf, mtd->writesize);
+ fsl_elbc_write_buf(chip, chip->oob_poi, mtd->oobsize);
+ return nand_prog_page_end_op(chip);
+}
+
+static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
+{
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+ struct nand_chip *chip = &priv->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank);
+
+ /* Fill in fsl_elbc_mtd structure */
+ mtd->dev.parent = priv->dev;
+ nand_set_flash_node(chip, priv->dev->of_node);
+
+ /* set timeout to maximum */
+ priv->fmr = 15 << FMR_CWTO_SHIFT;
+ if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS)
+ priv->fmr |= FMR_ECCM;
+
+ /* fill in nand_chip structure */
+ /* set up function call table */
+ chip->legacy.read_byte = fsl_elbc_read_byte;
+ chip->legacy.write_buf = fsl_elbc_write_buf;
+ chip->legacy.read_buf = fsl_elbc_read_buf;
+ chip->legacy.select_chip = fsl_elbc_select_chip;
+ chip->legacy.cmdfunc = fsl_elbc_cmdfunc;
+ chip->legacy.waitfunc = fsl_elbc_wait;
+ chip->legacy.set_features = nand_get_set_features_notsupp;
+ chip->legacy.get_features = nand_get_set_features_notsupp;
+
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+
+ /* set up nand options */
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+
+ chip->controller = &elbc_fcm_ctrl->controller;
+ nand_set_controller_data(chip, priv);
+
+ return 0;
+}
+
+static int fsl_elbc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ unsigned int al;
+ u32 br;
+
+ /*
+ * if ECC was not chosen in DT, decide whether to use HW or SW ECC from
+ * CS Base Register
+ */
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID) {
+ /* If CS Base Register selects full hardware ECC then use it */
+ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+ BR_DECC_CHK_GEN) {
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ } else {
+ /* otherwise fall back to default software ECC */
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ }
+ }
+
+ switch (chip->ecc.engine_type) {
+ /* if HW ECC was chosen, setup ecc and oob layout */
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ chip->ecc.read_page = fsl_elbc_read_page;
+ chip->ecc.write_page = fsl_elbc_write_page;
+ chip->ecc.write_subpage = fsl_elbc_write_subpage;
+ mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ break;
+
+ /* if none or SW ECC was chosen, we do not need to set anything here */
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* enable/disable HW ECC checking and generating based on if HW ECC was chosen */
+ br = in_be32(&lbc->bank[priv->bank].br) & ~BR_DECC;
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
+ out_be32(&lbc->bank[priv->bank].br, br | BR_DECC_CHK_GEN);
+ else
+ out_be32(&lbc->bank[priv->bank].br, br | BR_DECC_OFF);
+
+ /* calculate FMR Address Length field */
+ al = 0;
+ if (chip->pagemask & 0xffff0000)
+ al++;
+ if (chip->pagemask & 0xff000000)
+ al++;
+
+ priv->fmr |= al << FMR_AL_SHIFT;
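+
+ /*
+ * Worked example (hypothetical chip): a pagemask of 0x0003ffff has
+ * bits set in 0xffff0000 but none in 0xff000000, so al = 1; FMR[AL]
+ * then asks the FCM for one extra page-address byte (per the Address
+ * Length field's role).
+ */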
+
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
+ nanddev_ntargets(&chip->base));
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chipsize = %lld\n",
+ nanddev_target_size(&chip->base));
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
+ chip->pagemask);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->legacy.chip_delay = %d\n",
+ chip->legacy.chip_delay);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
+ chip->badblockpos);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
+ chip->chip_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->page_shift = %d\n",
+ chip->page_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
+ chip->phys_erase_shift);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.engine_type = %d\n",
+ chip->ecc.engine_type);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
+ chip->ecc.steps);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
+ chip->ecc.bytes);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
+ chip->ecc.total);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->ooblayout = %p\n",
+ mtd->ooblayout);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
+ mtd->erasesize);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->writesize = %d\n",
+ mtd->writesize);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
+ mtd->oobsize);
+
+ /* adjust Option Register and ECC to match Flash page size */
+ if (mtd->writesize == 512) {
+ priv->page_size = 0;
+ clrbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
+ } else if (mtd->writesize == 2048) {
+ priv->page_size = 1;
+ setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
+ } else {
+ dev_err(priv->dev,
+ "fsl_elbc_init: page size %d is not supported\n",
+ mtd->writesize);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops fsl_elbc_controller_ops = {
+ .attach_chip = fsl_elbc_attach_chip,
+};
+
+static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
+{
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+ struct mtd_info *mtd = nand_to_mtd(&priv->chip);
+
+ kfree(mtd->name);
+
+ if (priv->vbase)
+ iounmap(priv->vbase);
+
+ elbc_fcm_ctrl->chips[priv->bank] = NULL;
+ kfree(priv);
+ return 0;
+}
+
+static DEFINE_MUTEX(fsl_elbc_nand_mutex);
+
+static int fsl_elbc_nand_probe(struct platform_device *pdev)
+{
+ struct fsl_lbc_regs __iomem *lbc;
+ struct fsl_elbc_mtd *priv;
+ struct resource res;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
+ static const char *part_probe_types[]
+ = { "cmdlinepart", "RedBoot", "ofpart", NULL };
+ int ret;
+ int bank;
+ struct device *dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct mtd_info *mtd;
+
+ if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
+ return -ENODEV;
+ lbc = fsl_lbc_ctrl_dev->regs;
+ dev = fsl_lbc_ctrl_dev->dev;
+
+ /* get, allocate and map the memory resource */
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to get resource\n");
+ return ret;
+ }
+
+ /* find which chip select it is connected to */
+ for (bank = 0; bank < MAX_BANKS; bank++)
+ if ((in_be32(&lbc->bank[bank].br) & BR_V) &&
+ (in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM &&
+ (in_be32(&lbc->bank[bank].br) &
+ in_be32(&lbc->bank[bank].or) & BR_BA)
+ == fsl_lbc_addr(res.start))
+ break;
+
+ if (bank >= MAX_BANKS) {
+ dev_err(dev, "address did not match any chip selects\n");
+ return -ENODEV;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_lock(&fsl_elbc_nand_mutex);
+ if (!fsl_lbc_ctrl_dev->nand) {
+ elbc_fcm_ctrl = kzalloc(sizeof(*elbc_fcm_ctrl), GFP_KERNEL);
+ if (!elbc_fcm_ctrl) {
+ mutex_unlock(&fsl_elbc_nand_mutex);
+ ret = -ENOMEM;
+ goto err;
+ }
+ elbc_fcm_ctrl->counter++;
+
+ nand_controller_init(&elbc_fcm_ctrl->controller);
+ fsl_lbc_ctrl_dev->nand = elbc_fcm_ctrl;
+ } else {
+ elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
+ }
+ mutex_unlock(&fsl_elbc_nand_mutex);
+
+ elbc_fcm_ctrl->chips[bank] = priv;
+ priv->bank = bank;
+ priv->ctrl = fsl_lbc_ctrl_dev;
+ priv->dev = &pdev->dev;
+ dev_set_drvdata(priv->dev, priv);
+
+ priv->vbase = ioremap(res.start, resource_size(&res));
+ if (!priv->vbase) {
+ dev_err(dev, "failed to map chip region\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mtd = nand_to_mtd(&priv->chip);
+ mtd->name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
+ if (!mtd->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = fsl_elbc_chip_init(priv);
+ if (ret)
+ goto err;
+
+ priv->chip.controller->ops = &fsl_elbc_controller_ops;
+ ret = nand_scan(&priv->chip, 1);
+ if (ret)
+ goto err;
+
+ /* First look for a RedBoot table or partitions on the command
+ * line; these take precedence over device tree information */
+ ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
+ if (ret)
+ goto cleanup_nand;
+
+ pr_info("eLBC NAND device at 0x%llx, bank %d\n",
+ (unsigned long long)res.start, priv->bank);
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(&priv->chip);
+err:
+ fsl_elbc_chip_remove(priv);
+
+ return ret;
+}
+
+static void fsl_elbc_nand_remove(struct platform_device *pdev)
+{
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
+ struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
+ struct nand_chip *chip = &priv->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ fsl_elbc_chip_remove(priv);
+
+ mutex_lock(&fsl_elbc_nand_mutex);
+ elbc_fcm_ctrl->counter--;
+ if (!elbc_fcm_ctrl->counter) {
+ fsl_lbc_ctrl_dev->nand = NULL;
+ kfree(elbc_fcm_ctrl);
+ }
+ mutex_unlock(&fsl_elbc_nand_mutex);
+}
+
+static const struct of_device_id fsl_elbc_nand_match[] = {
+ { .compatible = "fsl,elbc-fcm-nand", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsl_elbc_nand_match);
+
+static struct platform_driver fsl_elbc_nand_driver = {
+ .driver = {
+ .name = "fsl,elbc-fcm-nand",
+ .of_match_table = fsl_elbc_nand_match,
+ },
+ .probe = fsl_elbc_nand_probe,
+ .remove_new = fsl_elbc_nand_remove,
+};
+
+module_platform_driver(fsl_elbc_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale");
+MODULE_DESCRIPTION("Freescale Enhanced Local Bus Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
new file mode 100644
index 0000000000..f0e2318ce0
--- /dev/null
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -0,0 +1,1140 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale Integrated Flash Controller NAND driver
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc
+ *
+ * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/fsl_ifc.h>
+#include <linux/iopoll.h>
+
+#define ERR_BYTE 0xFF /* Value returned for read
+ bytes when read failed */
+#define IFC_TIMEOUT_MSECS 1000 /* Maximum timeout to wait
+ for IFC NAND Machine */
+
+struct fsl_ifc_ctrl;
+
+/* mtd information per set */
+struct fsl_ifc_mtd {
+ struct nand_chip chip;
+ struct fsl_ifc_ctrl *ctrl;
+
+ struct device *dev;
+ int bank; /* Chip select bank number */
+ unsigned int bufnum_mask; /* bufnum = page & bufnum_mask */
+ u8 __iomem *vbase; /* Chip select base virtual address */
+};
+
+/* overview of the fsl ifc controller */
+struct fsl_ifc_nand_ctrl {
+ struct nand_controller controller;
+ struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];
+
+ void __iomem *addr; /* Address of assigned IFC buffer */
+ unsigned int page; /* Last page written to / read from */
+ unsigned int read_bytes;/* Number of bytes read during command */
+ unsigned int column; /* Saved column from SEQIN */
+ unsigned int index; /* Pointer to next byte to 'read' */
+ unsigned int oob; /* Non zero if operating on OOB data */
+ unsigned int eccread; /* Non zero for a full-page ECC read */
+ unsigned int counter; /* counter for the initializations */
+ unsigned int max_bitflips; /* Saved during READ0 cmd */
+};
+
+static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
+
+/*
+ * Generic flash bbt descriptors
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 2, /* 0 on 8-bit small page */
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 2, /* 0 on 8-bit small page */
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+static int fsl_ifc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 8;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int fsl_ifc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section > 1)
+ return -ERANGE;
+
+ if (mtd->writesize == 512 &&
+ !(chip->options & NAND_BUSWIDTH_16)) {
+ if (!section) {
+ oobregion->offset = 0;
+ oobregion->length = 5;
+ } else {
+ oobregion->offset = 6;
+ oobregion->length = 2;
+ }
+
+ return 0;
+ }
+
+ if (!section) {
+ oobregion->offset = 2;
+ oobregion->length = 6;
+ } else {
+ oobregion->offset = chip->ecc.total + 8;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops fsl_ifc_ooblayout_ops = {
+ .ecc = fsl_ifc_ooblayout_ecc,
+ .free = fsl_ifc_ooblayout_free,
+};
+
+/*
+ * Set up the IFC hardware block and page address fields, and the ifc nand
+ * structure addr field to point to the correct IFC buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+ int buf_num;
+
+ ifc_nand_ctrl->page = page_addr;
+ /* Program ROW0/COL0 */
+ ifc_out32(page_addr, &ifc->ifc_nand.row0);
+ ifc_out32((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
+
+ buf_num = page_addr & priv->bufnum_mask;
+
+ ifc_nand_ctrl->addr = priv->vbase + buf_num * (mtd->writesize * 2);
+ ifc_nand_ctrl->index = column;
+
+ /* for OOB data point to the second half of the buffer */
+ if (oob)
+ ifc_nand_ctrl->index += mtd->writesize;
+}
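+
+ /*
+ * Worked example (hypothetical values): with a 2KiB page and
+ * bufnum_mask = 3, page_addr = 5 gives buf_num = 1, so the addr field
+ * points at priv->vbase + 1 * (2048 * 2) and an OOB access starts at
+ * index 2048 within that buffer.
+ */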
+
+ /*
+ * Extract the 4-bit ECC status for the given sector from the packed
+ * ECCSTAT word: the number of corrected bitflips, or 15 if the
+ * sector was uncorrectable.
+ */
+static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
+ u32 eccstat, unsigned int bufnum)
+{
+ return (eccstat >> ((3 - bufnum % 4) * 8)) & 15;
+}
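+
+ /*
+ * Worked example: each 32-bit ECCSTAT word packs four sectors, one
+ * status nibble per byte, highest byte first. With eccstat =
+ * 0x00020f00, bufnum 1 yields (eccstat >> 16) & 15 = 2 corrected
+ * bitflips, while bufnum 2 yields 15, which the caller treats as an
+ * uncorrectable sector.
+ */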
+
+/*
+ * execute IFC NAND command and wait for it to complete
+ */
+static void fsl_ifc_run_command(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+ u32 eccstat;
+ int i;
+
+ /* set the chip select for NAND Transaction */
+ ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT,
+ &ifc->ifc_nand.nand_csel);
+
+ dev_vdbg(priv->dev,
+ "%s: fir0=%08x fcr0=%08x\n",
+ __func__,
+ ifc_in32(&ifc->ifc_nand.nand_fir0),
+ ifc_in32(&ifc->ifc_nand.nand_fcr0));
+
+ ctrl->nand_stat = 0;
+
+ /* start read/write seq */
+ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+
+ /* wait for command complete flag or timeout */
+ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+ msecs_to_jiffies(IFC_TIMEOUT_MSECS));
+
+ /* ctrl->nand_stat will be updated from IRQ context */
+ if (!ctrl->nand_stat)
+ dev_err(priv->dev, "Controller is not responding\n");
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_FTOER)
+ dev_err(priv->dev, "NAND Flash Timeout Error\n");
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
+ dev_err(priv->dev, "NAND Flash Write Protect Error\n");
+
+ nctrl->max_bitflips = 0;
+
+ if (nctrl->eccread) {
+ int errors;
+ int bufnum = nctrl->page & priv->bufnum_mask;
+ int sector_start = bufnum * chip->ecc.steps;
+ int sector_end = sector_start + chip->ecc.steps - 1;
+ __be32 __iomem *eccstat_regs;
+
+ eccstat_regs = ifc->ifc_nand.nand_eccstat;
+ eccstat = ifc_in32(&eccstat_regs[sector_start / 4]);
+
+ for (i = sector_start; i <= sector_end; i++) {
+ if (i != sector_start && !(i % 4))
+ eccstat = ifc_in32(&eccstat_regs[i / 4]);
+
+ errors = check_read_ecc(mtd, ctrl, eccstat, i);
+
+ if (errors == 15) {
+ /*
+ * Uncorrectable error.
+ * We'll check for blank pages later.
+ *
+ * We disable ECCER reporting due to erratum
+ * IFC-A002770, so report it now if we see an
+ * uncorrectable error in ECCSTAT.
+ */
+ ctrl->nand_stat |= IFC_NAND_EVTER_STAT_ECCER;
+ continue;
+ }
+
+ mtd->ecc_stats.corrected += errors;
+ nctrl->max_bitflips = max_t(unsigned int,
+ nctrl->max_bitflips,
+ errors);
+ }
+
+ nctrl->eccread = 0;
+ }
+}
+
+static void fsl_ifc_do_read(struct nand_chip *chip,
+ int oob,
+ struct mtd_info *mtd)
+{
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+
+ /* Program FIR/IFC_NAND_FCR0 for Small/Large page */
+ if (mtd->writesize > 512) {
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
+
+ ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
+ } else {
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
+
+ if (oob)
+ ifc_out32(NAND_CMD_READOOB <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ else
+ ifc_out32(NAND_CMD_READ0 <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ }
+}
+
+ /* cmdfunc sends commands to the IFC NAND Machine */
+static void fsl_ifc_cmdfunc(struct nand_chip *chip, unsigned int command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+
+ /* clear the read buffer */
+ ifc_nand_ctrl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ ifc_nand_ctrl->index = 0;
+
+ switch (command) {
+ /* READ0 read the entire buffer to use hardware ECC. */
+ case NAND_CMD_READ0:
+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, 0, page_addr, 0);
+
+ ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ ifc_nand_ctrl->index += column;
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
+ ifc_nand_ctrl->eccread = 1;
+
+ fsl_ifc_do_read(chip, 0, mtd);
+ fsl_ifc_run_command(mtd);
+ return;
+
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, column, page_addr, 1);
+
+ ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+ fsl_ifc_do_read(chip, 1, mtd);
+ fsl_ifc_run_command(mtd);
+
+ return;
+
+ case NAND_CMD_READID:
+ case NAND_CMD_PARAM: {
+ /*
+ * For READID, read the 8 bytes that are currently used.
+ * For PARAM, read all 3 copies of the 256-byte parameter page.
+ */
+ int len = 8;
+ int timing = IFC_FIR_OP_RB;
+ if (command == NAND_CMD_PARAM) {
+ timing = IFC_FIR_OP_RBCD;
+ len = 256 * 3;
+ }
+
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (timing << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(column, &ifc->ifc_nand.row3);
+
+ ifc_out32(len, &ifc->ifc_nand.nand_fbcr);
+ ifc_nand_ctrl->read_bytes = len;
+
+ set_addr(mtd, 0, 0, 0);
+ fsl_ifc_run_command(mtd);
+ return;
+ }
+
+ /* ERASE1 stores the block and page address */
+ case NAND_CMD_ERASE1:
+ set_addr(mtd, 0, page_addr, 0);
+ return;
+
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+
+ ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
+
+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
+ ifc_nand_ctrl->read_bytes = 0;
+ fsl_ifc_run_command(mtd);
+ return;
+
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN: {
+ u32 nand_fcr0;
+ ifc_nand_ctrl->column = column;
+ ifc_nand_ctrl->oob = 0;
+
+ if (mtd->writesize > 512) {
+ nand_fcr0 =
+ (NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
+ (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
+
+ ifc_out32(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
+ } else {
+ nand_fcr0 = ((NAND_CMD_PAGEPROG <<
+ IFC_NAND_FCR0_CMD1_SHIFT) |
+ (NAND_CMD_SEQIN <<
+ IFC_NAND_FCR0_CMD2_SHIFT) |
+ (NAND_CMD_STATUS <<
+ IFC_NAND_FCR0_CMD3_SHIFT));
+
+ ifc_out32(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
+
+ if (column >= mtd->writesize)
+ nand_fcr0 |=
+ NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT;
+ else
+ nand_fcr0 |=
+ NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT;
+ }
+
+ if (column >= mtd->writesize) {
+ /* OOB area --> READOOB */
+ column -= mtd->writesize;
+ ifc_nand_ctrl->oob = 1;
+ }
+ ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
+ set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
+ return;
+ }
+
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG: {
+ if (ifc_nand_ctrl->oob) {
+ ifc_out32(ifc_nand_ctrl->index -
+ ifc_nand_ctrl->column,
+ &ifc->ifc_nand.nand_fbcr);
+ } else {
+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
+ }
+
+ fsl_ifc_run_command(mtd);
+ return;
+ }
+
+ case NAND_CMD_STATUS: {
+ void __iomem *addr;
+
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, 0, 0, 0);
+ ifc_nand_ctrl->read_bytes = 1;
+
+ fsl_ifc_run_command(mtd);
+
+ /*
+ * The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ addr = ifc_nand_ctrl->addr;
+ if (chip->options & NAND_BUSWIDTH_16)
+ ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr);
+ else
+ ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr);
+ return;
+ }
+
+ case NAND_CMD_RESET:
+ ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ fsl_ifc_run_command(mtd);
+ return;
+
+ default:
+ dev_err(priv->dev, "%s: error, unsupported command 0x%x.\n",
+ __func__, command);
+ }
+}
+
+static void fsl_ifc_select_chip(struct nand_chip *chip, int cs)
+{
+ /* The hardware does not seem to support multiple
+ * chips per bank.
+ */
+}
+
+/*
+ * Write buf to the IFC NAND Controller Data Buffer
+ */
+static void fsl_ifc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+ if (len <= 0) {
+ dev_err(priv->dev, "%s: len %d bytes", __func__, len);
+ return;
+ }
+
+ if ((unsigned int)len > bufsize - ifc_nand_ctrl->index) {
+ dev_err(priv->dev,
+ "%s: beyond end of buffer (%d requested, %u available)\n",
+ __func__, len, bufsize - ifc_nand_ctrl->index);
+ len = bufsize - ifc_nand_ctrl->index;
+ }
+
+ memcpy_toio(ifc_nand_ctrl->addr + ifc_nand_ctrl->index, buf, len);
+ ifc_nand_ctrl->index += len;
+}
+
+/*
+ * Read a byte from the IFC hardware buffer;
+ * read function for 8-bit buswidth.
+ */
+static uint8_t fsl_ifc_read_byte(struct nand_chip *chip)
+{
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ unsigned int offset;
+
+ /*
+ * If there are still bytes in the IFC buffer, then use the
+ * next byte.
+ */
+ if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
+ offset = ifc_nand_ctrl->index++;
+ return ifc_in8(ifc_nand_ctrl->addr + offset);
+ }
+
+ dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
+ return ERR_BYTE;
+}
+
+/*
+ * Read two bytes from the IFC hardware buffer;
+ * read function for 16-bit buswidth.
+ */
+static uint8_t fsl_ifc_read_byte16(struct nand_chip *chip)
+{
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ uint16_t data;
+
+ /*
+	 * If there are still bytes in the IFC buffer, read the next
+	 * 16-bit word and return its low byte.
+ */
+ if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
+ data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
+ ifc_nand_ctrl->index += 2;
+ return (uint8_t) data;
+ }
+
+ dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
+ return ERR_BYTE;
+}
+
+/*
+ * Read from the IFC Controller Data Buffer
+ */
+static void fsl_ifc_read_buf(struct nand_chip *chip, u8 *buf, int len)
+{
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ int avail;
+
+ if (len < 0) {
+ dev_err(priv->dev, "%s: len %d bytes", __func__, len);
+ return;
+ }
+
+ avail = min((unsigned int)len,
+ ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index);
+ memcpy_fromio(buf, ifc_nand_ctrl->addr + ifc_nand_ctrl->index, avail);
+ ifc_nand_ctrl->index += avail;
+
+ if (len > avail)
+ dev_err(priv->dev,
+ "%s: beyond end of buffer (%d requested, %d available)\n",
+ __func__, len, avail);
+}
+
+/*
+ * This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_ifc_wait(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+ u32 nand_fsr;
+ int status;
+
+ /* Use READ_STATUS command, but wait for the device to be ready */
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, 0, 0, 0);
+ ifc_nand_ctrl->read_bytes = 1;
+
+ fsl_ifc_run_command(mtd);
+
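+	/* The status byte returned by the RDSTAT op lands in the top byte */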
+ nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
+ status = nand_fsr >> 24;
+ /*
+ * The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ return status | NAND_STATUS_WP;
+}
+
+/*
+ * The controller does not check for bitflips in erased pages,
+ * therefore software must check instead.
+ */
+static int check_erased_page(struct nand_chip *chip, u8 *buf)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *ecc = chip->oob_poi;
+ const int ecc_size = chip->ecc.bytes;
+ const int pkt_size = chip->ecc.size;
+ int i, res, bitflips = 0;
+ struct mtd_oob_region oobregion = { };
+
+ mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ ecc += oobregion.offset;
+
+ for (i = 0; i < chip->ecc.steps; ++i) {
+ res = nand_check_erased_ecc_chunk(buf, pkt_size, ecc, ecc_size,
+ NULL, 0,
+ chip->ecc.strength);
+ if (res < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += res;
+
+ bitflips = max(res, bitflips);
+ buf += pkt_size;
+ ecc += ecc_size;
+ }
+
+ return bitflips;
+}
+
+static int fsl_ifc_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (oob_required)
+ fsl_ifc_read_buf(chip, chip->oob_poi, mtd->oobsize);
+
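+	/*
+	 * An ECC error may simply mean the page is erased; check for that
+	 * before counting it as a failure.
+	 */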
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER) {
+ if (!oob_required)
+ fsl_ifc_read_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return check_erased_page(chip, buf);
+ }
+
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+ mtd->ecc_stats.failed++;
+
+ return nctrl->max_bitflips;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_ifc_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ fsl_ifc_write_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int fsl_ifc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
+ u32 csor;
+
+ csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);
+
+ /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
+ if (csor & CSOR_NAND_ECC_DEC_EN) {
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops);
+
+ /* Hardware generates ECC per 512 Bytes */
+ chip->ecc.size = 512;
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) {
+ chip->ecc.bytes = 8;
+ chip->ecc.strength = 4;
+ } else {
+ chip->ecc.bytes = 16;
+ chip->ecc.strength = 8;
+ }
+ } else {
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ }
+
+ dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
+ nanddev_ntargets(&chip->base));
+ dev_dbg(priv->dev, "%s: nand->chipsize = %lld\n", __func__,
+ nanddev_target_size(&chip->base));
+ dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
+ chip->pagemask);
+ dev_dbg(priv->dev, "%s: nand->legacy.chip_delay = %d\n", __func__,
+ chip->legacy.chip_delay);
+ dev_dbg(priv->dev, "%s: nand->badblockpos = %d\n", __func__,
+ chip->badblockpos);
+ dev_dbg(priv->dev, "%s: nand->chip_shift = %d\n", __func__,
+ chip->chip_shift);
+ dev_dbg(priv->dev, "%s: nand->page_shift = %d\n", __func__,
+ chip->page_shift);
+ dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
+ chip->phys_erase_shift);
+ dev_dbg(priv->dev, "%s: nand->ecc.engine_type = %d\n", __func__,
+ chip->ecc.engine_type);
+ dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
+ chip->ecc.steps);
+ dev_dbg(priv->dev, "%s: nand->ecc.bytes = %d\n", __func__,
+ chip->ecc.bytes);
+ dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__,
+ chip->ecc.total);
+ dev_dbg(priv->dev, "%s: mtd->ooblayout = %p\n", __func__,
+ mtd->ooblayout);
+ dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags);
+ dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size);
+ dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__,
+ mtd->erasesize);
+ dev_dbg(priv->dev, "%s: mtd->writesize = %d\n", __func__,
+ mtd->writesize);
+ dev_dbg(priv->dev, "%s: mtd->oobsize = %d\n", __func__,
+ mtd->oobsize);
+
+ return 0;
+}
+
+static const struct nand_controller_ops fsl_ifc_controller_ops = {
+ .attach_chip = fsl_ifc_attach_chip,
+};
+
+static int fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
+{
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
+ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
+ uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
+ uint32_t cs = priv->bank;
+
+ if (ctrl->version < FSL_IFC_VERSION_1_1_0)
+ return 0;
+
+ if (ctrl->version > FSL_IFC_VERSION_1_1_0) {
+ u32 ncfgr, status;
+ int ret;
+
+ /* Trigger auto initialization */
+ ncfgr = ifc_in32(&ifc_runtime->ifc_nand.ncfgr);
+ ifc_out32(ncfgr | IFC_NAND_NCFGR_SRAM_INIT_EN, &ifc_runtime->ifc_nand.ncfgr);
+
+ /* Wait until done */
+ ret = readx_poll_timeout(ifc_in32, &ifc_runtime->ifc_nand.ncfgr,
+ status, !(status & IFC_NAND_NCFGR_SRAM_INIT_EN),
+ 10, IFC_TIMEOUT_MSECS * 1000);
+ if (ret)
+ dev_err(priv->dev, "Failed to initialize SRAM!\n");
+
+ return ret;
+ }
+
+ /* Save CSOR and CSOR_ext */
+ csor = ifc_in32(&ifc_global->csor_cs[cs].csor);
+ csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext);
+
+	/* Change PageSize to 8K and SpareSize to 1K */
+ csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
+ ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor);
+ ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext);
+
+ /* READID */
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc_runtime->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc_runtime->ifc_nand.nand_fcr0);
+ ifc_out32(0x0, &ifc_runtime->ifc_nand.row3);
+
+ ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr);
+
+ /* Program ROW0/COL0 */
+ ifc_out32(0x0, &ifc_runtime->ifc_nand.row0);
+ ifc_out32(0x0, &ifc_runtime->ifc_nand.col0);
+
+ /* set the chip select for NAND Transaction */
+ ifc_out32(cs << IFC_NAND_CSEL_SHIFT,
+ &ifc_runtime->ifc_nand.nand_csel);
+
+ /* start read seq */
+ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT,
+ &ifc_runtime->ifc_nand.nandseq_strt);
+
+ /* wait for command complete flag or timeout */
+ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+ msecs_to_jiffies(IFC_TIMEOUT_MSECS));
+
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) {
+		pr_err("fsl-ifc: Failed to initialize SRAM\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Restore CSOR and CSOR_ext */
+ ifc_out32(csor, &ifc_global->csor_cs[cs].csor);
+ ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext);
+
+ return 0;
+}
+
+static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
+{
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
+ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
+ struct nand_chip *chip = &priv->chip;
+ struct mtd_info *mtd = nand_to_mtd(&priv->chip);
+ u32 csor;
+ int ret;
+
+ /* Fill in fsl_ifc_mtd structure */
+ mtd->dev.parent = priv->dev;
+ nand_set_flash_node(chip, priv->dev->of_node);
+
+ /* fill in nand_chip structure */
+ /* set up function call table */
+ if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr))
+ & CSPR_PORT_SIZE_16)
+ chip->legacy.read_byte = fsl_ifc_read_byte16;
+ else
+ chip->legacy.read_byte = fsl_ifc_read_byte;
+
+ chip->legacy.write_buf = fsl_ifc_write_buf;
+ chip->legacy.read_buf = fsl_ifc_read_buf;
+ chip->legacy.select_chip = fsl_ifc_select_chip;
+ chip->legacy.cmdfunc = fsl_ifc_cmdfunc;
+ chip->legacy.waitfunc = fsl_ifc_wait;
+ chip->legacy.set_features = nand_get_set_features_notsupp;
+ chip->legacy.get_features = nand_get_set_features_notsupp;
+
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+
+ ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr);
+
+ /* set up nand options */
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ chip->options = NAND_NO_SUBPAGE_WRITE;
+
+ if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)
+ & CSPR_PORT_SIZE_16) {
+ chip->legacy.read_byte = fsl_ifc_read_byte16;
+ chip->options |= NAND_BUSWIDTH_16;
+ } else {
+ chip->legacy.read_byte = fsl_ifc_read_byte;
+ }
+
+ chip->controller = &ifc_nand_ctrl->controller;
+ nand_set_controller_data(chip, priv);
+
+ chip->ecc.read_page = fsl_ifc_read_page;
+ chip->ecc.write_page = fsl_ifc_write_page;
+
+ csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);
+
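+	/* The 8KB internal SRAM holds (bufnum_mask + 1) page-sized buffers */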
+ switch (csor & CSOR_NAND_PGS_MASK) {
+ case CSOR_NAND_PGS_512:
+ if (!(chip->options & NAND_BUSWIDTH_16)) {
+ /* Avoid conflict with bad block marker */
+ bbt_main_descr.offs = 0;
+ bbt_mirror_descr.offs = 0;
+ }
+
+ priv->bufnum_mask = 15;
+ break;
+
+ case CSOR_NAND_PGS_2K:
+ priv->bufnum_mask = 3;
+ break;
+
+ case CSOR_NAND_PGS_4K:
+ priv->bufnum_mask = 1;
+ break;
+
+ case CSOR_NAND_PGS_8K:
+ priv->bufnum_mask = 0;
+ break;
+
+ default:
+ dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
+ return -ENODEV;
+ }
+
+ ret = fsl_ifc_sram_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+	 * IFC version 2.0.0 has 16KB of internal SRAM, twice the 8KB of
+	 * older versions, so the bufnum mask must be doubled accordingly.
+ */
+ if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
+ priv->bufnum_mask = (priv->bufnum_mask * 2) + 1;
+
+ return 0;
+}
+
+static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
+{
+ struct mtd_info *mtd = nand_to_mtd(&priv->chip);
+
+ kfree(mtd->name);
+
+ if (priv->vbase)
+ iounmap(priv->vbase);
+
+ ifc_nand_ctrl->chips[priv->bank] = NULL;
+
+ return 0;
+}
+
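+/*
+ * A bank matches when its chip select is valid, programmed for NAND and
+ * its base address corresponds to the flash region being probed.
+ */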
+static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank,
+ phys_addr_t addr)
+{
+ u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr);
+
+ if (!(cspr & CSPR_V))
+ return 0;
+ if ((cspr & CSPR_MSEL) != CSPR_MSEL_NAND)
+ return 0;
+
+ return (cspr & CSPR_BA) == convert_ifc_address(addr);
+}
+
+static DEFINE_MUTEX(fsl_ifc_nand_mutex);
+
+static int fsl_ifc_nand_probe(struct platform_device *dev)
+{
+ struct fsl_ifc_runtime __iomem *ifc;
+ struct fsl_ifc_mtd *priv;
+ struct resource res;
+ static const char *part_probe_types[]
+ = { "cmdlinepart", "RedBoot", "ofpart", NULL };
+ int ret;
+ int bank;
+ struct device_node *node = dev->dev.of_node;
+ struct mtd_info *mtd;
+
+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs)
+ return -ENODEV;
+ ifc = fsl_ifc_ctrl_dev->rregs;
+
+ /* get, allocate and map the memory resource */
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(&dev->dev, "%s: failed to get resource\n", __func__);
+ return ret;
+ }
+
+ /* find which chip select it is connected to */
+ for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) {
+ if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start))
+ break;
+ }
+
+ if (bank >= fsl_ifc_ctrl_dev->banks) {
+ dev_err(&dev->dev, "%s: address did not match any chip selects\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_lock(&fsl_ifc_nand_mutex);
+ if (!fsl_ifc_ctrl_dev->nand) {
+ ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
+ if (!ifc_nand_ctrl) {
+ mutex_unlock(&fsl_ifc_nand_mutex);
+ return -ENOMEM;
+ }
+
+ ifc_nand_ctrl->read_bytes = 0;
+ ifc_nand_ctrl->index = 0;
+ ifc_nand_ctrl->addr = NULL;
+ fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;
+
+ nand_controller_init(&ifc_nand_ctrl->controller);
+ } else {
+ ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
+ }
+ mutex_unlock(&fsl_ifc_nand_mutex);
+
+ ifc_nand_ctrl->chips[bank] = priv;
+ priv->bank = bank;
+ priv->ctrl = fsl_ifc_ctrl_dev;
+ priv->dev = &dev->dev;
+
+ priv->vbase = ioremap(res.start, resource_size(&res));
+ if (!priv->vbase) {
+ dev_err(priv->dev, "%s: failed to map chip region\n", __func__);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev_set_drvdata(priv->dev, priv);
+
+ ifc_out32(IFC_NAND_EVTER_EN_OPC_EN |
+ IFC_NAND_EVTER_EN_FTOER_EN |
+ IFC_NAND_EVTER_EN_WPER_EN,
+ &ifc->ifc_nand.nand_evter_en);
+
+ /* enable NAND Machine Interrupts */
+ ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN |
+ IFC_NAND_EVTER_INTR_FTOERIR_EN |
+ IFC_NAND_EVTER_INTR_WPERIR_EN,
+ &ifc->ifc_nand.nand_evter_intr_en);
+
+ mtd = nand_to_mtd(&priv->chip);
+ mtd->name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
+ if (!mtd->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = fsl_ifc_chip_init(priv);
+ if (ret)
+ goto err;
+
+ priv->chip.controller->ops = &fsl_ifc_controller_ops;
+ ret = nand_scan(&priv->chip, 1);
+ if (ret)
+ goto err;
+
+	/* First look for a RedBoot table or partitions on the command
+	 * line; these take precedence over device tree information. */
+ ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
+ if (ret)
+ goto cleanup_nand;
+
+ dev_info(priv->dev, "IFC NAND device at 0x%llx, bank %d\n",
+ (unsigned long long)res.start, priv->bank);
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(&priv->chip);
+err:
+ fsl_ifc_chip_remove(priv);
+
+ return ret;
+}
+
+static void fsl_ifc_nand_remove(struct platform_device *dev)
+{
+ struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
+ struct nand_chip *chip = &priv->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ fsl_ifc_chip_remove(priv);
+
+ mutex_lock(&fsl_ifc_nand_mutex);
+ ifc_nand_ctrl->counter--;
+ if (!ifc_nand_ctrl->counter) {
+ fsl_ifc_ctrl_dev->nand = NULL;
+ kfree(ifc_nand_ctrl);
+ }
+ mutex_unlock(&fsl_ifc_nand_mutex);
+}
+
+static const struct of_device_id fsl_ifc_nand_match[] = {
+ {
+ .compatible = "fsl,ifc-nand",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsl_ifc_nand_match);
+
+static struct platform_driver fsl_ifc_nand_driver = {
+ .driver = {
+ .name = "fsl,ifc-nand",
+ .of_match_table = fsl_ifc_nand_match,
+ },
+ .probe = fsl_ifc_nand_probe,
+ .remove_new = fsl_ifc_nand_remove,
+};
+
+module_platform_driver(fsl_ifc_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale");
+MODULE_DESCRIPTION("Freescale Integrated Flash Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
new file mode 100644
index 0000000000..315e9d2b57
--- /dev/null
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale UPM NAND driver.
+ *
+ * Copyright © 2007-2008 MontaVista Software, Inc.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <asm/fsl_lbc.h>
+
+struct fsl_upm_nand {
+ struct nand_controller base;
+ struct device *dev;
+ struct nand_chip chip;
+ struct fsl_upm upm;
+ uint8_t upm_addr_offset;
+ uint8_t upm_cmd_offset;
+ void __iomem *io_base;
+ struct gpio_desc *rnb_gpio[NAND_MAX_CHIPS];
+ uint32_t mchip_offsets[NAND_MAX_CHIPS];
+ uint32_t mchip_count;
+ uint32_t mchip_number;
+};
+
+static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
+{
+ return container_of(mtd_to_nand(mtdinfo), struct fsl_upm_nand,
+ chip);
+}
+
+static int fun_chip_init(struct fsl_upm_nand *fun,
+ const struct device_node *upm_np,
+ const struct resource *io_res)
+{
+ struct mtd_info *mtd = nand_to_mtd(&fun->chip);
+ int ret;
+ struct device_node *flash_np;
+
+ fun->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ fun->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
+ fun->chip.controller = &fun->base;
+ mtd->dev.parent = fun->dev;
+
+ flash_np = of_get_next_child(upm_np, NULL);
+ if (!flash_np)
+ return -ENODEV;
+
+ nand_set_flash_node(&fun->chip, flash_np);
+ mtd->name = devm_kasprintf(fun->dev, GFP_KERNEL, "0x%llx.%pOFn",
+ (u64)io_res->start,
+ flash_np);
+ if (!mtd->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = nand_scan(&fun->chip, fun->mchip_count);
+ if (ret)
+ goto err;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+err:
+ of_node_put(flash_np);
+ return ret;
+}
+
+static int func_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
+ u32 mar, reg_offs = fun->mchip_offsets[fun->mchip_number];
+ unsigned int i;
+ const u8 *out;
+ u8 *in;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset);
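+		/* Left-justify the command byte for the configured UPM width */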
+ mar = (instr->ctx.cmd.opcode << (32 - fun->upm.width)) |
+ reg_offs;
+ fsl_upm_run_pattern(&fun->upm, fun->io_base + reg_offs, mar);
+ fsl_upm_end_pattern(&fun->upm);
+ return 0;
+
+ case NAND_OP_ADDR_INSTR:
+ fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset);
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ mar = (instr->ctx.addr.addrs[i] << (32 - fun->upm.width)) |
+ reg_offs;
+ fsl_upm_run_pattern(&fun->upm, fun->io_base + reg_offs, mar);
+ }
+ fsl_upm_end_pattern(&fun->upm);
+ return 0;
+
+ case NAND_OP_DATA_IN_INSTR:
+ in = instr->ctx.data.buf.in;
+ for (i = 0; i < instr->ctx.data.len; i++)
+ in[i] = in_8(fun->io_base + reg_offs);
+ return 0;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ out = instr->ctx.data.buf.out;
+ for (i = 0; i < instr->ctx.data.len; i++)
+ out_8(fun->io_base + reg_offs, out[i]);
+ return 0;
+
+ case NAND_OP_WAITRDY_INSTR:
+ if (!fun->rnb_gpio[fun->mchip_number])
+ return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
+
+ return nand_gpio_waitrdy(chip, fun->rnb_gpio[fun->mchip_number],
+ instr->ctx.waitrdy.timeout_ms);
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+ bool check_only)
+{
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
+ unsigned int i;
+ int ret;
+
+ if (op->cs >= NAND_MAX_CHIPS)
+ return -EINVAL;
+
+ if (check_only)
+ return 0;
+
+ fun->mchip_number = op->cs;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = func_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ return ret;
+
+ if (op->instrs[i].delay_ns)
+ ndelay(op->instrs[i].delay_ns);
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops fun_ops = {
+ .exec_op = fun_exec_op,
+};
+
+static int fun_probe(struct platform_device *ofdev)
+{
+ struct fsl_upm_nand *fun;
+ struct resource *io_res;
+ const __be32 *prop;
+ int ret;
+ int size;
+ int i;
+
+ fun = devm_kzalloc(&ofdev->dev, sizeof(*fun), GFP_KERNEL);
+ if (!fun)
+ return -ENOMEM;
+
+ fun->io_base = devm_platform_get_and_ioremap_resource(ofdev, 0, &io_res);
+ if (IS_ERR(fun->io_base))
+ return PTR_ERR(fun->io_base);
+
+ ret = fsl_upm_find(io_res->start, &fun->upm);
+ if (ret) {
+ dev_err(&ofdev->dev, "can't find UPM\n");
+ return ret;
+ }
+
+ prop = of_get_property(ofdev->dev.of_node, "fsl,upm-addr-offset",
+ &size);
+ if (!prop || size != sizeof(uint32_t)) {
+ dev_err(&ofdev->dev, "can't get UPM address offset\n");
+ return -EINVAL;
+ }
+ fun->upm_addr_offset = *prop;
+
+ prop = of_get_property(ofdev->dev.of_node, "fsl,upm-cmd-offset", &size);
+ if (!prop || size != sizeof(uint32_t)) {
+ dev_err(&ofdev->dev, "can't get UPM command offset\n");
+ return -EINVAL;
+ }
+ fun->upm_cmd_offset = *prop;
+
+ prop = of_get_property(ofdev->dev.of_node,
+ "fsl,upm-addr-line-cs-offsets", &size);
+ if (prop && (size / sizeof(uint32_t)) > 0) {
+ fun->mchip_count = size / sizeof(uint32_t);
+ if (fun->mchip_count >= NAND_MAX_CHIPS) {
+			dev_err(&ofdev->dev, "too many chips\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < fun->mchip_count; i++)
+ fun->mchip_offsets[i] = be32_to_cpu(prop[i]);
+ } else {
+ fun->mchip_count = 1;
+ }
+
+ for (i = 0; i < fun->mchip_count; i++) {
+ fun->rnb_gpio[i] = devm_gpiod_get_index_optional(&ofdev->dev,
+ NULL, i,
+ GPIOD_IN);
+ if (IS_ERR(fun->rnb_gpio[i])) {
+ dev_err(&ofdev->dev, "RNB gpio #%d is invalid\n", i);
+ return PTR_ERR(fun->rnb_gpio[i]);
+ }
+ }
+
+ nand_controller_init(&fun->base);
+ fun->base.ops = &fun_ops;
+ fun->dev = &ofdev->dev;
+
+ ret = fun_chip_init(fun, ofdev->dev.of_node, io_res);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(&ofdev->dev, fun);
+
+ return 0;
+}
+
+static void fun_remove(struct platform_device *ofdev)
+{
+ struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
+ struct nand_chip *chip = &fun->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(chip);
+}
+
+static const struct of_device_id of_fun_match[] = {
+ { .compatible = "fsl,upm-nand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_fun_match);
+
+static struct platform_driver of_fun_driver = {
+ .driver = {
+ .name = "fsl,upm-nand",
+ .of_match_table = of_fun_match,
+ },
+ .probe = fun_probe,
+ .remove_new = fun_remove,
+};
+
+module_platform_driver(of_fun_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_DESCRIPTION("Driver for NAND chips working through Freescale "
+ "LocalBus User-Programmable Machine");
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
new file mode 100644
index 0000000000..811982da35
--- /dev/null
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -0,0 +1,1236 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ST Microelectronics
+ * Flexible Static Memory Controller (FSMC)
+ * Driver for NAND portions
+ *
+ * Copyright © 2010 ST Microelectronics
+ * Vipin Kumar <vipin.kumar@st.com>
+ * Ashish Priyadarshi
+ *
+ * Based on drivers/mtd/nand/nomadik_nand.c (removed in v3.8)
+ * Copyright © 2007 STMicroelectronics Pvt. Ltd.
+ * Copyright © 2009 Alessandro Rubini
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/resource.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/amba/bus.h>
+#include <mtd/mtd-abi.h>
+
+/* fsmc controller registers for NOR flash */
+#define CTRL 0x0
+ /* ctrl register definitions */
+ #define BANK_ENABLE BIT(0)
+ #define MUXED BIT(1)
+ #define NOR_DEV (2 << 2)
+ #define WIDTH_16 BIT(4)
+ #define RSTPWRDWN BIT(6)
+ #define WPROT BIT(7)
+ #define WRT_ENABLE BIT(12)
+ #define WAIT_ENB BIT(13)
+
+#define CTRL_TIM 0x4
+ /* ctrl_tim register definitions */
+
+#define FSMC_NOR_BANK_SZ 0x8
+#define FSMC_NOR_REG_SIZE 0x40
+
+#define FSMC_NOR_REG(base, bank, reg) ((base) + \
+ (FSMC_NOR_BANK_SZ * (bank)) + \
+ (reg))
+
+/* fsmc controller registers for NAND flash */
+#define FSMC_PC 0x00
+ /* pc register definitions */
+ #define FSMC_RESET BIT(0)
+ #define FSMC_WAITON BIT(1)
+ #define FSMC_ENABLE BIT(2)
+ #define FSMC_DEVTYPE_NAND BIT(3)
+ #define FSMC_DEVWID_16 BIT(4)
+ #define FSMC_ECCEN BIT(6)
+ #define FSMC_ECCPLEN_256 BIT(7)
+ #define FSMC_TCLR_SHIFT (9)
+ #define FSMC_TCLR_MASK (0xF)
+ #define FSMC_TAR_SHIFT (13)
+ #define FSMC_TAR_MASK (0xF)
+#define STS 0x04
+ /* sts register definitions */
+ #define FSMC_CODE_RDY BIT(15)
+#define COMM 0x08
+ /* comm register definitions */
+ #define FSMC_TSET_SHIFT 0
+ #define FSMC_TSET_MASK 0xFF
+ #define FSMC_TWAIT_SHIFT 8
+ #define FSMC_TWAIT_MASK 0xFF
+ #define FSMC_THOLD_SHIFT 16
+ #define FSMC_THOLD_MASK 0xFF
+ #define FSMC_THIZ_SHIFT 24
+ #define FSMC_THIZ_MASK 0xFF
+#define ATTRIB 0x0C
+#define IOATA 0x10
+#define ECC1 0x14
+#define ECC2 0x18
+#define ECC3 0x1C
+#define FSMC_NAND_BANK_SZ 0x20
+
+#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
+
+/*
+ * According to SPEAr300 Reference Manual (RM0082)
+ * TOUTDEL = 7ns (Output delay from the flip-flops to the board)
+ * TINDEL = 5ns (Input delay from the board to the flip-flops)
+ */
+#define TOUTDEL 7000
+#define TINDEL 5000
+
+struct fsmc_nand_timings {
+ u8 tclr;
+ u8 tar;
+ u8 thiz;
+ u8 thold;
+ u8 twait;
+ u8 tset;
+};
+
+enum access_mode {
+ USE_DMA_ACCESS = 1,
+ USE_WORD_ACCESS,
+};
+
+/**
+ * struct fsmc_nand_data - structure for FSMC NAND device state
+ *
+ * @base: Inherit from the nand_controller struct
+ * @pid: Part ID on the AMBA PrimeCell format
+ * @nand: Chip related info for a NAND flash.
+ *
+ * @bank: Bank number for probed device.
+ * @dev: Parent device
+ * @mode: Access mode
+ * @clk: Clock structure for FSMC.
+ *
+ * @read_dma_chan: DMA channel for read access
+ * @write_dma_chan: DMA channel for write access to NAND
+ * @dma_access_complete: Completion structure
+ *
+ * @dev_timings: NAND timings
+ *
+ * @data_pa: NAND Physical port for Data.
+ * @data_va: NAND port for Data.
+ * @cmd_va: NAND port for Command.
+ * @addr_va: NAND port for Address.
+ * @regs_va: Registers base address for a given bank.
+ */
+struct fsmc_nand_data {
+ struct nand_controller base;
+ u32 pid;
+ struct nand_chip nand;
+
+ unsigned int bank;
+ struct device *dev;
+ enum access_mode mode;
+ struct clk *clk;
+
+ /* DMA related objects */
+ struct dma_chan *read_dma_chan;
+ struct dma_chan *write_dma_chan;
+ struct completion dma_access_complete;
+
+ struct fsmc_nand_timings *dev_timings;
+
+ dma_addr_t data_pa;
+ void __iomem *data_va;
+ void __iomem *cmd_va;
+ void __iomem *addr_va;
+ void __iomem *regs_va;
+};
+
+static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 2;
+ oobregion->length = 3;
+
+ return 0;
+}
+
+static int fsmc_ecc1_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 8;
+
+ if (section < chip->ecc.steps - 1)
+ oobregion->length = 8;
+ else
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops fsmc_ecc1_ooblayout_ops = {
+ .ecc = fsmc_ecc1_ooblayout_ecc,
+ .free = fsmc_ecc1_ooblayout_free,
+};
+
+/*
+ * ECC placement definitions in oobfree type format.
+ * There are 13 bytes of ECC for every 512-byte block, and they have to be
+ * read consecutively, immediately after the 512-byte data block, for the
+ * hardware to generate the error bit offsets in the 512 bytes of data.
+ */
+static int fsmc_ecc4_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->length = chip->ecc.bytes;
+
+ if (!section && mtd->writesize <= 512)
+ oobregion->offset = 0;
+ else
+ oobregion->offset = (section * 16) + 2;
+
+ return 0;
+}
+
+static int fsmc_ecc4_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 15;
+
+ if (section < chip->ecc.steps - 1)
+ oobregion->length = 3;
+ else
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = {
+ .ecc = fsmc_ecc4_ooblayout_ecc,
+ .free = fsmc_ecc4_ooblayout_free,
+};
+
+static inline struct fsmc_nand_data *nand_to_fsmc(struct nand_chip *chip)
+{
+ return container_of(chip, struct fsmc_nand_data, nand);
+}
+
+/*
+ * fsmc_nand_setup - FSMC (Flexible Static Memory Controller) init routine
+ *
+ * This routine initializes timing parameters related to NAND memory access in
+ * FSMC registers
+ */
+static void fsmc_nand_setup(struct fsmc_nand_data *host,
+ struct fsmc_nand_timings *tims)
+{
+ u32 value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
+ u32 tclr, tar, thiz, thold, twait, tset;
+
+ tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
+ tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
+ thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
+ thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
+ twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
+ tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
+
+ if (host->nand.options & NAND_BUSWIDTH_16)
+ value |= FSMC_DEVWID_16;
+
+ writel_relaxed(value | tclr | tar, host->regs_va + FSMC_PC);
+ writel_relaxed(thiz | thold | twait | tset, host->regs_va + COMM);
+ writel_relaxed(thiz | thold | twait | tset, host->regs_va + ATTRIB);
+}
+
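+/*
+ * Convert generic SDR timings (in picoseconds) into FSMC register fields,
+ * rounding up to HCLK cycles and clamping each field to its mask.
+ */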
+static int fsmc_calc_timings(struct fsmc_nand_data *host,
+ const struct nand_sdr_timings *sdrt,
+ struct fsmc_nand_timings *tims)
+{
+ unsigned long hclk = clk_get_rate(host->clk);
+ unsigned long hclkn = NSEC_PER_SEC / hclk;
+ u32 thiz, thold, twait, tset, twait_min;
+
+ if (sdrt->tRC_min < 30000)
+ return -EOPNOTSUPP;
+
+ tims->tar = DIV_ROUND_UP(sdrt->tAR_min / 1000, hclkn) - 1;
+ if (tims->tar > FSMC_TAR_MASK)
+ tims->tar = FSMC_TAR_MASK;
+ tims->tclr = DIV_ROUND_UP(sdrt->tCLR_min / 1000, hclkn) - 1;
+ if (tims->tclr > FSMC_TCLR_MASK)
+ tims->tclr = FSMC_TCLR_MASK;
+
+ thiz = sdrt->tCS_min - sdrt->tWP_min;
+ tims->thiz = DIV_ROUND_UP(thiz / 1000, hclkn);
+
+ thold = sdrt->tDH_min;
+ if (thold < sdrt->tCH_min)
+ thold = sdrt->tCH_min;
+ if (thold < sdrt->tCLH_min)
+ thold = sdrt->tCLH_min;
+ if (thold < sdrt->tWH_min)
+ thold = sdrt->tWH_min;
+ if (thold < sdrt->tALH_min)
+ thold = sdrt->tALH_min;
+ if (thold < sdrt->tREH_min)
+ thold = sdrt->tREH_min;
+ tims->thold = DIV_ROUND_UP(thold / 1000, hclkn);
+ if (tims->thold == 0)
+ tims->thold = 1;
+ else if (tims->thold > FSMC_THOLD_MASK)
+ tims->thold = FSMC_THOLD_MASK;
+
+ tset = max(sdrt->tCS_min - sdrt->tWP_min,
+ sdrt->tCEA_max - sdrt->tREA_max);
+ tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1;
+ if (tims->tset == 0)
+ tims->tset = 1;
+ else if (tims->tset > FSMC_TSET_MASK)
+ tims->tset = FSMC_TSET_MASK;
+
+ /*
+	 * According to the SPEAr300 Reference Manual (RM0082), which gives more
+	 * information on FSMC timings than the SPEAr600 one (RM0305),
+ * twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL
+ */
+ twait_min = sdrt->tCEA_max - ((tims->tset + 1) * hclkn * 1000)
+ + TOUTDEL + TINDEL;
+ twait = max3(sdrt->tRP_min, sdrt->tWP_min, twait_min);
+
+ tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
+ if (tims->twait == 0)
+ tims->twait = 1;
+ else if (tims->twait > FSMC_TWAIT_MASK)
+ tims->twait = FSMC_TWAIT_MASK;
+
+ return 0;
+}
+
+static int fsmc_setup_interface(struct nand_chip *nand, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct fsmc_nand_data *host = nand_to_fsmc(nand);
+ struct fsmc_nand_timings tims;
+ const struct nand_sdr_timings *sdrt;
+ int ret;
+
+ sdrt = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdrt))
+ return PTR_ERR(sdrt);
+
+ ret = fsmc_calc_timings(host, sdrt, &tims);
+ if (ret)
+ return ret;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ fsmc_nand_setup(host, &tims);
+
+ return 0;
+}
+
+/*
+ * fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers
+ */
+static void fsmc_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+
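+	/*
+	 * Keep ECCPLEN_256 cleared (512-byte ECC step) and toggle ECCEN off
+	 * and on so the ECC logic restarts for the next transfer.
+	 */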
+ writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCPLEN_256,
+ host->regs_va + FSMC_PC);
+ writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCEN,
+ host->regs_va + FSMC_PC);
+ writel_relaxed(readl(host->regs_va + FSMC_PC) | FSMC_ECCEN,
+ host->regs_va + FSMC_PC);
+}
+
+/*
+ * fsmc_read_hwecc_ecc4 - Hardware ECC calculator for ecc4 option supported by
+ * FSMC. ECC is 13 bytes for 512 bytes of data (supports error correction up to
+ * max of 8-bits)
+ */
+static int fsmc_read_hwecc_ecc4(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
+{
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+ u32 ecc_tmp;
+ unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
+
+ do {
+ if (readl_relaxed(host->regs_va + STS) & FSMC_CODE_RDY)
+ break;
+
+ cond_resched();
+ } while (!time_after_eq(jiffies, deadline));
+
+ if (time_after_eq(jiffies, deadline)) {
+ dev_err(host->dev, "calculate ecc timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ ecc_tmp = readl_relaxed(host->regs_va + ECC1);
+ ecc[0] = ecc_tmp;
+ ecc[1] = ecc_tmp >> 8;
+ ecc[2] = ecc_tmp >> 16;
+ ecc[3] = ecc_tmp >> 24;
+
+ ecc_tmp = readl_relaxed(host->regs_va + ECC2);
+ ecc[4] = ecc_tmp;
+ ecc[5] = ecc_tmp >> 8;
+ ecc[6] = ecc_tmp >> 16;
+ ecc[7] = ecc_tmp >> 24;
+
+ ecc_tmp = readl_relaxed(host->regs_va + ECC3);
+ ecc[8] = ecc_tmp;
+ ecc[9] = ecc_tmp >> 8;
+ ecc[10] = ecc_tmp >> 16;
+ ecc[11] = ecc_tmp >> 24;
+
+ ecc_tmp = readl_relaxed(host->regs_va + STS);
+ ecc[12] = ecc_tmp >> 16;
+
+ return 0;
+}
+
+/*
+ * fsmc_read_hwecc_ecc1 - Hardware ECC calculator for ecc1 option supported by
+ * FSMC. ECC is 3 bytes for 512 bytes of data (supports error correction up to
+ * max of 1-bit)
+ */
+static int fsmc_read_hwecc_ecc1(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
+{
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+ u32 ecc_tmp;
+
+ ecc_tmp = readl_relaxed(host->regs_va + ECC1);
+ ecc[0] = ecc_tmp;
+ ecc[1] = ecc_tmp >> 8;
+ ecc[2] = ecc_tmp >> 16;
+
+ return 0;
+}
+
+static int fsmc_correct_ecc1(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER;
+
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, sm_order);
+}
+
+/* Count the number of 0 bits in buff, up to a maximum of max_bits */
+static int count_written_bits(u8 *buff, int size, int max_bits)
+{
+ int k, written_bits = 0;
+
+ for (k = 0; k < size; k++) {
+ written_bits += hweight8(~buff[k]);
+ if (written_bits > max_bits)
+ break;
+ }
+
+ return written_bits;
+}
+
+static void dma_complete(void *param)
+{
+ struct fsmc_nand_data *host = param;
+
+ complete(&host->dma_access_complete);
+}
+
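+/*
+ * Perform a single DMA memcpy between @buffer and the NAND data port,
+ * waiting up to three seconds for the transfer to complete.
+ */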
+static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
+ enum dma_data_direction direction)
+{
+ struct dma_chan *chan;
+ struct dma_device *dma_dev;
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t dma_dst, dma_src, dma_addr;
+ dma_cookie_t cookie;
+ unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int ret;
+ unsigned long time_left;
+
+ if (direction == DMA_TO_DEVICE)
+ chan = host->write_dma_chan;
+ else if (direction == DMA_FROM_DEVICE)
+ chan = host->read_dma_chan;
+ else
+ return -EINVAL;
+
+ dma_dev = chan->device;
+ dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
+
+ if (direction == DMA_TO_DEVICE) {
+ dma_src = dma_addr;
+ dma_dst = host->data_pa;
+ } else {
+ dma_src = host->data_pa;
+ dma_dst = dma_addr;
+ }
+
+ tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
+ len, flags);
+ if (!tx) {
+ dev_err(host->dev, "device_prep_dma_memcpy error\n");
+ ret = -EIO;
+ goto unmap_dma;
+ }
+
+ tx->callback = dma_complete;
+ tx->callback_param = host;
+ cookie = tx->tx_submit(tx);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(host->dev, "dma_submit_error %d\n", cookie);
+ goto unmap_dma;
+ }
+
+ dma_async_issue_pending(chan);
+
+ time_left =
+ wait_for_completion_timeout(&host->dma_access_complete,
+ msecs_to_jiffies(3000));
+ if (time_left == 0) {
+ dmaengine_terminate_all(chan);
+ dev_err(host->dev, "wait_for_completion_timeout\n");
+ ret = -ETIMEDOUT;
+ goto unmap_dma;
+ }
+
+ ret = 0;
+
+unmap_dma:
+ dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
+
+ return ret;
+}
+
+/*
+ * fsmc_write_buf - write buffer to chip
+ * @host: FSMC NAND controller
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void fsmc_write_buf(struct fsmc_nand_data *host, const u8 *buf,
+ int len)
+{
+ int i;
+
+ if (IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ IS_ALIGNED(len, sizeof(u32))) {
+ u32 *p = (u32 *)buf;
+
+ len = len >> 2;
+ for (i = 0; i < len; i++)
+ writel_relaxed(p[i], host->data_va);
+ } else {
+ for (i = 0; i < len; i++)
+ writeb_relaxed(buf[i], host->data_va);
+ }
+}
+
+/*
+ * fsmc_read_buf - read chip data into buffer
+ * @host: FSMC NAND controller
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void fsmc_read_buf(struct fsmc_nand_data *host, u8 *buf, int len)
+{
+ int i;
+
+ if (IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ IS_ALIGNED(len, sizeof(u32))) {
+ u32 *p = (u32 *)buf;
+
+ len = len >> 2;
+ for (i = 0; i < len; i++)
+ p[i] = readl_relaxed(host->data_va);
+ } else {
+ for (i = 0; i < len; i++)
+ buf[i] = readb_relaxed(host->data_va);
+ }
+}
+
+/*
+ * fsmc_read_buf_dma - read chip data into buffer
+ * @host: FSMC NAND controller
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void fsmc_read_buf_dma(struct fsmc_nand_data *host, u8 *buf,
+ int len)
+{
+ dma_xfer(host, buf, len, DMA_FROM_DEVICE);
+}
+
+/*
+ * fsmc_write_buf_dma - write buffer to chip
+ * @host: FSMC NAND controller
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf,
+ int len)
+{
+ dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
+}
+
+/*
+ * fsmc_exec_op - hook called by the core to execute NAND operations
+ *
+ * This controller is simple enough that it does not need the parser
+ * provided by the core; instead, every situation is handled here.
+ */
+static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+ bool check_only)
+{
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+ const struct nand_op_instr *instr = NULL;
+ int ret = 0;
+ unsigned int op_id;
+ int i;
+
+ if (check_only)
+ return 0;
+
+ pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ nand_op_trace(" ", instr);
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb_relaxed(instr->ctx.cmd.opcode, host->cmd_va);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ writeb_relaxed(instr->ctx.addr.addrs[i],
+ host->addr_va);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ if (host->mode == USE_DMA_ACCESS)
+ fsmc_read_buf_dma(host, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ else
+ fsmc_read_buf(host, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ if (host->mode == USE_DMA_ACCESS)
+ fsmc_write_buf_dma(host,
+ instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ else
+ fsmc_write_buf(host, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = nand_soft_waitrdy(chip,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+ }
+
+ return ret;
+}
+
+/*
+ * fsmc_read_page_hwecc
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller expects OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * This routine is needed for FSMC revision 8, where reading from the NAND
+ * chip has to be performed in a strict sequence:
+ * data (512 bytes) -> ecc (13 bytes)
+ * After this read, the FSMC hardware generates and reports the error bit
+ * offsets (up to a maximum of 8 bits).
+ */
+static int fsmc_read_page_hwecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, j, s, stat, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ u8 *p = buf;
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *ecc_code = chip->ecc.code_buf;
+ int off, len, ret, group = 0;
+ /*
+	 * ecc_oob is intentionally declared as u16: on 16-bit devices we
+	 * end up reading 14 bytes (7 words) from the OOB area, and the
+	 * local array keeps the buffer word-aligned.
+ */
+ u16 ecc_oob[7];
+ u8 *oob = (u8 *)&ecc_oob[0];
+ unsigned int max_bitflips = 0;
+
+ for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
+ nand_read_page_op(chip, page, s * eccsize, NULL, 0);
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
+ if (ret)
+ return ret;
+
+ for (j = 0; j < eccbytes;) {
+ struct mtd_oob_region oobregion;
+
+ ret = mtd_ooblayout_ecc(mtd, group++, &oobregion);
+ if (ret)
+ return ret;
+
+ off = oobregion.offset;
+ len = oobregion.length;
+
+ /*
+			 * The length is intentionally rounded up to a
+			 * multiple of 2 so that at least 13 bytes are
+			 * read even on 16-bit NAND devices.
+ */
+ if (chip->options & NAND_BUSWIDTH_16)
+ len = roundup(len, 2);
+
+ nand_read_oob_op(chip, page, off, oob + j, len);
+ j += len;
+ }
+
+ memcpy(&ecc_code[i], oob, chip->ecc.bytes);
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
+
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ return max_bitflips;
+}
+
+/*
+ * fsmc_bch8_correct_data
+ * @mtd: mtd info structure
+ * @dat: buffer of read data
+ * @read_ecc: ecc read from device spare area
+ * @calc_ecc: ecc calculated from read data
+ *
+ * calc_ecc is a 104 bit information containing maximum of 8 error
+ * offset information of 13 bits each in 512 bytes of read data.
+ */
+static int fsmc_bch8_correct_data(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+ u32 err_idx[8];
+ u32 num_err, i;
+ u32 ecc1, ecc2, ecc3, ecc4;
+
+ num_err = (readl_relaxed(host->regs_va + STS) >> 10) & 0xF;
+
+ /* no bit flipping */
+ if (likely(num_err == 0))
+ return 0;
+
+ /* too many errors */
+ if (unlikely(num_err > 8)) {
+ /*
+ * This is a temporary erase check. A newly erased page read
+ * would result in an ecc error because the oob data is also
+ * erased to FF and the calculated ecc for an FF data is not
+ * FF..FF.
+ * This is a workaround to skip performing correction in case
+ * data is FF..FF
+ *
+ * Logic:
+		 * For every page, the bits written as 0 are counted until the
+		 * count exceeds 8 (the maximum correction capability of the
+		 * FSMC for each 512 + 13 bytes).
+ */
+
+ int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
+ int bits_data = count_written_bits(dat, chip->ecc.size, 8);
+
+ if ((bits_ecc + bits_data) <= 8) {
+ if (bits_data)
+ memset(dat, 0xff, chip->ecc.size);
+ return bits_data;
+ }
+
+ return -EBADMSG;
+ }
+
+ /*
+ * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
+ * |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--|
+ *
+ * calc_ecc is a 104 bit information containing maximum of 8 error
+ * offset information of 13 bits each. calc_ecc is copied into a
+ * u64 array and error offset indexes are populated in err_idx
+ * array
+ */
+ ecc1 = readl_relaxed(host->regs_va + ECC1);
+ ecc2 = readl_relaxed(host->regs_va + ECC2);
+ ecc3 = readl_relaxed(host->regs_va + ECC3);
+ ecc4 = readl_relaxed(host->regs_va + STS);
+
+ err_idx[0] = (ecc1 >> 0) & 0x1FFF;
+ err_idx[1] = (ecc1 >> 13) & 0x1FFF;
+ err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
+ err_idx[3] = (ecc2 >> 7) & 0x1FFF;
+ err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
+ err_idx[5] = (ecc3 >> 1) & 0x1FFF;
+ err_idx[6] = (ecc3 >> 14) & 0x1FFF;
+ err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);
+
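+	/*
+	 * Offsets at or beyond chip->ecc.size * 8 point into the 13 ECC
+	 * bytes and need no correction in the data buffer. The ^= 3
+	 * compensates for the controller's bit numbering, which is reversed
+	 * within each group of four bits.
+	 */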
+ i = 0;
+ while (num_err--) {
+ err_idx[i] ^= 3;
+
+ if (err_idx[i] < chip->ecc.size * 8) {
+ int err = err_idx[i];
+
+ dat[err >> 3] ^= BIT(err & 7);
+ i++;
+ }
+ }
+ return i;
+}
+
+static bool filter(struct dma_chan *chan, void *slave)
+{
+ chan->private = slave;
+ return true;
+}
+
+static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ struct fsmc_nand_data *host,
+ struct nand_chip *nand)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 val;
+ int ret;
+
+ nand->options = 0;
+
+ if (!of_property_read_u32(np, "bank-width", &val)) {
+ if (val == 2) {
+ nand->options |= NAND_BUSWIDTH_16;
+ } else if (val != 1) {
+ dev_err(&pdev->dev, "invalid bank-width %u\n", val);
+ return -EINVAL;
+ }
+ }
+
+ if (of_property_read_bool(np, "nand-skip-bbtscan"))
+ nand->options |= NAND_SKIP_BBTSCAN;
+
+ host->dev_timings = devm_kzalloc(&pdev->dev,
+ sizeof(*host->dev_timings),
+ GFP_KERNEL);
+ if (!host->dev_timings)
+ return -ENOMEM;
+
+ ret = of_property_read_u8_array(np, "timings", (u8 *)host->dev_timings,
+ sizeof(*host->dev_timings));
+ if (ret)
+ host->dev_timings = NULL;
+
+ /* Set default NAND bank to 0 */
+ host->bank = 0;
+ if (!of_property_read_u32(np, "bank", &val)) {
+ if (val > 3) {
+ dev_err(&pdev->dev, "invalid bank %u\n", val);
+ return -EINVAL;
+ }
+ host->bank = val;
+ }
+ return 0;
+}
+
+static int fsmc_nand_attach_chip(struct nand_chip *nand)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct fsmc_nand_data *host = nand_to_fsmc(nand);
+
+ if (nand->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
+ nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ if (!nand->ecc.size)
+ nand->ecc.size = 512;
+
+ if (AMBA_REV_BITS(host->pid) >= 8) {
+ nand->ecc.read_page = fsmc_read_page_hwecc;
+ nand->ecc.calculate = fsmc_read_hwecc_ecc4;
+ nand->ecc.correct = fsmc_bch8_correct_data;
+ nand->ecc.bytes = 13;
+ nand->ecc.strength = 8;
+ }
+
+ if (AMBA_REV_BITS(host->pid) >= 8) {
+ switch (mtd->oobsize) {
+ case 16:
+ case 64:
+ case 128:
+ case 224:
+ case 256:
+ break;
+ default:
+ dev_warn(host->dev,
+ "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
+
+ return 0;
+ }
+
+ switch (nand->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
+ nand->ecc.calculate = fsmc_read_hwecc_ecc1;
+ nand->ecc.correct = fsmc_correct_ecc1;
+ nand->ecc.hwctl = fsmc_enable_hwecc;
+ nand->ecc.bytes = 3;
+ nand->ecc.strength = 1;
+ nand->ecc.options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ if (nand->ecc.algo == NAND_ECC_ALGO_BCH) {
+ dev_info(host->dev,
+ "Using 4-bit SW BCH ECC scheme\n");
+ break;
+ }
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ break;
+
+ default:
+ dev_err(host->dev, "Unsupported ECC mode!\n");
+ return -ENOTSUPP;
+ }
+
+ /*
+ * Don't set layout for BCH4 SW ECC. This will be
+ * generated later during BCH initialization.
+ */
+ if (nand->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ switch (mtd->oobsize) {
+ case 16:
+ case 64:
+ case 128:
+ mtd_set_ooblayout(mtd,
+ &fsmc_ecc1_ooblayout_ops);
+ break;
+ default:
+ dev_warn(host->dev,
+ "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops fsmc_nand_controller_ops = {
+ .attach_chip = fsmc_nand_attach_chip,
+ .exec_op = fsmc_exec_op,
+ .setup_interface = fsmc_setup_interface,
+};
+
+/**
+ * fsmc_nand_disable() - Disables the NAND bank
+ * @host: The instance to disable
+ */
+static void fsmc_nand_disable(struct fsmc_nand_data *host)
+{
+ u32 val;
+
+ val = readl(host->regs_va + FSMC_PC);
+ val &= ~FSMC_ENABLE;
+ writel(val, host->regs_va + FSMC_PC);
+}
+
+/*
+ * fsmc_nand_probe - Probe function
+ * @pdev: platform device structure
+ */
+static int __init fsmc_nand_probe(struct platform_device *pdev)
+{
+ struct fsmc_nand_data *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ struct resource *res;
+ void __iomem *base;
+ dma_cap_mask_t mask;
+ int ret = 0;
+ u32 pid;
+ int i;
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ nand = &host->nand;
+
+ ret = fsmc_nand_probe_config_dt(pdev, host, nand);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
+ host->data_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->data_va))
+ return PTR_ERR(host->data_va);
+
+ host->data_pa = (dma_addr_t)res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
+ host->addr_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->addr_va))
+ return PTR_ERR(host->addr_va);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
+ host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->cmd_va))
+ return PTR_ERR(host->cmd_va);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ host->regs_va = base + FSMC_NOR_REG_SIZE +
+ (host->bank * FSMC_NAND_BANK_SZ);
+
+ host->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "failed to fetch block clock\n");
+ return PTR_ERR(host->clk);
+ }
+
+ /*
+ * This device ID is actually a common AMBA ID as used on the
+	 * AMBA PrimeCell bus. However, it is not a PrimeCell.
+ */
+ for (pid = 0, i = 0; i < 4; i++)
+ pid |= (readl(base + resource_size(res) - 0x20 + 4 * i) &
+ 255) << (i * 8);
+
+ host->pid = pid;
+
+ dev_info(&pdev->dev,
+ "FSMC device partno %03x, manufacturer %02x, revision %02x, config %02x\n",
+ AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
+ AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
+
+ host->dev = &pdev->dev;
+
+ if (host->mode == USE_DMA_ACCESS)
+ init_completion(&host->dma_access_complete);
+
+ /* Link all private pointers */
+ mtd = nand_to_mtd(&host->nand);
+ nand_set_flash_node(nand, pdev->dev.of_node);
+
+ mtd->dev.parent = &pdev->dev;
+
+ nand->badblockbits = 7;
+
+ if (host->mode == USE_DMA_ACCESS) {
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+ host->read_dma_chan = dma_request_channel(mask, filter, NULL);
+ if (!host->read_dma_chan) {
+ dev_err(&pdev->dev, "Unable to get read dma channel\n");
+ ret = -ENODEV;
+ goto disable_fsmc;
+ }
+ host->write_dma_chan = dma_request_channel(mask, filter, NULL);
+ if (!host->write_dma_chan) {
+ dev_err(&pdev->dev, "Unable to get write dma channel\n");
+ ret = -ENODEV;
+ goto release_dma_read_chan;
+ }
+ }
+
+ if (host->dev_timings) {
+ fsmc_nand_setup(host, host->dev_timings);
+ nand->options |= NAND_KEEP_TIMINGS;
+ }
+
+ nand_controller_init(&host->base);
+ host->base.ops = &fsmc_nand_controller_ops;
+ nand->controller = &host->base;
+
+ /*
+ * Scan to find existence of the device
+ */
+ ret = nand_scan(nand, 1);
+ if (ret)
+ goto release_dma_write_chan;
+
+ mtd->name = "nand";
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto cleanup_nand;
+
+ platform_set_drvdata(pdev, host);
+ dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(nand);
+release_dma_write_chan:
+ if (host->mode == USE_DMA_ACCESS)
+ dma_release_channel(host->write_dma_chan);
+release_dma_read_chan:
+ if (host->mode == USE_DMA_ACCESS)
+ dma_release_channel(host->read_dma_chan);
+disable_fsmc:
+ fsmc_nand_disable(host);
+
+ return ret;
+}
+
+/*
+ * Clean up routine
+ */
+static void fsmc_nand_remove(struct platform_device *pdev)
+{
+ struct fsmc_nand_data *host = platform_get_drvdata(pdev);
+
+ if (host) {
+ struct nand_chip *chip = &host->nand;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ fsmc_nand_disable(host);
+
+ if (host->mode == USE_DMA_ACCESS) {
+ dma_release_channel(host->write_dma_chan);
+ dma_release_channel(host->read_dma_chan);
+ }
+ }
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fsmc_nand_suspend(struct device *dev)
+{
+ struct fsmc_nand_data *host = dev_get_drvdata(dev);
+
+ if (host)
+ clk_disable_unprepare(host->clk);
+
+ return 0;
+}
+
+static int fsmc_nand_resume(struct device *dev)
+{
+ struct fsmc_nand_data *host = dev_get_drvdata(dev);
+ int ret;
+
+ if (host) {
+ ret = clk_prepare_enable(host->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk\n");
+ return ret;
+ }
+ if (host->dev_timings)
+ fsmc_nand_setup(host, host->dev_timings);
+ nand_reset(&host->nand, 0);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
+
+static const struct of_device_id fsmc_nand_id_table[] = {
+ { .compatible = "st,spear600-fsmc-nand" },
+ { .compatible = "stericsson,fsmc-nand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
+
+static struct platform_driver fsmc_nand_driver = {
+ .remove_new = fsmc_nand_remove,
+ .driver = {
+ .name = "fsmc-nand",
+ .of_match_table = fsmc_nand_id_table,
+ .pm = &fsmc_nand_pm_ops,
+ },
+};
+
+module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
+MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");
diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c
new file mode 100644
index 0000000000..d6cc2cb652
--- /dev/null
+++ b/drivers/mtd/nand/raw/gpio.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Updated and converted to a generic GPIO-based driver by Russell King.
+ *
+ * Written by Ben Dooks <ben@simtec.co.uk>
+ * Based on 2.4 version by Mark Whittaker
+ *
+ * © 2004 Simtec Electronics
+ *
+ * Device driver for NAND flash that uses a memory mapped interface to
+ * read/write the NAND commands and data, and GPIO pins for control signals
+ * (the DT binding refers to this as "GPIO assisted NAND flash")
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand-gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+
+struct gpiomtd {
+ struct nand_controller base;
+ void __iomem *io;
+ void __iomem *io_sync;
+ struct nand_chip nand_chip;
+ struct gpio_nand_platdata plat;
+ struct gpio_desc *nce; /* Optional chip enable */
+ struct gpio_desc *cle;
+ struct gpio_desc *ale;
+ struct gpio_desc *rdy;
+ struct gpio_desc *nwp; /* Optional write protection */
+};
+
+static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct gpiomtd, nand_chip);
+}
+
+
+#ifdef CONFIG_ARM
+/* gpio_nand_dosync()
+ *
+ * Make sure the GPIO state changes occur in order with writes to the NAND
+ * memory region.
+ * Needed on PXA due to bus reordering within the SoC itself (see the section
+ * on I/O ordering in the PXA manual, section 2.3, p35).
+ */
+static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
+{
+ unsigned long tmp;
+
+ if (gpiomtd->io_sync) {
+ /*
+ * Linux memory barriers don't cater for what's required here.
+ * What's required is what's here - a read from a separate
+ * region with a dependency on that read.
+ */
+ tmp = readl(gpiomtd->io_sync);
+ asm volatile("mov %1, %0\n" : "=r" (tmp) : "r" (tmp));
+ }
+}
+#else
+static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
+#endif
+
+static int gpio_nand_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
+ unsigned int i;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ gpio_nand_dosync(gpiomtd);
+ gpiod_set_value(gpiomtd->cle, 1);
+ gpio_nand_dosync(gpiomtd);
+ writeb(instr->ctx.cmd.opcode, gpiomtd->io);
+ gpio_nand_dosync(gpiomtd);
+ gpiod_set_value(gpiomtd->cle, 0);
+ return 0;
+
+ case NAND_OP_ADDR_INSTR:
+ gpio_nand_dosync(gpiomtd);
+ gpiod_set_value(gpiomtd->ale, 1);
+ gpio_nand_dosync(gpiomtd);
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ writeb(instr->ctx.addr.addrs[i], gpiomtd->io);
+ gpio_nand_dosync(gpiomtd);
+ gpiod_set_value(gpiomtd->ale, 0);
+ return 0;
+
+ case NAND_OP_DATA_IN_INSTR:
+ gpio_nand_dosync(gpiomtd);
+ if ((chip->options & NAND_BUSWIDTH_16) &&
+ !instr->ctx.data.force_8bit)
+ ioread16_rep(gpiomtd->io, instr->ctx.data.buf.in,
+ instr->ctx.data.len / 2);
+ else
+ ioread8_rep(gpiomtd->io, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ return 0;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ gpio_nand_dosync(gpiomtd);
+ if ((chip->options & NAND_BUSWIDTH_16) &&
+ !instr->ctx.data.force_8bit)
+ iowrite16_rep(gpiomtd->io, instr->ctx.data.buf.out,
+ instr->ctx.data.len / 2);
+ else
+ iowrite8_rep(gpiomtd->io, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ return 0;
+
+ case NAND_OP_WAITRDY_INSTR:
+ if (!gpiomtd->rdy)
+ return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
+
+ return nand_gpio_waitrdy(chip, gpiomtd->rdy,
+ instr->ctx.waitrdy.timeout_ms);
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int gpio_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
+ unsigned int i;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ gpio_nand_dosync(gpiomtd);
+ gpiod_set_value(gpiomtd->nce, 0);
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = gpio_nand_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ break;
+
+ if (op->instrs[i].delay_ns)
+ ndelay(op->instrs[i].delay_ns);
+ }
+ gpio_nand_dosync(gpiomtd);
+ gpiod_set_value(gpiomtd->nce, 1);
+
+ return ret;
+}
+
+static int gpio_nand_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops gpio_nand_ops = {
+ .exec_op = gpio_nand_exec_op,
+ .attach_chip = gpio_nand_attach_chip,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_nand_id_table[] = {
+ { .compatible = "gpio-control-nand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_id_table);
+
+static int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ u32 val;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
+ if (val == 2) {
+ plat->options |= NAND_BUSWIDTH_16;
+ } else if (val != 1) {
+ dev_err(dev, "invalid bank-width %u\n", val);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
+ plat->chip_delay = val;
+
+ return 0;
+}
+
+static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ struct resource *r;
+ u64 addr;
+
+ if (of_property_read_u64(pdev->dev.of_node,
+ "gpio-control-nand,io-sync-reg", &addr))
+ return NULL;
+
+ r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
+
+ r->start = addr;
+ r->end = r->start + 0x3;
+ r->flags = IORESOURCE_MEM;
+
+ return r;
+}
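+
+/*
+ * Illustrative DT fragment for the properties parsed above (property names
+ * from the gpio-control-nand binding; the values are made up):
+ *
+ *   bank-width = <2>;
+ *   chip-delay = <25>;
+ *   gpio-control-nand,io-sync-reg = <0x0 0x10000000>;
+ */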
+#else /* CONFIG_OF */
+static inline int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ return -ENOSYS;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static inline int gpio_nand_get_config(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ int ret = gpio_nand_get_config_of(dev, plat);
+
+ if (!ret)
+ return ret;
+
+ if (dev_get_platdata(dev)) {
+ memcpy(plat, dev_get_platdata(dev), sizeof(*plat));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync(struct platform_device *pdev)
+{
+ struct resource *r = gpio_nand_get_io_sync_of(pdev);
+
+ if (r)
+ return r;
+
+ return platform_get_resource(pdev, IORESOURCE_MEM, 1);
+}
+
+static void gpio_nand_remove(struct platform_device *pdev)
+{
+ struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &gpiomtd->nand_chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ /* Enable write protection and disable the chip */
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_set_value(gpiomtd->nwp, 0);
+ if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+ gpiod_set_value(gpiomtd->nce, 0);
+}
+
+static int gpio_nand_probe(struct platform_device *pdev)
+{
+ struct gpiomtd *gpiomtd;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+
+ if (!dev->of_node && !dev_get_platdata(dev))
+ return -EINVAL;
+
+ gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL);
+ if (!gpiomtd)
+ return -ENOMEM;
+
+ chip = &gpiomtd->nand_chip;
+
+ gpiomtd->io = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gpiomtd->io))
+ return PTR_ERR(gpiomtd->io);
+
+ res = gpio_nand_get_io_sync(pdev);
+ if (res) {
+ gpiomtd->io_sync = devm_ioremap_resource(dev, res);
+ if (IS_ERR(gpiomtd->io_sync))
+ return PTR_ERR(gpiomtd->io_sync);
+ }
+
+ ret = gpio_nand_get_config(dev, &gpiomtd->plat);
+ if (ret)
+ return ret;
+
+ /* Just enable the chip */
+ gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiomtd->nce))
+ return PTR_ERR(gpiomtd->nce);
+
+ /* We disable write protection once we know probe() will succeed */
+ gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->nwp)) {
+ ret = PTR_ERR(gpiomtd->nwp);
+ goto out_ce;
+ }
+
+ gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->ale)) {
+ ret = PTR_ERR(gpiomtd->ale);
+ goto out_ce;
+ }
+
+ gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->cle)) {
+ ret = PTR_ERR(gpiomtd->cle);
+ goto out_ce;
+ }
+
+ gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
+ if (IS_ERR(gpiomtd->rdy)) {
+ ret = PTR_ERR(gpiomtd->rdy);
+ goto out_ce;
+ }
+
+ nand_controller_init(&gpiomtd->base);
+ gpiomtd->base.ops = &gpio_nand_ops;
+
+ nand_set_flash_node(chip, pdev->dev.of_node);
+ chip->options = gpiomtd->plat.options;
+ chip->controller = &gpiomtd->base;
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = dev;
+
+ platform_set_drvdata(pdev, gpiomtd);
+
+ /* Disable write protection, if wired up */
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_direction_output(gpiomtd->nwp, 1);
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ goto err_wp;
+
+ if (gpiomtd->plat.adjust_parts)
+ gpiomtd->plat.adjust_parts(&gpiomtd->plat, mtd->size);
+
+ ret = mtd_device_register(mtd, gpiomtd->plat.parts,
+ gpiomtd->plat.num_parts);
+ if (!ret)
+ return 0;
+
+err_wp:
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_set_value(gpiomtd->nwp, 0);
+out_ce:
+ if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+ gpiod_set_value(gpiomtd->nce, 0);
+
+ return ret;
+}
+
+static struct platform_driver gpio_nand_driver = {
+ .probe = gpio_nand_probe,
+ .remove_new = gpio_nand_remove,
+ .driver = {
+ .name = "gpio-nand",
+ .of_match_table = of_match_ptr(gpio_nand_id_table),
+ },
+};
+
+module_platform_driver(gpio_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("GPIO NAND Driver");
diff --git a/drivers/mtd/nand/raw/gpmi-nand/Makefile b/drivers/mtd/nand/raw/gpmi-nand/Makefile
new file mode 100644
index 0000000000..247cbfceaa
--- /dev/null
+++ b/drivers/mtd/nand/raw/gpmi-nand/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand.o
diff --git a/drivers/mtd/nand/raw/gpmi-nand/bch-regs.h b/drivers/mtd/nand/raw/gpmi-nand/bch-regs.h
new file mode 100644
index 0000000000..a22b8a5062
--- /dev/null
+++ b/drivers/mtd/nand/raw/gpmi-nand/bch-regs.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ */
+#ifndef __GPMI_NAND_BCH_REGS_H
+#define __GPMI_NAND_BCH_REGS_H
+
+#define HW_BCH_CTRL 0x00000000
+#define HW_BCH_CTRL_SET 0x00000004
+#define HW_BCH_CTRL_CLR 0x00000008
+#define HW_BCH_CTRL_TOG 0x0000000c
+
+#define BM_BCH_CTRL_COMPLETE_IRQ_EN (1 << 8)
+#define BM_BCH_CTRL_COMPLETE_IRQ (1 << 0)
+
+#define HW_BCH_STATUS0 0x00000010
+#define HW_BCH_MODE 0x00000020
+#define HW_BCH_ENCODEPTR 0x00000030
+#define HW_BCH_DATAPTR 0x00000040
+#define HW_BCH_METAPTR 0x00000050
+#define HW_BCH_LAYOUTSELECT 0x00000070
+
+#define HW_BCH_FLASH0LAYOUT0 0x00000080
+
+#define BP_BCH_FLASH0LAYOUT0_NBLOCKS 24
+#define BM_BCH_FLASH0LAYOUT0_NBLOCKS (0xff << BP_BCH_FLASH0LAYOUT0_NBLOCKS)
+#define BF_BCH_FLASH0LAYOUT0_NBLOCKS(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_NBLOCKS) & BM_BCH_FLASH0LAYOUT0_NBLOCKS)
+
+#define BP_BCH_FLASH0LAYOUT0_META_SIZE 16
+#define BM_BCH_FLASH0LAYOUT0_META_SIZE (0xff << BP_BCH_FLASH0LAYOUT0_META_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_META_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT0_META_SIZE)\
+ & BM_BCH_FLASH0LAYOUT0_META_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT0_ECC0 12
+#define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0 11
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0 (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
+#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0) \
+ : (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) \
+ & BM_BCH_FLASH0LAYOUT0_ECC0) \
+ )
+
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14 10
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14 \
+ (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT0_GF(v, x) \
+ ((GPMI_IS_MX6(x) && ((v) == 14)) \
+ ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14) \
+ : 0 \
+ )
+
+#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
+#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
+ (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
+ (0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
+ : ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
+ )
+
+#define HW_BCH_FLASH0LAYOUT1 0x00000090
+
+#define BP_BCH_FLASH0LAYOUT1_PAGE_SIZE 16
+#define BM_BCH_FLASH0LAYOUT1_PAGE_SIZE \
+ (0xffff << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(v) \
+ (((v) << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) \
+ & BM_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT1_ECCN 12
+#define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN 11
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
+#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN) \
+ : (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) \
+ & BM_BCH_FLASH0LAYOUT1_ECCN) \
+ )
+
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14 10
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14 \
+ (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT1_GF(v, x) \
+ ((GPMI_IS_MX6(x) && ((v) == 14)) \
+ ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14) \
+ : 0 \
+ )
+
+#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
+#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
+ (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
+ (0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x) \
+ (GPMI_IS_MX6(x) \
+ ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
+ : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
+ )
+
+#define HW_BCH_VERSION 0x00000160
+#endif
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
new file mode 100644
index 0000000000..e71ad2fcec
--- /dev/null
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -0,0 +1,2868 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/sched/task_stack.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma/mxs-dma.h>
+#include "gpmi-nand.h"
+#include "gpmi-regs.h"
+#include "bch-regs.h"
+
+/* Resource names for the GPMI NAND driver. */
+#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
+#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
+#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
+
+/* Converts time to clock cycles */
+#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
+
+#define MXS_SET_ADDR 0x4
+#define MXS_CLR_ADDR 0x8
+/*
+ * Clear the bit and poll it cleared. This is usually called with
+ * a reset address and mask being either SFTRST (bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+ int timeout = 0x400;
+
+ /* clear the bit */
+ writel(mask, addr + MXS_CLR_ADDR);
+
+ /*
+ * SFTRST needs 3 GPMI clocks to settle; the reference manual
+ * recommends waiting 1us.
+ */
+ udelay(1);
+
+ /* poll until the bit becomes clear */
+ while ((readl(addr) & mask) && --timeout)
+ /* nothing */;
+
+ return !timeout;
+}
+
+#define MODULE_CLKGATE (1 << 30)
+#define MODULE_SFTRST (1 << 31)
+/*
+ * The current mxs_reset_block() will do two things:
+ * [1] enable the module.
+ * [2] reset the module.
+ *
+ * In most cases this is fine.
+ * But the MX23 has a hardware bug in the BCH block (see erratum #2847):
+ * if you try to soft reset the BCH block, it becomes unusable until the
+ * next hard reset. This case occurs in NAND boot mode: when the board
+ * boots from NAND, the ROM of the chip initializes the BCH block itself,
+ * so if the driver resets the BCH again, the BCH stops working and you
+ * will see a DMA timeout. The bug has been fixed in later chips, such
+ * as the MX28.
+ *
+ * To avoid this bug, add a new parameter `just_enable` to
+ * mxs_reset_block(), and rewrite it here.
+ */
+static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
+{
+ int ret;
+ int timeout = 0x400;
+
+ /* clear and poll SFTRST */
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear CLKGATE */
+ writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
+
+ if (!just_enable) {
+ /* set SFTRST to reset the block */
+ writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
+ udelay(1);
+
+ /* poll until CLKGATE becomes set */
+ while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
+ /* nothing */;
+ if (unlikely(!timeout))
+ goto error;
+ }
+
+ /* clear and poll SFTRST */
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear and poll CLKGATE */
+ ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
+ if (unlikely(ret))
+ goto error;
+
+ return 0;
+
+error:
+ pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
+ return -ETIMEDOUT;
+}
+
+static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
+{
+ struct clk *clk;
+ int ret;
+ int i;
+
+ for (i = 0; i < GPMI_CLK_MAX; i++) {
+ clk = this->resources.clock[i];
+ if (!clk)
+ break;
+
+ if (v) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto err_clk;
+ } else {
+ clk_disable_unprepare(clk);
+ }
+ }
+ return 0;
+
+err_clk:
+ for (; i > 0; i--)
+ clk_disable_unprepare(this->resources.clock[i - 1]);
+ return ret;
+}
+
+static int gpmi_init(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(this->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = gpmi_reset_block(r->gpmi_regs, false);
+ if (ret)
+ goto err_out;
+
+ /*
+ * Reset BCH here, too. We got failures otherwise :(
+ * See the later BCH reset for an explanation of the MX23/MX28 handling.
+ */
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
+ if (ret)
+ goto err_out;
+
+ /* Choose NAND mode. */
+ writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+ /* Set the IRQ polarity. */
+ writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
+ r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Disable Write-Protection. */
+ writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Select BCH ECC. */
+ writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /*
+ * Decouple the chip select from dma channel. We use dma0 for all
+ * the chips, force all NAND RDY_BUSY inputs to be sourced from
+ * RDY_BUSY0.
+ */
+ writel(BM_GPMI_CTRL1_DECOUPLE_CS | BM_GPMI_CTRL1_GANGED_RDYBUSY,
+ r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+err_out:
+ pm_runtime_mark_last_busy(this->dev);
+ pm_runtime_put_autosuspend(this->dev);
+ return ret;
+}
+
+/* This function is very useful. It is called only when a bug occurs. */
+static void gpmi_dump_info(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ struct bch_geometry *geo = &this->bch_geometry;
+ u32 reg;
+ int i;
+
+ dev_err(this->dev, "Show GPMI registers :\n");
+ for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
+ reg = readl(r->gpmi_regs + i * 0x10);
+ dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ }
+
+ /* start to print out the BCH info */
+ dev_err(this->dev, "Show BCH registers :\n");
+ for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
+ reg = readl(r->bch_regs + i * 0x10);
+ dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ }
+ dev_err(this->dev, "BCH Geometry :\n"
+ "GF length : %u\n"
+ "ECC Strength : %u\n"
+ "Page Size in Bytes : %u\n"
+ "Metadata Size in Bytes : %u\n"
+ "ECC0 Chunk Size in Bytes: %u\n"
+ "ECCn Chunk Size in Bytes: %u\n"
+ "ECC Chunk Count : %u\n"
+ "Payload Size in Bytes : %u\n"
+ "Auxiliary Size in Bytes: %u\n"
+ "Auxiliary Status Offset: %u\n"
+ "Block Mark Byte Offset : %u\n"
+ "Block Mark Bit Offset : %u\n",
+ geo->gf_len,
+ geo->ecc_strength,
+ geo->page_size,
+ geo->metadata_size,
+ geo->ecc0_chunk_size,
+ geo->eccn_chunk_size,
+ geo->ecc_chunk_count,
+ geo->payload_size,
+ geo->auxiliary_size,
+ geo->auxiliary_status_offset,
+ geo->block_mark_byte_offset,
+ geo->block_mark_bit_offset);
+}
+
+static bool gpmi_check_ecc(struct gpmi_nand_data *this)
+{
+ struct nand_chip *chip = &this->nand;
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct nand_device *nand = &chip->base;
+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
+
+ conf->step_size = geo->eccn_chunk_size;
+ conf->strength = geo->ecc_strength;
+
+ /* Do the sanity check. */
+ if (GPMI_IS_MXS(this)) {
+ /* The mx23/mx28 only support GF13. */
+ if (geo->gf_len == 14)
+ return false;
+ }
+
+ if (geo->ecc_strength > this->devdata->bch_max_ecc_strength)
+ return false;
+
+ if (!nand_ecc_is_strong_enough(nand))
+ return false;
+
+ return true;
+}
+
+/* check if the bbm is located in a data chunk rather than an ecc chunk */
+static bool bbm_in_data_chunk(struct gpmi_nand_data *this,
+ unsigned int *chunk_num)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int i, j;
+
+ if (geo->ecc0_chunk_size != geo->eccn_chunk_size) {
+ dev_err(this->dev,
+ "The size of ecc0_chunk must equal to eccn_chunk\n");
+ return false;
+ }
+
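+ /*
+ * Locate the physical bbm byte (at offset mtd->writesize) within the BCH
+ * view of the page: i counts the complete (data + parity) groups before
+ * it, and j is the remaining bit offset inside group i. If j falls within
+ * the data part of that group, the bbm sits in a data chunk.
+ */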
+ i = (mtd->writesize * 8 - geo->metadata_size * 8) /
+ (geo->gf_len * geo->ecc_strength +
+ geo->eccn_chunk_size * 8);
+
+ j = (mtd->writesize * 8 - geo->metadata_size * 8) -
+ (geo->gf_len * geo->ecc_strength +
+ geo->eccn_chunk_size * 8) * i;
+
+ if (j < geo->eccn_chunk_size * 8) {
+ *chunk_num = i+1;
+ dev_dbg(this->dev, "Set ecc to %d and bbm in chunk %d\n",
+ geo->ecc_strength, *chunk_num);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * If we can get the ECC information from the nand chip, we do not
+ * need to calculate it ourselves.
+ *
+ * We may have available oob space in this case.
+ */
+static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
+ unsigned int ecc_strength,
+ unsigned int ecc_step)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int block_mark_bit_offset;
+
+ switch (ecc_step) {
+ case SZ_512:
+ geo->gf_len = 13;
+ break;
+ case SZ_1K:
+ geo->gf_len = 14;
+ break;
+ default:
+ dev_err(this->dev,
+ "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
+ nanddev_get_ecc_requirements(&chip->base)->strength,
+ nanddev_get_ecc_requirements(&chip->base)->step_size);
+ return -EINVAL;
+ }
+ geo->ecc0_chunk_size = ecc_step;
+ geo->eccn_chunk_size = ecc_step;
+ geo->ecc_strength = round_up(ecc_strength, 2);
+ if (!gpmi_check_ecc(this))
+ return -EINVAL;
+
+ /* Keep the C >= O */
+ if (geo->eccn_chunk_size < mtd->oobsize) {
+ dev_err(this->dev,
+ "unsupported nand chip. ecc size: %d, oob size : %d\n",
+ ecc_step, mtd->oobsize);
+ return -EINVAL;
+ }
+
+ /* The default value, see comment in the legacy_set_geometry(). */
+ geo->metadata_size = 10;
+
+ geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size;
+
+ /*
+ * Now, the NAND chip with 2K page(data chunk is 512byte) shows below:
+ *
+ * | P |
+ * |<----------------------------------------------------->|
+ * | |
+ * | (Block Mark) |
+ * | P' | | | |
+ * |<-------------------------------------------->| D | | O' |
+ * | |<---->| |<--->|
+ * V V V V V
+ * +---+----------+-+----------+-+----------+-+----------+-+-----+
+ * | M | data |E| data |E| data |E| data |E| |
+ * +---+----------+-+----------+-+----------+-+----------+-+-----+
+ * ^ ^
+ * | O |
+ * |<------------>|
+ * | |
+ *
+ * P : the page size for BCH module.
+ * E : The ECC strength.
+ * G : the length of Galois Field.
+ * N : the chunk count per page.
+ * M : the metadata size per page.
+ * C : the ecc chunk size, aka the "data" above.
+ * P': the nand chip's page size.
+ * O : the nand chip's oob size.
+ * O': the free oob.
+ *
+ * The formula for P is :
+ *
+ * E * G * N
+ * P = ------------ + P' + M
+ * 8
+ *
+ * The position of block mark moves forward in the ECC-based view
+ * of page, and the delta is:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M)
+ * 8
+ *
+ * Please see the comment in legacy_set_geometry().
+ * With the condition C >= O , we still can get same result.
+ * So the bit position of the physical block mark within the ECC-based
+ * view of the page is :
+ * (P' - D) * 8
+ */
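+ /*
+ * Worked example (illustrative numbers): a 2K page with 512-byte chunks
+ * gives N = 4; with E = 8, G = 13 and M = 10 this yields
+ * P = 2048 + 10 + (13 * 8 * 4) / 8 = 2110 bytes.
+ */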
+ geo->page_size = mtd->writesize + geo->metadata_size +
+ (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
+
+ geo->payload_size = mtd->writesize;
+
+ geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
+ geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
+ + ALIGN(geo->ecc_chunk_count, 4);
+
+ if (!this->swap_block_mark)
+ return 0;
+
+ /* For bit swap. */
+ block_mark_bit_offset = mtd->writesize * 8 -
+ (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+ + geo->metadata_size * 8);
+
+ geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+ geo->block_mark_bit_offset = block_mark_bit_offset % 8;
+ return 0;
+}
+
+/*
+ * Calculate the ECC strength by hand:
+ * E : The ECC strength.
+ * G : the length of Galois Field.
+ * N : the chunk count per page.
+ * O : the oobsize of the NAND chip.
+ * M : the metadata size per page.
+ *
+ * The formula is :
+ * E * G * N
+ * ------------ <= (O - M)
+ * 8
+ *
+ * So, we get E by:
+ * (O - M) * 8
+ * E <= -------------
+ * G * N
+ */
+static inline int get_ecc_strength(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct mtd_info *mtd = nand_to_mtd(&this->nand);
+ int ecc_strength;
+
+ ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
+ / (geo->gf_len * geo->ecc_chunk_count);
+
+ /* We need the nearest lower even number. */
+ return round_down(ecc_strength, 2);
+}
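+/*
+ * Worked example for the bound above (illustrative numbers): a 2K page
+ * with 512-byte chunks gives N = 4; with O = 64, M = 10 and G = 13:
+ *
+ *	E <= (64 - 10) * 8 / (13 * 4) = 8.3, rounded down to the even 8.
+ */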
+
+static int set_geometry_for_large_oob(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ unsigned int block_mark_bit_offset;
+ unsigned int max_ecc;
+ unsigned int bbm_chunk;
+ unsigned int i;
+
+ /* sanity check for the minimum ecc the nand chip requires */
+ if (!(requirements->strength > 0 &&
+ requirements->step_size > 0))
+ return -EINVAL;
+ geo->ecc_strength = requirements->strength;
+
+ /* check if platform can support this nand */
+ if (!gpmi_check_ecc(this)) {
+ dev_err(this->dev,
+ "unsupported NAND chip, minimum ecc required %d\n",
+ geo->ecc_strength);
+ return -EINVAL;
+ }
+
+ /* calculate the maximum ecc the platform can support */
+ geo->metadata_size = 10;
+ geo->gf_len = 14;
+ geo->ecc0_chunk_size = 1024;
+ geo->eccn_chunk_size = 1024;
+ geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size;
+ max_ecc = min(get_ecc_strength(this),
+ this->devdata->bch_max_ecc_strength);
+
+ /*
+ * search for a supported ecc strength that places the bbm
+ * in a data chunk
+ */
+ geo->ecc_strength = max_ecc;
+ while (!(geo->ecc_strength < requirements->strength)) {
+ if (bbm_in_data_chunk(this, &bbm_chunk))
+ goto geo_setting;
+ geo->ecc_strength -= 2;
+ }
+
+ /*
+ * If none of them works, keep using the minimum ecc the nand chip
+ * requires, but change the ecc page layout.
+ */
+ geo->ecc_strength = requirements->strength;
+ /* add an extra ecc chunk for the metadata */
+ geo->ecc0_chunk_size = 0;
+ geo->ecc_chunk_count = (mtd->writesize / geo->eccn_chunk_size) + 1;
+ geo->ecc_for_meta = 1;
+ /* check if the oob can accommodate this extra ecc chunk */
+ if (mtd->oobsize * 8 < geo->metadata_size * 8 +
+ geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) {
+ dev_err(this->dev, "unsupported NAND chip with new layout\n");
+ return -EINVAL;
+ }
+
+ /* calculate in which chunk the bbm is located */
+ bbm_chunk = (mtd->writesize * 8 - geo->metadata_size * 8 -
+ geo->gf_len * geo->ecc_strength) /
+ (geo->gf_len * geo->ecc_strength +
+ geo->eccn_chunk_size * 8) + 1;
+
+geo_setting:
+
+ geo->page_size = mtd->writesize + geo->metadata_size +
+ (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
+ geo->payload_size = mtd->writesize;
+
+ /*
+ * The auxiliary buffer contains the metadata and the ECC status. The
+ * metadata is padded to the nearest 32-bit boundary. The ECC status
+ * contains one byte for every ECC chunk, and is also padded to the
+ * nearest 32-bit boundary.
+ */
+ geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
+ geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
+ + ALIGN(geo->ecc_chunk_count, 4);
+
+ if (!this->swap_block_mark)
+ return 0;
+
+ /* calculate the number of ecc chunks behind the bbm */
+ i = (mtd->writesize / geo->eccn_chunk_size) - bbm_chunk + 1;
+
+ block_mark_bit_offset = mtd->writesize * 8 -
+ (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
+ + geo->metadata_size * 8);
+
+ geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+ geo->block_mark_bit_offset = block_mark_bit_offset % 8;
+
+ dev_dbg(this->dev, "BCH Geometry :\n"
+ "GF length : %u\n"
+ "ECC Strength : %u\n"
+ "Page Size in Bytes : %u\n"
+ "Metadata Size in Bytes : %u\n"
+ "ECC0 Chunk Size in Bytes: %u\n"
+ "ECCn Chunk Size in Bytes: %u\n"
+ "ECC Chunk Count : %u\n"
+ "Payload Size in Bytes : %u\n"
+ "Auxiliary Size in Bytes: %u\n"
+ "Auxiliary Status Offset: %u\n"
+ "Block Mark Byte Offset : %u\n"
+ "Block Mark Bit Offset : %u\n"
+ "Block Mark in chunk : %u\n"
+ "Ecc for Meta data : %u\n",
+ geo->gf_len,
+ geo->ecc_strength,
+ geo->page_size,
+ geo->metadata_size,
+ geo->ecc0_chunk_size,
+ geo->eccn_chunk_size,
+ geo->ecc_chunk_count,
+ geo->payload_size,
+ geo->auxiliary_size,
+ geo->auxiliary_status_offset,
+ geo->block_mark_byte_offset,
+ geo->block_mark_bit_offset,
+ bbm_chunk,
+ geo->ecc_for_meta);
+
+ return 0;
+}
+
+static int legacy_set_geometry(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct mtd_info *mtd = nand_to_mtd(&this->nand);
+ unsigned int metadata_size;
+ unsigned int status_size;
+ unsigned int block_mark_bit_offset;
+
+ /*
+ * The size of the metadata can be changed, though we set it to 10
+ * bytes now. But it can't be too large, because we have to save
+ * enough space for BCH.
+ */
+ geo->metadata_size = 10;
+
+ /* The default for the length of Galois Field. */
+ geo->gf_len = 13;
+
+ /* The default for chunk size. */
+ geo->ecc0_chunk_size = 512;
+ geo->eccn_chunk_size = 512;
+ while (geo->eccn_chunk_size < mtd->oobsize) {
+ geo->ecc0_chunk_size *= 2; /* keep C >= O */
+ geo->eccn_chunk_size *= 2; /* keep C >= O */
+ geo->gf_len = 14;
+ }
+
+ geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size;
+
+ /* We use the same ECC strength for all chunks. */
+ geo->ecc_strength = get_ecc_strength(this);
+ if (!gpmi_check_ecc(this)) {
+ dev_err(this->dev,
+ "ecc strength: %d cannot be supported by the controller (%d)\n"
+ "try to use minimum ecc strength that NAND chip required\n",
+ geo->ecc_strength,
+ this->devdata->bch_max_ecc_strength);
+ return -EINVAL;
+ }
+
+ geo->page_size = mtd->writesize + geo->metadata_size +
+ (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
+ geo->payload_size = mtd->writesize;
+
+ /*
+ * The auxiliary buffer contains the metadata and the ECC status. The
+ * metadata is padded to the nearest 32-bit boundary. The ECC status
+ * contains one byte for every ECC chunk, and is also padded to the
+ * nearest 32-bit boundary.
+ */
+ metadata_size = ALIGN(geo->metadata_size, 4);
+ status_size = ALIGN(geo->ecc_chunk_count, 4);
+
+ geo->auxiliary_size = metadata_size + status_size;
+ geo->auxiliary_status_offset = metadata_size;
+
+ if (!this->swap_block_mark)
+ return 0;
+
+ /*
+ * We need to compute the byte and bit offsets of
+ * the physical block mark within the ECC-based view of the page.
+ *
+ * NAND chip with 2K page shows below:
+ * (Block Mark)
+ * | |
+ * | D |
+ * |<---->|
+ * V V
+ * +---+----------+-+----------+-+----------+-+----------+-+
+ * | M | data |E| data |E| data |E| data |E|
+ * +---+----------+-+----------+-+----------+-+----------+-+
+ *
+ * The position of block mark moves forward in the ECC-based view
+ * of page, and the delta is:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M)
+ * 8
+ *
+ * With the formula to compute the ECC strength, and the condition
+ * : C >= O (C is the ecc chunk size)
+ *
+ * It's easy to deduce to the following result:
+ *
+ * E * G (O - M) C - M C - M
+ * ----------- <= ------- <= -------- < ---------
+ * 8 N N (N - 1)
+ *
+ * So, we get:
+ *
+ * E * G * (N - 1)
+ * D = (---------------- + M) < C
+ * 8
+ *
+ * The above inequality means the position of block mark
+ * within the ECC-based view of the page is still in the data chunk,
+ * and it's NOT in the ECC bits of the chunk.
+ *
+ * Use the following to compute the bit position of the
+ * physical block mark within the ECC-based view of the page:
+ * (page_size - D) * 8
+ *
+ * --Huang Shijie
+ */
+ block_mark_bit_offset = mtd->writesize * 8 -
+ (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+ + geo->metadata_size * 8);
+
+ geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+ geo->block_mark_bit_offset = block_mark_bit_offset % 8;
+ return 0;
+}
+
+static int common_nfc_set_geometry(struct gpmi_nand_data *this)
+{
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = nand_to_mtd(&this->nand);
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ bool use_minimum_ecc;
+ int err;
+
+ use_minimum_ecc = of_property_read_bool(this->dev->of_node,
+ "fsl,use-minimum-ecc");
+
+ /* use legacy bch geometry settings by default */
+ if ((!use_minimum_ecc && mtd->oobsize < 1024) ||
+ !(requirements->strength > 0 && requirements->step_size > 0)) {
+ dev_dbg(this->dev, "use legacy bch geometry\n");
+ err = legacy_set_geometry(this);
+ if (!err)
+ return 0;
+ }
+
+ /* for large oob nand */
+ if (mtd->oobsize > 1024) {
+ dev_dbg(this->dev, "use large oob bch geometry\n");
+ err = set_geometry_for_large_oob(this);
+ if (!err)
+ return 0;
+ }
+
+ /* otherwise use the minimum ecc the nand chip requires */
+ dev_dbg(this->dev, "use minimum ecc bch geometry\n");
+ err = set_geometry_by_ecc_info(this, requirements->strength,
+ requirements->step_size);
+ if (err)
+ dev_err(this->dev, "none of the bch geometry setting works\n");
+
+ return err;
+}
+
+/* Configures the geometry for BCH. */
+static int bch_set_geometry(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ int ret;
+
+ ret = common_nfc_set_geometry(this);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(this->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(this->dev);
+ return ret;
+ }
+
+ /*
+ * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
+ * chip, otherwise it will lock up. So we skip resetting the BCH on the
+ * MX23 and MX28.
+ */
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
+ if (ret)
+ goto err_out;
+
+ /* Set *all* chip selects to use layout 0. */
+ writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
+
+ ret = 0;
+err_out:
+ pm_runtime_mark_last_busy(this->dev);
+ pm_runtime_put_autosuspend(this->dev);
+
+ return ret;
+}
+
+/*
+ * <1> Firstly, we should know what the GPMI-clock means.
+ * The GPMI-clock is the internal clock in the gpmi nand controller.
+ * If you set 100MHz to gpmi nand controller, the GPMI-clock's period
+ * is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
+ *
+ * <2> Secondly, we should know the frequency on the nand chip pins.
+ * The frequency on the nand chip pins is derived from the GPMI-clock.
+ * We can get it from the following equation:
+ *
+ * F = G / (DS + DH)
+ *
+ * F : the frequency on the nand chip pins.
+ * G : the GPMI clock, such as 100MHz.
+ * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
+ * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
+ *
+ * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
+ * the nand EDO (Extended Data Out) timing can be applied.
+ * The GPMI implements a feedback read strobe to sample the read data.
+ * The feedback read strobe can be delayed to support the nand EDO timing,
+ * where the read strobe may deassert before the read data is valid, and
+ * the read data remains valid for some time after the read strobe.
+ *
+ * The following figure illustrates some aspects of a NAND Flash read:
+ *
+ * |<---tREA---->|
+ * | |
+ * | | |
+ * |<--tRP-->| |
+ * | | |
+ * __ ___|__________________________________
+ * RDN \________/ |
+ * |
+ * /---------\
+ * Read Data --------------< >---------
+ * \---------/
+ * | |
+ * |<-D->|
+ * FeedbackRDN ________ ____________
+ * \___________/
+ *
+ * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
+ *
+ *
+ * <4> Now, we begin to describe how to compute the right RDN_DELAY.
+ *
+ * 4.1) From the aspect of the nand chip pins:
+ * Delay = (tREA + C - tRP) {1}
+ *
+ * tREA : the maximum read access time.
+ * C : a constant to adjust the delay. The default is 4000ps.
+ * tRP : the read pulse width, which is exactly:
+ * tRP = (GPMI-clock-period) * DATA_SETUP
+ *
+ * 4.2) From the aspect of the GPMI nand controller:
+ * Delay = RDN_DELAY * 0.125 * RP {2}
+ *
+ * RP : the DLL reference period.
+ * if (GPMI-clock-period > DLL_THRESHOLD)
+ * RP = GPMI-clock-period / 2;
+ * else
+ * RP = GPMI-clock-period;
+ *
+ * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
+ * is greater than DLL_THRESHOLD. On most SoCs the DLL_THRESHOLD
+ * is 16000ps, but on mx6q we use 12000ps.
+ *
+ * 4.3) since {1} equals {2}, we get:
+ *
+ * (tREA + 4000 - tRP) * 8
+ * RDN_DELAY = ----------------------- {3}
+ * RP
+ */
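+/*
+ * Worked example for {3} (illustrative numbers only): a 100MHz GPMI clock
+ * has a 10000ps period, below the 12000ps mx6q DLL threshold, so
+ * RP = 10000ps. With DATA_SETUP = 2 cycles, tRP = 20000ps; a chip with
+ * tREA = 20000ps then needs:
+ *
+ *	RDN_DELAY = (20000 + 4000 - 20000) * 8 / 10000 = 3.2, i.e. factor 3.
+ */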
+static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
+ const struct nand_sdr_timings *sdr)
+{
+ struct gpmi_nfc_hardware_timing *hw = &this->hw;
+ struct resources *r = &this->resources;
+ unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
+ unsigned int period_ps, reference_period_ps;
+ unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
+ unsigned int tRP_ps;
+ bool use_half_period;
+ int sample_delay_ps, sample_delay_factor;
+ unsigned int busy_timeout_cycles;
+ u8 wrn_dly_sel;
+ unsigned long clk_rate, min_rate;
+ u64 busy_timeout_ps;
+
+ if (sdr->tRC_min >= 30000) {
+ /* ONFI non-EDO modes [0-3] */
+ hw->clk_rate = 22000000;
+ min_rate = 0;
+ wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
+ } else if (sdr->tRC_min >= 25000) {
+ /* ONFI EDO mode 4 */
+ hw->clk_rate = 80000000;
+ min_rate = 22000000;
+ wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
+ } else {
+ /* ONFI EDO mode 5 */
+ hw->clk_rate = 100000000;
+ min_rate = 80000000;
+ wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
+ }
+
+ clk_rate = clk_round_rate(r->clock[0], hw->clk_rate);
+ if (clk_rate <= min_rate) {
+ dev_err(this->dev, "clock setting: expected %ld, got %ld\n",
+ hw->clk_rate, clk_rate);
+ return -ENOTSUPP;
+ }
+
+ hw->clk_rate = clk_rate;
+ /* SDR core timings are given in picoseconds */
+ period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
+
+ addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
+ data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
+ data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
+ busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
+ busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
+
+ hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
+ BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
+ BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
+ hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096));
+
+ /*
+ * Derive NFC ideal delay from {3}:
+ *
+ * (tREA + 4000 - tRP) * 8
+ * RDN_DELAY = -----------------------
+ * RP
+ */
+ if (period_ps > dll_threshold_ps) {
+ use_half_period = true;
+ reference_period_ps = period_ps / 2;
+ } else {
+ use_half_period = false;
+ reference_period_ps = period_ps;
+ }
+
+ tRP_ps = data_setup_cycles * period_ps;
+ sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
+ if (sample_delay_ps > 0)
+ sample_delay_factor = sample_delay_ps / reference_period_ps;
+ else
+ sample_delay_factor = 0;
+
+ hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
+ if (sample_delay_factor)
+ hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
+ BM_GPMI_CTRL1_DLL_ENABLE |
+ (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
+ return 0;
+}
+
+static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
+{
+ struct gpmi_nfc_hardware_timing *hw = &this->hw;
+ struct resources *r = &this->resources;
+ void __iomem *gpmi_regs = r->gpmi_regs;
+ unsigned int dll_wait_time_us;
+ int ret;
+
+ /* Clock dividers do NOT guarantee a clean clock signal on their outputs
+ * while the divide factor changes on i.MX6Q/UL/SX. On i.MX7/8,
+ * all clock dividers provide this guarantee.
+ */
+ if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this))
+ clk_disable_unprepare(r->clock[0]);
+
+ ret = clk_set_rate(r->clock[0], hw->clk_rate);
+ if (ret) {
+ dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret);
+ return ret;
+ }
+
+ if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) {
+ ret = clk_prepare_enable(r->clock[0]);
+ if (ret)
+ return ret;
+ }
+
+ writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
+ writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
+
+ /*
+ * Clear several CTRL1 fields, DLL must be disabled when setting
+ * RDN_DELAY or HALF_PERIOD.
+ */
+ writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
+ writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* Wait 64 clock cycles before using the GPMI after enabling the DLL */
+ dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
+ if (!dll_wait_time_us)
+ dll_wait_time_us = 1;
+
+ /* Wait for the DLL to settle. */
+ udelay(dll_wait_time_us);
+
+ return 0;
+}
+
+static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ const struct nand_sdr_timings *sdr;
+ int ret;
+
+ /* Retrieve required NAND timings */
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ /* Only MX28/MX6 GPMI controller can reach EDO timings */
+ if (sdr->tRC_min <= 25000 && !GPMI_IS_MX28(this) && !GPMI_IS_MX6(this))
+ return -ENOTSUPP;
+
+ /* Stop here if this call was just a check */
+ if (chipnr < 0)
+ return 0;
+
+ /* Do the actual derivation of the controller timings */
+ ret = gpmi_nfc_compute_timings(this, sdr);
+ if (ret)
+ return ret;
+
+ this->hw.must_apply_timings = true;
+
+ return 0;
+}
+
+/* Clears a BCH interrupt. */
+static void gpmi_clear_bch(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
+}
+
+static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
+{
+ /* We use DMA channel 0 to access all the nand chips. */
+ return this->dma_chans[0];
+}
+
+/* This will be called after the DMA operation is finished. */
+static void dma_irq_callback(void *param)
+{
+ struct gpmi_nand_data *this = param;
+ struct completion *dma_c = &this->dma_done;
+
+ complete(dma_c);
+}
+
+static irqreturn_t bch_irq(int irq, void *cookie)
+{
+ struct gpmi_nand_data *this = cookie;
+
+ gpmi_clear_bch(this);
+ complete(&this->bch_done);
+ return IRQ_HANDLED;
+}
+
+static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
+{
+ /*
+ * raw_len, as passed to us in exec_op, is the length to read/write
+ * including the bch data. Calculate the plain data length from it.
+ */
+ if (this->bch)
+ return ALIGN_DOWN(raw_len, this->bch_geometry.eccn_chunk_size);
+ else
+ return raw_len;
+}
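+/*
+ * For example (illustrative): with 512-byte ECC chunks, a raw_len of 2110
+ * (2048 data bytes plus metadata and parity) maps back to
+ * ALIGN_DOWN(2110, 512) = 2048 data bytes.
+ */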
+
+/* Can we use the upper layer's buffer directly for DMA? */
+static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
+ int raw_len, struct scatterlist *sgl,
+ enum dma_data_direction dr)
+{
+ int ret;
+ int len = gpmi_raw_len_to_len(this, raw_len);
+
+ /* first try to map the upper buffer directly */
+ if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
+ sg_init_one(sgl, buf, len);
+ ret = dma_map_sg(this->dev, sgl, 1, dr);
+ if (ret == 0)
+ goto map_fail;
+
+ return true;
+ }
+
+map_fail:
+ /* We have to use our own DMA buffer. */
+ sg_init_one(sgl, this->data_buffer_dma, len);
+
+ if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
+ memcpy(this->data_buffer_dma, buf, len);
+
+ dma_map_sg(this->dev, sgl, 1, dr);
+
+ return false;
+}
+
+/* add our own bbt descriptor */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr gpmi_bbt_descr = {
+ .options = 0,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern
+};
+
+/*
+ * We may change the layout if we can get the ECC info from the datasheet,
+ * otherwise we will use all of the (page + OOB).
+ */
+static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *geo = &this->bch_geometry;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 0;
+ oobregion->length = geo->page_size - mtd->writesize;
+
+ return 0;
+}
+
+static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *geo = &this->bch_geometry;
+
+ if (section)
+ return -ERANGE;
+
+ /* Whatever oob space is left over after the BCH data. */
+ if (geo->page_size < mtd->writesize + mtd->oobsize) {
+ oobregion->offset = geo->page_size - mtd->writesize;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const char * const gpmi_clks_for_mx2x[] = {
+ "gpmi_io",
+};
+
+static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
+ .ecc = gpmi_ooblayout_ecc,
+ .free = gpmi_ooblayout_free,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx23 = {
+ .type = IS_MX23,
+ .bch_max_ecc_strength = 20,
+ .max_chain_delay = 16000,
+ .clks = gpmi_clks_for_mx2x,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx28 = {
+ .type = IS_MX28,
+ .bch_max_ecc_strength = 20,
+ .max_chain_delay = 16000,
+ .clks = gpmi_clks_for_mx2x,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
+};
+
+static const char * const gpmi_clks_for_mx6[] = {
+ "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx6q = {
+ .type = IS_MX6Q,
+ .bch_max_ecc_strength = 40,
+ .max_chain_delay = 12000,
+ .clks = gpmi_clks_for_mx6,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx6sx = {
+ .type = IS_MX6SX,
+ .bch_max_ecc_strength = 62,
+ .max_chain_delay = 12000,
+ .clks = gpmi_clks_for_mx6,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
+};
+
+static const char * const gpmi_clks_for_mx7d[] = {
+ "gpmi_io", "gpmi_bch_apb",
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx7d = {
+ .type = IS_MX7D,
+ .bch_max_ecc_strength = 62,
+ .max_chain_delay = 12000,
+ .clks = gpmi_clks_for_mx7d,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
+};
+
+static int acquire_register_block(struct gpmi_nand_data *this,
+ const char *res_name)
+{
+ struct platform_device *pdev = this->pdev;
+ struct resources *res = &this->resources;
+ void __iomem *p;
+
+ p = devm_platform_ioremap_resource_byname(pdev, res_name);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
+ res->gpmi_regs = p;
+ else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
+ res->bch_regs = p;
+ else
+ dev_err(this->dev, "unknown resource name : %s\n", res_name);
+
+ return 0;
+}
+
+static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
+{
+ struct platform_device *pdev = this->pdev;
+ const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
+ int err;
+
+ err = platform_get_irq_byname(pdev, res_name);
+ if (err < 0)
+ return err;
+
+ err = devm_request_irq(this->dev, err, irq_h, 0, res_name, this);
+ if (err)
+ dev_err(this->dev, "error requesting BCH IRQ\n");
+
+ return err;
+}
+
+static void release_dma_channels(struct gpmi_nand_data *this)
+{
+ unsigned int i;
+ for (i = 0; i < DMA_CHANS; i++)
+ if (this->dma_chans[i]) {
+ dma_release_channel(this->dma_chans[i]);
+ this->dma_chans[i] = NULL;
+ }
+}
+
+static int acquire_dma_channels(struct gpmi_nand_data *this)
+{
+ struct platform_device *pdev = this->pdev;
+ struct dma_chan *dma_chan;
+ int ret = 0;
+
+ /* request dma channel */
+ dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(dma_chan)) {
+ ret = dev_err_probe(this->dev, PTR_ERR(dma_chan),
+ "DMA channel request failed\n");
+ release_dma_channels(this);
+ } else {
+ this->dma_chans[0] = dma_chan;
+ }
+
+ return ret;
+}
+
+static int gpmi_get_clks(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ struct clk *clk;
+ int err, i;
+
+ for (i = 0; i < this->devdata->clks_count; i++) {
+ clk = devm_clk_get(this->dev, this->devdata->clks[i]);
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ goto err_clock;
+ }
+
+ r->clock[i] = clk;
+ }
+
+ return 0;
+
+err_clock:
+ dev_dbg(this->dev, "failed in finding the clocks.\n");
+ return err;
+}
+
+static int acquire_resources(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_bch_irq(this, bch_irq);
+ if (ret)
+ goto exit_regs;
+
+ ret = acquire_dma_channels(this);
+ if (ret)
+ goto exit_regs;
+
+ ret = gpmi_get_clks(this);
+ if (ret)
+ goto exit_clock;
+ return 0;
+
+exit_clock:
+ release_dma_channels(this);
+exit_regs:
+ return ret;
+}
+
+static void release_resources(struct gpmi_nand_data *this)
+{
+ release_dma_channels(this);
+}
+
+static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+ struct bch_geometry *geo = &this->bch_geometry;
+
+ if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
+ dma_free_coherent(dev, geo->auxiliary_size,
+ this->auxiliary_virt,
+ this->auxiliary_phys);
+ kfree(this->data_buffer_dma);
+ kfree(this->raw_buffer);
+
+ this->data_buffer_dma = NULL;
+ this->raw_buffer = NULL;
+}
+
+/* Allocate the DMA buffers */
+static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ struct device *dev = this->dev;
+ struct mtd_info *mtd = nand_to_mtd(&this->nand);
+
+ /*
+ * [2] Allocate a read/write data buffer.
+ * gpmi_alloc_dma_buffer() can be called twice: we allocate a
+ * PAGE_SIZE buffer if it is called before the NAND identification,
+ * and a buffer of the real NAND page size when it is called
+ * afterwards.
+ */
+ this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (this->data_buffer_dma == NULL)
+ goto error_alloc;
+
+ this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
+ &this->auxiliary_phys, GFP_DMA);
+ if (!this->auxiliary_virt)
+ goto error_alloc;
+
+ this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
+ if (!this->raw_buffer)
+ goto error_alloc;
+
+ return 0;
+
+error_alloc:
+ gpmi_free_dma_buffer(this);
+ return -ENOMEM;
+}
+
+/*
+ * Handles block mark swapping.
+ * It can be called either to swap the block mark or to swap it back,
+ * because the operations are the same.
+ */
+static void block_mark_swapping(struct gpmi_nand_data *this,
+ void *payload, void *auxiliary)
+{
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ unsigned char *p;
+ unsigned char *a;
+ unsigned int bit;
+ unsigned char mask;
+ unsigned char from_data;
+ unsigned char from_oob;
+
+ if (!this->swap_block_mark)
+ return;
+
+ /*
+ * If control arrives here, we're swapping. Make some convenience
+ * variables.
+ */
+ bit = nfc_geo->block_mark_bit_offset;
+ p = payload + nfc_geo->block_mark_byte_offset;
+ a = auxiliary;
+
+ /*
+ * Get the byte from the data area that overlays the block mark. Since
+ * the ECC engine applies its own view to the bits in the page, the
+ * physical block mark won't (in general) appear on a byte boundary in
+ * the data.
+ */
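+ /*
+ * For example (illustrative): with bit = 2, the overlapping byte is
+ * made of p[0] bits 7..2 and p[1] bits 1..0, hence
+ * from_data = (p[0] >> 2) | (p[1] << 6).
+ */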
+ from_data = (p[0] >> bit) | (p[1] << (8 - bit));
+
+ /* Get the byte from the OOB. */
+ from_oob = a[0];
+
+ /* Swap them. */
+ a[0] = from_data;
+
+ mask = (0x1 << bit) - 1;
+ p[0] = (p[0] & mask) | (from_oob << bit);
+
+ mask = ~0 << bit;
+ p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
+}
+
+static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
+ int last, int meta)
+{
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i;
+ unsigned char *status;
+ unsigned int max_bitflips = 0;
+
+ /* Loop over status bytes, accumulating ECC status. */
+ status = this->auxiliary_virt + ALIGN(meta, 4);
+
+ for (i = first; i < last; i++, status++) {
+ if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
+ continue;
+
+ if (*status == STATUS_UNCORRECTABLE) {
+ int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+ u8 *eccbuf = this->raw_buffer;
+ int offset, bitoffset;
+ int eccbytes;
+ int flips;
+
+ /* Read ECC bytes into our internal raw_buffer */
+ offset = nfc_geo->metadata_size * 8;
+ offset += ((8 * nfc_geo->eccn_chunk_size) + eccbits) * (i + 1);
+ offset -= eccbits;
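+ /* offset is now the bit position where chunk i's ECC parity begins */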
+ bitoffset = offset % 8;
+ eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
+ offset /= 8;
+ eccbytes -= offset;
+ nand_change_read_column_op(chip, offset, eccbuf,
+ eccbytes, false);
+
+ /*
+ * ECC data are not byte aligned and we may have
+ * in-band data in the first and last byte of
+ * eccbuf. Set non-eccbits to one so that
+ * nand_check_erased_ecc_chunk() does not count them
+ * as bitflips.
+ */
+ if (bitoffset)
+ eccbuf[0] |= GENMASK(bitoffset - 1, 0);
+
+ bitoffset = (bitoffset + eccbits) % 8;
+ if (bitoffset)
+ eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
+
+ /*
+ * The ECC hardware has an uncorrectable ECC status
+ * code in case we have bitflips in an erased page. As
+ * nothing was written into this subpage the ECC is
+ * obviously wrong and we can not trust it. We assume
+ * at this point that we are reading an erased page and
+ * try to correct the bitflips in buffer up to
+ * ecc_strength bitflips. If this is a page with random
+ * data, we exceed this number of bitflips and have an
+ * ECC failure. Otherwise we use the corrected buffer.
+ */
+ if (i == 0) {
+ /* The first block includes metadata */
+ flips = nand_check_erased_ecc_chunk(
+ buf + i * nfc_geo->eccn_chunk_size,
+ nfc_geo->eccn_chunk_size,
+ eccbuf, eccbytes,
+ this->auxiliary_virt,
+ nfc_geo->metadata_size,
+ nfc_geo->ecc_strength);
+ } else {
+ flips = nand_check_erased_ecc_chunk(
+ buf + i * nfc_geo->eccn_chunk_size,
+ nfc_geo->eccn_chunk_size,
+ eccbuf, eccbytes,
+ NULL, 0,
+ nfc_geo->ecc_strength);
+ }
+
+ if (flips > 0) {
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ flips);
+ mtd->ecc_stats.corrected += flips;
+ continue;
+ }
+
+ mtd->ecc_stats.failed++;
+ continue;
+ }
+
+ mtd->ecc_stats.corrected += *status;
+ max_bitflips = max_t(unsigned int, max_bitflips, *status);
+ }
+
+ return max_bitflips;
+}
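+
+/*
+ * Worked example of the offset math above, under a hypothetical
+ * geometry (metadata_size = 10, eccn_chunk_size = 512, gf_len = 13,
+ * ecc_strength = 2, hence eccbits = 26): for chunk i = 1,
+ * offset = 80 + (4096 + 26) * 2 - 26 = 8298 bits, so bitoffset = 2 and
+ * eccbytes = DIV_ROUND_UP(8324, 8) - 1037 = 4. The 26 parity bits span
+ * four bytes starting at byte 1037, and the two leading plus four
+ * trailing in-band bits are forced to 1 before the erased-page check.
+ */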
+
+static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+ unsigned int ecc_strength = geo->ecc_strength >> 1;
+ unsigned int gf_len = geo->gf_len;
+ unsigned int block0_size = geo->ecc0_chunk_size;
+ unsigned int blockn_size = geo->eccn_chunk_size;
+
+ this->bch_flashlayout0 =
+ BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
+ BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
+ BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
+ BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
+ BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block0_size, this);
+
+ this->bch_flashlayout1 =
+ BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
+ BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
+ BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
+ BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(blockn_size, this);
+}
+
+static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct bch_geometry *geo = &this->bch_geometry;
+ unsigned int max_bitflips;
+ int ret;
+
+ gpmi_bch_layout_std(this);
+ this->bch = true;
+
+ ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
+ if (ret)
+ return ret;
+
+ max_bitflips = gpmi_count_bitflips(chip, buf, 0,
+ geo->ecc_chunk_count,
+ geo->auxiliary_status_offset);
+
+ /* handle the block mark swapping */
+ block_mark_swapping(this, buf, this->auxiliary_virt);
+
+ if (oob_required) {
+ /*
+ * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
+ * for details about our policy for delivering the OOB.
+ *
+ * We fill the caller's buffer with set bits, and then copy the
+ * block mark to the caller's buffer. Note that, if block mark
+ * swapping was necessary, it has already been done, so we can
+ * rely on the first byte of the auxiliary buffer to contain
+ * the block mark.
+ */
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+ chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
+ }
+
+ return max_bitflips;
+}
+
+/* Fake a virtual small page for the subpage read */
+static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
+ uint32_t len, uint8_t *buf, int page)
+{
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *geo = &this->bch_geometry;
+ int size = chip->ecc.size; /* ECC chunk size */
+ int meta, n, page_size;
+ unsigned int max_bitflips;
+ unsigned int ecc_strength;
+ int first, last, marker_pos;
+ int ecc_parity_size;
+ int col = 0;
+ int ret;
+
+ /* The size of ECC parity */
+ ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
+
+ /* Compute the first and last chunks touched by the request */
+ first = offs / size;
+ last = (offs + len - 1) / size;
+
+ if (this->swap_block_mark) {
+ /*
+ * Find the chunk which contains the Block Marker.
+ * If this chunk is in the range of [first, last],
+ * we have to read out the whole page, because we have
+ * swapped the data at the Block Marker position into the
+ * metadata, which is bound to chunk 0.
+ */
+ marker_pos = geo->block_mark_byte_offset / size;
+ if (last >= marker_pos && first <= marker_pos) {
+ dev_dbg(this->dev,
+ "page:%d, first:%d, last:%d, marker at:%d\n",
+ page, first, last, marker_pos);
+ return gpmi_ecc_read_page(chip, buf, 0, page);
+ }
+ }
+
+ /*
+ * If there is a dedicated ECC for the metadata:
+ * - an extra ECC size must be added when calculating col and
+ * page_size, if the metadata size is NOT zero;
+ * - the ecc0 chunk size must be set to the same size as the other
+ * chunks, if the metadata size is zero.
+ */
+
+ meta = geo->metadata_size;
+ if (first) {
+ if (geo->ecc_for_meta)
+ col = meta + ecc_parity_size
+ + (size + ecc_parity_size) * first;
+ else
+ col = meta + (size + ecc_parity_size) * first;
+
+ meta = 0;
+ buf = buf + first * size;
+ }
+
+ n = last - first + 1;
+
+ if (geo->ecc_for_meta && meta)
+ page_size = meta + ecc_parity_size
+ + (size + ecc_parity_size) * n;
+ else
+ page_size = meta + (size + ecc_parity_size) * n;
+
+ ecc_strength = geo->ecc_strength >> 1;
+
+ this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(
+ (geo->ecc_for_meta ? n : n - 1)) |
+ BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
+ BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
+ BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
+ BF_BCH_FLASH0LAYOUT0_DATA0_SIZE((geo->ecc_for_meta ?
+ 0 : geo->ecc0_chunk_size), this);
+
+ this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
+ BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
+ BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
+ BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->eccn_chunk_size, this);
+
+ this->bch = true;
+
+ ret = nand_read_page_op(chip, page, col, buf, page_size);
+ if (ret)
+ return ret;
+
+ dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
+ page, offs, len, col, first, n, page_size);
+
+ max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
+
+ return max_bitflips;
+}
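+
+/*
+ * Worked example (hypothetical geometry: size = 512, ecc_parity_size =
+ * 13, meta = 10, no dedicated metadata ECC): reading offs = 1024,
+ * len = 1024 gives first = 2 and last = 3, so col = 10 + (512 + 13) * 2
+ * = 1060, meta becomes 0, n = 2 and page_size = (512 + 13) * 2 = 1050.
+ * The controller then reads 1050 bytes at column 1060 with the BCH
+ * layout reprogrammed for two chunks and no metadata.
+ */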
+
+static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+
+ dev_dbg(this->dev, "ecc write page.\n");
+
+ gpmi_bch_layout_std(this);
+ this->bch = true;
+
+ memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);
+
+ if (this->swap_block_mark) {
+ /*
+ * When doing bad block marker swapping we must always copy the
+ * input buffer as we can't modify the const buffer.
+ */
+ memcpy(this->data_buffer_dma, buf, mtd->writesize);
+ buf = this->data_buffer_dma;
+ block_mark_swapping(this, this->data_buffer_dma,
+ this->auxiliary_virt);
+ }
+
+ return nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
+}
+
+/*
+ * There are several places in this driver where we have to handle the OOB and
+ * block marks. This is the function where things are the most complicated, so
+ * this is where we try to explain it all. All the other places refer back to
+ * here.
+ *
+ * These are the rules, in order of decreasing importance:
+ *
+ * 1) Nothing the caller does can be allowed to imperil the block mark.
+ *
+ * 2) In read operations, the first byte of the OOB we return must reflect the
+ * true state of the block mark, no matter where that block mark appears in
+ * the physical page.
+ *
+ * 3) ECC-based read operations return an OOB full of set bits (since we never
+ * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
+ * return).
+ *
+ * 4) "Raw" read operations return a direct view of the physical bytes in the
+ * page, using the conventional definition of which bytes are data and which
+ * are OOB. This gives the caller a way to see the actual, physical bytes
+ * in the page, without the distortions applied by our ECC engine.
+ *
+ *
+ * What we do for this specific read operation depends on two questions:
+ *
+ * 1) Are we doing a "raw" read, or an ECC-based read?
+ *
+ * 2) Are we using block mark swapping or transcription?
+ *
+ * There are four cases, illustrated by the following Karnaugh map:
+ *
+ * | Raw | ECC-based |
+ * -------------+-------------------------+-------------------------+
+ * | Read the conventional | |
+ * | OOB at the end of the | |
+ * Swapping | page and return it. It | |
+ * | contains exactly what | |
+ * | we want. | Read the block mark and |
+ * -------------+-------------------------+ return it in a buffer |
+ * | Read the conventional | full of set bits. |
+ * | OOB at the end of the | |
+ * | page and also the block | |
+ * Transcribing | mark in the metadata. | |
+ * | Copy the block mark | |
+ * | into the first byte of | |
+ * | the OOB. | |
+ * -------------+-------------------------+-------------------------+
+ *
+ * Note that we break rule #4 in the Transcribing/Raw case because we're not
+ * giving an accurate view of the actual, physical bytes in the page (we're
+ * overwriting the block mark). That's OK because it's more important to follow
+ * rule #2.
+ *
+ * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
+ * easy. When reading a page, for example, the NAND Flash MTD code calls our
+ * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
+ * ECC-based or raw view of the page is implicit in which function it calls
+ * (there is a similar pair of ECC-based/raw functions for writing).
+ */
+static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ int ret;
+
+ /* clear the OOB buffer */
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+
+ /* Read out the conventional OOB. */
+ ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
+ if (ret)
+ return ret;
+
+ /*
+ * Now, we want to make sure the block mark is correct. In the
+ * non-transcribing case (!GPMI_IS_MX23()), we already have it.
+ * Otherwise, we need to explicitly read it.
+ */
+ if (GPMI_IS_MX23(this)) {
+ /* Read the block mark into the first byte of the OOB buffer. */
+ ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mtd_oob_region of = { };
+
+ /* Do we have available oob area? */
+ mtd_ooblayout_free(mtd, 0, &of);
+ if (!of.length)
+ return -EPERM;
+
+ if (!nand_is_slc(chip))
+ return -EPERM;
+
+ return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
+ chip->oob_poi + of.offset, of.length);
+}
+
+/*
+ * This function reads a NAND page without involving the ECC engine (no HW
+ * ECC correction).
+ * The tricky part in the GPMI/BCH controller is that it stores ECC bits
+ * inline (interleaved with payload DATA), and does not align data chunks
+ * on byte boundaries.
+ * We thus need to take care when moving the payload data and ECC bits
+ * stored in the page into the provided buffers, which is why we're using
+ * nand_extract_bits().
+ *
+ * See set_geometry_by_ecc_info inline comments to have a full description
+ * of the layout used by the GPMI controller.
+ */
+static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ int eccsize = nfc_geo->eccn_chunk_size;
+ int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+ u8 *tmp_buf = this->raw_buffer;
+ size_t src_bit_off;
+ size_t oob_bit_off;
+ size_t oob_byte_off;
+ uint8_t *oob = chip->oob_poi;
+ int step;
+ int ret;
+
+ ret = nand_read_page_op(chip, page, 0, tmp_buf,
+ mtd->writesize + mtd->oobsize);
+ if (ret)
+ return ret;
+
+ /*
+ * If required, swap the bad block marker and the data stored in the
+ * metadata section, so that we don't wrongly consider a block as bad.
+ *
+ * See the layout description for a detailed explanation on why this
+ * is needed.
+ */
+ if (this->swap_block_mark)
+ swap(tmp_buf[0], tmp_buf[mtd->writesize]);
+
+ /*
+ * Copy the metadata section into the oob buffer (this section is
+ * guaranteed to be aligned on a byte boundary).
+ */
+ if (oob_required)
+ memcpy(oob, tmp_buf, nfc_geo->metadata_size);
+
+ oob_bit_off = nfc_geo->metadata_size * 8;
+ src_bit_off = oob_bit_off;
+
+ /* Extract interleaved payload data and ECC bits */
+ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
+ if (buf)
+ nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
+ src_bit_off, eccsize * 8);
+ src_bit_off += eccsize * 8;
+
+ /* Pad the last ECC block so it ends on a byte boundary */
+ if (step == nfc_geo->ecc_chunk_count - 1 &&
+ (oob_bit_off + eccbits) % 8)
+ eccbits += 8 - ((oob_bit_off + eccbits) % 8);
+
+ if (oob_required)
+ nand_extract_bits(oob, oob_bit_off, tmp_buf,
+ src_bit_off, eccbits);
+
+ src_bit_off += eccbits;
+ oob_bit_off += eccbits;
+ }
+
+ if (oob_required) {
+ oob_byte_off = oob_bit_off / 8;
+
+ if (oob_byte_off < mtd->oobsize)
+ memcpy(oob + oob_byte_off,
+ tmp_buf + mtd->writesize + oob_byte_off,
+ mtd->oobsize - oob_byte_off);
+ }
+
+ return 0;
+}
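+
+/*
+ * Bit layout illustration for the extraction above (hypothetical
+ * geometry: 10 bytes of metadata, 512-byte chunks, 26 ECC bits per
+ * chunk): the raw page carries the metadata in bits 0..79, chunk 0
+ * data in bits 80..4175, its parity in bits 4176..4201, chunk 1 data
+ * in bits 4202..8297, and so on. Only the metadata is byte aligned,
+ * which is why nand_extract_bits() is needed, and the last chunk's
+ * parity is padded up to the next byte boundary.
+ */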
+
+/*
+ * This function writes a NAND page without involving the ECC engine (no HW
+ * ECC generation).
+ * The tricky part in the GPMI/BCH controller is that it stores ECC bits
+ * inline (interleaved with payload DATA), and does not align data chunks
+ * on byte boundaries.
+ * We thus need to take care to move the OOB area to the right place in the
+ * final page, which is why we're using nand_extract_bits().
+ *
+ * See set_geometry_by_ecc_info inline comments to have a full description
+ * of the layout used by the GPMI controller.
+ */
+static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ int eccsize = nfc_geo->eccn_chunk_size;
+ int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+ u8 *tmp_buf = this->raw_buffer;
+ uint8_t *oob = chip->oob_poi;
+ size_t dst_bit_off;
+ size_t oob_bit_off;
+ size_t oob_byte_off;
+ int step;
+
+ /*
+ * Initialize all bits to 1 in case we don't have a buffer for the
+ * payload or oob data, so that unspecified bits of data are left
+ * in their erased state.
+ */
+ if (!buf || !oob_required)
+ memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
+
+ /*
+ * First copy the metadata section (stored in oob buffer) at the
+ * beginning of the page, as imposed by the GPMI layout.
+ */
+ memcpy(tmp_buf, oob, nfc_geo->metadata_size);
+ oob_bit_off = nfc_geo->metadata_size * 8;
+ dst_bit_off = oob_bit_off;
+
+ /* Interleave payload data and ECC bits */
+ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
+ if (buf)
+ nand_extract_bits(tmp_buf, dst_bit_off, buf,
+ step * eccsize * 8, eccsize * 8);
+ dst_bit_off += eccsize * 8;
+
+ /* Pad the last ECC block so it ends on a byte boundary */
+ if (step == nfc_geo->ecc_chunk_count - 1 &&
+ (oob_bit_off + eccbits) % 8)
+ eccbits += 8 - ((oob_bit_off + eccbits) % 8);
+
+ if (oob_required)
+ nand_extract_bits(tmp_buf, dst_bit_off, oob,
+ oob_bit_off, eccbits);
+
+ dst_bit_off += eccbits;
+ oob_bit_off += eccbits;
+ }
+
+ oob_byte_off = oob_bit_off / 8;
+
+ if (oob_required && oob_byte_off < mtd->oobsize)
+ memcpy(tmp_buf + mtd->writesize + oob_byte_off,
+ oob + oob_byte_off, mtd->oobsize - oob_byte_off);
+
+ /*
+ * If required, swap the bad block marker and the first byte of the
+ * metadata section, so that we don't modify the bad block marker.
+ *
+ * See the layout description for a detailed explanation on why this
+ * is needed.
+ */
+ if (this->swap_block_mark)
+ swap(tmp_buf[0], tmp_buf[mtd->writesize]);
+
+ return nand_prog_page_op(chip, page, 0, tmp_buf,
+ mtd->writesize + mtd->oobsize);
+}
+
+static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
+{
+ return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
+}
+
+static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
+{
+ return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
+}
+
+static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ int ret = 0;
+ uint8_t *block_mark;
+ int column, page, chipnr;
+
+ chipnr = (int)(ofs >> chip->chip_shift);
+ nand_select_target(chip, chipnr);
+
+ column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
+
+ /* Write the block mark. */
+ block_mark = this->data_buffer_dma;
+ block_mark[0] = 0; /* bad block marker */
+
+ /* Shift to get page */
+ page = (int)(ofs >> chip->page_shift);
+
+ ret = nand_prog_page_op(chip, page, column, block_mark, 1);
+
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static int nand_boot_set_geometry(struct gpmi_nand_data *this)
+{
+ struct boot_rom_geometry *geometry = &this->rom_geometry;
+
+ /*
+ * Set the boot block stride size.
+ *
+ * In principle, we should be reading this from the OTP bits, since
+ * that's where the ROM is going to get it. In fact, we don't have any
+ * way to read the OTP bits, so we go with the default and hope for the
+ * best.
+ */
+ geometry->stride_size_in_pages = 64;
+
+ /*
+ * Set the search area stride exponent.
+ *
+ * In principle, we should be reading this from the OTP bits, since
+ * that's where the ROM is going to get it. In fact, we don't have any
+ * way to read the OTP bits, so we go with the default and hope for the
+ * best.
+ */
+ geometry->search_area_stride_exponent = 2;
+ return 0;
+}
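+
+/*
+ * With these defaults the search area covers 1 << 2 = 4 strides of 64
+ * pages each, i.e. the ROM scans the first 256 pages of the chip for
+ * the NCB fingerprint.
+ */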
+
+static const char *fingerprint = "STMP";
+static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
+{
+ struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+ struct device *dev = this->dev;
+ struct nand_chip *chip = &this->nand;
+ unsigned int search_area_size_in_strides;
+ unsigned int stride;
+ unsigned int page;
+ u8 *buffer = nand_get_data_buf(chip);
+ bool found_an_ncb_fingerprint = false;
+ int ret;
+
+ /* Compute the number of strides in a search area. */
+ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+
+ nand_select_target(chip, 0);
+
+ /*
+ * Loop through the first search area, looking for the NCB fingerprint.
+ */
+ dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
+
+ for (stride = 0; stride < search_area_size_in_strides; stride++) {
+ /* Compute the page addresses. */
+ page = stride * rom_geo->stride_size_in_pages;
+
+ dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
+
+ /*
+ * Read the NCB fingerprint. The fingerprint is four bytes long
+ * and starts in the 12th byte of the page.
+ */
+ ret = nand_read_page_op(chip, page, 12, buffer,
+ strlen(fingerprint));
+ if (ret)
+ continue;
+
+ /* Look for the fingerprint. */
+ if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
+ found_an_ncb_fingerprint = true;
+ break;
+ }
+
+ }
+
+ nand_deselect_target(chip);
+
+ if (found_an_ncb_fingerprint)
+ dev_dbg(dev, "\tFound a fingerprint\n");
+ else
+ dev_dbg(dev, "\tNo fingerprint found\n");
+ return found_an_ncb_fingerprint;
+}
+
+/* Writes a transcription stamp. */
+static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+ struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int block_size_in_pages;
+ unsigned int search_area_size_in_strides;
+ unsigned int search_area_size_in_pages;
+ unsigned int search_area_size_in_blocks;
+ unsigned int block;
+ unsigned int stride;
+ unsigned int page;
+ u8 *buffer = nand_get_data_buf(chip);
+ int status;
+
+ /* Compute the search area geometry. */
+ block_size_in_pages = mtd->erasesize / mtd->writesize;
+ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+ search_area_size_in_pages = search_area_size_in_strides *
+ rom_geo->stride_size_in_pages;
+ search_area_size_in_blocks =
+ (search_area_size_in_pages + (block_size_in_pages - 1)) /
+ block_size_in_pages;
+
+ dev_dbg(dev, "Search Area Geometry :\n");
+ dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
+ dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
+ dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);
+
+ nand_select_target(chip, 0);
+
+ /* Loop over blocks in the first search area, erasing them. */
+ dev_dbg(dev, "Erasing the search area...\n");
+
+ for (block = 0; block < search_area_size_in_blocks; block++) {
+ /* Erase this block. */
+ dev_dbg(dev, "\tErasing block 0x%x\n", block);
+ status = nand_erase_op(chip, block);
+ if (status)
+ dev_err(dev, "[%s] Erase failed.\n", __func__);
+ }
+
+ /* Write the NCB fingerprint into the page buffer. */
+ memset(buffer, ~0, mtd->writesize);
+ memcpy(buffer + 12, fingerprint, strlen(fingerprint));
+
+ /* Loop through the first search area, writing NCB fingerprints. */
+ dev_dbg(dev, "Writing NCB fingerprints...\n");
+ for (stride = 0; stride < search_area_size_in_strides; stride++) {
+ /* Compute the page addresses. */
+ page = stride * rom_geo->stride_size_in_pages;
+
+ /* Write the first page of the current stride. */
+ dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
+
+ status = chip->ecc.write_page_raw(chip, buffer, 0, page);
+ if (status)
+ dev_err(dev, "[%s] Write failed.\n", __func__);
+ }
+
+ nand_deselect_target(chip);
+
+ return 0;
+}
+
+static int mx23_boot_init(struct gpmi_nand_data *this)
+{
+ struct device *dev = this->dev;
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int block_count;
+ unsigned int block;
+ int chipnr;
+ int page;
+ loff_t byte;
+ uint8_t block_mark;
+ int ret = 0;
+
+ /*
+ * If control arrives here, we can't use block mark swapping, which
+ * means we're forced to use transcription. First, scan for the
+ * transcription stamp. If we find it, then we don't have to do
+ * anything -- the block marks are already transcribed.
+ */
+ if (mx23_check_transcription_stamp(this))
+ return 0;
+
+ /*
+ * If control arrives here, we couldn't find a transcription stamp,
+ * so we presume the block marks are in the conventional location.
+ */
+ dev_dbg(dev, "Transcribing bad block marks...\n");
+
+ /* Compute the number of blocks in the entire medium. */
+ block_count = nanddev_eraseblocks_per_target(&chip->base);
+
+ /*
+ * Loop over all the blocks in the medium, transcribing block marks as
+ * we go.
+ */
+ for (block = 0; block < block_count; block++) {
+ /*
+ * Compute the chip, page and byte addresses for this block's
+ * conventional mark.
+ */
+ chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
+ page = block << (chip->phys_erase_shift - chip->page_shift);
+ byte = block << chip->phys_erase_shift;
+
+ /* Send the command to read the conventional block mark. */
+ nand_select_target(chip, chipnr);
+ ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
+ 1);
+ nand_deselect_target(chip);
+
+ if (ret)
+ continue;
+
+ /*
+ * Check if the block is marked bad. If so, we need to mark it
+ * again, but this time the result will be a mark in the
+ * location where we transcribe block marks.
+ */
+ if (block_mark != 0xff) {
+ dev_dbg(dev, "Transcribing mark in block %u\n", block);
+ ret = chip->legacy.block_markbad(chip, byte);
+ if (ret)
+ dev_err(dev,
+ "Failed to mark block bad with ret %d\n",
+ ret);
+ }
+ }
+
+ /* Write the stamp that indicates we've transcribed the block marks. */
+ mx23_write_transcription_stamp(this);
+ return 0;
+}
+
+static int nand_boot_init(struct gpmi_nand_data *this)
+{
+ nand_boot_set_geometry(this);
+
+ /* This is ROM arch-specific initialization before the BBT scanning. */
+ if (GPMI_IS_MX23(this))
+ return mx23_boot_init(this);
+ return 0;
+}
+
+static int gpmi_set_geometry(struct gpmi_nand_data *this)
+{
+ int ret;
+
+ /* Free the temporary DMA memory for reading ID. */
+ gpmi_free_dma_buffer(this);
+
+ /* Set up the NFC geometry which is used by BCH. */
+ ret = bch_set_geometry(this);
+ if (ret) {
+ dev_err(this->dev, "Error setting BCH geometry: %d\n", ret);
+ return ret;
+ }
+
+ /* Allocate the new DMA buffers according to the page size and OOB size */
+ return gpmi_alloc_dma_buffer(this);
+}
+
+static int gpmi_init_last(struct gpmi_nand_data *this)
+{
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct bch_geometry *bch_geo = &this->bch_geometry;
+ int ret;
+
+ /* Set up the medium geometry */
+ ret = gpmi_set_geometry(this);
+ if (ret)
+ return ret;
+
+ /* Init the nand_ecc_ctrl{} */
+ ecc->read_page = gpmi_ecc_read_page;
+ ecc->write_page = gpmi_ecc_write_page;
+ ecc->read_oob = gpmi_ecc_read_oob;
+ ecc->write_oob = gpmi_ecc_write_oob;
+ ecc->read_page_raw = gpmi_ecc_read_page_raw;
+ ecc->write_page_raw = gpmi_ecc_write_page_raw;
+ ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
+ ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
+ ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ ecc->size = bch_geo->eccn_chunk_size;
+ ecc->strength = bch_geo->ecc_strength;
+ mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
+
+ /*
+ * We only enable the subpage read when:
+ * (1) the chip is imx6, and
+ * (2) the size of the ECC parity is byte aligned.
+ */
+ if (GPMI_IS_MX6(this) &&
+ ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
+ ecc->read_subpage = gpmi_ecc_read_subpage;
+ chip->options |= NAND_SUBPAGE_READ;
+ }
+
+ return 0;
+}
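+
+/*
+ * Alignment examples for the check above (illustrative strengths; the
+ * real values come from the BCH geometry): gf_len = 13 with
+ * ecc_strength = 8 gives 104 parity bits = 13 bytes, so subpage reads
+ * can be enabled; ecc_strength = 2 gives 26 bits, which is not byte
+ * aligned, so full-page reads are used instead.
+ */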
+
+static int gpmi_nand_attach_chip(struct nand_chip *chip)
+{
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ int ret;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (of_property_read_bool(this->dev->of_node,
+ "fsl,no-blockmark-swap"))
+ this->swap_block_mark = false;
+ }
+ dev_dbg(this->dev, "Blockmark swapping %sabled\n",
+ this->swap_block_mark ? "en" : "dis");
+
+ ret = gpmi_init_last(this);
+ if (ret)
+ return ret;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ return 0;
+}
+
+static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
+{
+ struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];
+
+ this->ntransfers++;
+
+ if (this->ntransfers == GPMI_MAX_TRANSFERS)
+ return NULL;
+
+ return transfer;
+}
+
+static struct dma_async_tx_descriptor *gpmi_chain_command(
+ struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
+{
+ struct dma_chan *channel = get_dma_chan(this);
+ struct dma_async_tx_descriptor *desc;
+ struct gpmi_transfer *transfer;
+ int chip = this->nand.cur_cs;
+ u32 pio[3];
+
+ /* [1] send out the PIO words */
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(chip, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
+ | BM_GPMI_CTRL0_ADDRESS_INCREMENT
+ | BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
+ pio[1] = 0;
+ pio[2] = 0;
+ desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
+ DMA_TRANS_NONE, 0);
+ if (!desc)
+ return NULL;
+
+ transfer = get_next_transfer(this);
+ if (!transfer)
+ return NULL;
+
+ transfer->cmdbuf[0] = cmd;
+ if (naddr)
+ memcpy(&transfer->cmdbuf[1], addr, naddr);
+
+ sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
+ dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);
+
+ transfer->direction = DMA_TO_DEVICE;
+
+ desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
+ MXS_DMA_CTRL_WAIT4END);
+ return desc;
+}
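+
+/*
+ * Example of the chaining above (READ0 with five address cycles is an
+ * illustrative NAND sequence): cmdbuf becomes { 0x00, a0..a4 } and
+ * XFER_COUNT = naddr + 1 = 6; since ADDRESS starts at NAND_CLE with
+ * ADDRESS_INCREMENT set, the controller emits the opcode on CLE and
+ * the remaining five bytes on ALE.
+ */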
+
+static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
+ struct gpmi_nand_data *this)
+{
+ struct dma_chan *channel = get_dma_chan(this);
+ u32 pio[2];
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+ | BF_GPMI_CTRL0_XFER_COUNT(0);
+ pio[1] = 0;
+
+ return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
+ MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
+}
+
+static struct dma_async_tx_descriptor *gpmi_chain_data_read(
+ struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = get_dma_chan(this);
+ struct gpmi_transfer *transfer;
+ u32 pio[6] = {};
+
+ transfer = get_next_transfer(this);
+ if (!transfer)
+ return NULL;
+
+ transfer->direction = DMA_FROM_DEVICE;
+
+ *direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
+ DMA_FROM_DEVICE);
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+ | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
+
+ if (this->bch) {
+ pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
+ | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
+ | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
+ | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
+ pio[3] = raw_len;
+ pio[4] = transfer->sgl.dma_address;
+ pio[5] = this->auxiliary_phys;
+ }
+
+ desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
+ DMA_TRANS_NONE, 0);
+ if (!desc)
+ return NULL;
+
+ if (!this->bch)
+ desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
+ DMA_DEV_TO_MEM,
+ MXS_DMA_CTRL_WAIT4END);
+
+ return desc;
+}
+
+static struct dma_async_tx_descriptor *gpmi_chain_data_write(
+ struct gpmi_nand_data *this, const void *buf, int raw_len)
+{
+ struct dma_chan *channel = get_dma_chan(this);
+ struct dma_async_tx_descriptor *desc;
+ struct gpmi_transfer *transfer;
+ u32 pio[6] = {};
+
+ transfer = get_next_transfer(this);
+ if (!transfer)
+ return NULL;
+
+ transfer->direction = DMA_TO_DEVICE;
+
+ prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);
+
+ pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
+ | BM_GPMI_CTRL0_WORD_LENGTH
+ | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
+ | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+ | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+ | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
+
+ if (this->bch) {
+ pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
+ | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
+ | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
+ BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
+ pio[3] = raw_len;
+ pio[4] = transfer->sgl.dma_address;
+ pio[5] = this->auxiliary_phys;
+ }
+
+ desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
+ DMA_TRANS_NONE,
+ (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
+ if (!desc)
+ return NULL;
+
+ if (!this->bch)
+ desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
+ DMA_MEM_TO_DEV,
+ MXS_DMA_CTRL_WAIT4END);
+
+ return desc;
+}
+
+static int gpmi_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ const struct nand_op_instr *instr;
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct dma_async_tx_descriptor *desc = NULL;
+ int i, ret, buf_len = 0, nbufs = 0;
+ u8 cmd = 0;
+ void *buf_read = NULL;
+ const void *buf_write = NULL;
+ bool direct = false;
+ struct completion *dma_completion, *bch_completion;
+ unsigned long to;
+
+ if (check_only)
+ return 0;
+
+ this->ntransfers = 0;
+ for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
+ this->transfers[i].direction = DMA_NONE;
+
+ ret = pm_runtime_resume_and_get(this->dev);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * This driver currently supports only one NAND chip, and all dies
+ * share the same configuration, so once timings have been applied on
+ * the controller side they will not change anymore. When that is no
+ * longer true, the check on must_apply_timings will have to be dropped.
+ */
+ if (this->hw.must_apply_timings) {
+ this->hw.must_apply_timings = false;
+ ret = gpmi_nfc_apply_timings(this);
+ if (ret)
+ goto out_pm;
+ }
+
+ dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
+
+ for (i = 0; i < op->ninstrs; i++) {
+ instr = &op->instrs[i];
+
+ nand_op_trace(" ", instr);
+
+ switch (instr->type) {
+ case NAND_OP_WAITRDY_INSTR:
+ desc = gpmi_chain_wait_ready(this);
+ break;
+ case NAND_OP_CMD_INSTR:
+ cmd = instr->ctx.cmd.opcode;
+
+ /*
+ * When this command is followed by an address cycle,
+ * chain it together with that address cycle.
+ */
+ if (i + 1 != op->ninstrs &&
+ op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
+ continue;
+
+ desc = gpmi_chain_command(this, cmd, NULL, 0);
+
+ break;
+ case NAND_OP_ADDR_INSTR:
+ desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
+ instr->ctx.addr.naddrs);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ buf_write = instr->ctx.data.buf.out;
+ buf_len = instr->ctx.data.len;
+ nbufs++;
+
+ desc = gpmi_chain_data_write(this, buf_write, buf_len);
+
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ if (!instr->ctx.data.len)
+ break;
+ buf_read = instr->ctx.data.buf.in;
+ buf_len = instr->ctx.data.len;
+ nbufs++;
+
+ desc = gpmi_chain_data_read(this, buf_read, buf_len,
+ &direct);
+ break;
+ }
+
+ if (!desc) {
+ ret = -ENXIO;
+ goto unmap;
+ }
+ }
+
+ dev_dbg(this->dev, "%s setup done\n", __func__);
+
+ if (nbufs > 1) {
+ dev_err(this->dev, "Multiple data instructions not supported\n");
+ ret = -EINVAL;
+ goto unmap;
+ }
+
+ if (this->bch) {
+ writel(this->bch_flashlayout0,
+ this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
+ writel(this->bch_flashlayout1,
+ this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
+ }
+
+ desc->callback = dma_irq_callback;
+ desc->callback_param = this;
+ dma_completion = &this->dma_done;
+ bch_completion = NULL;
+
+ init_completion(dma_completion);
+
+ if (this->bch && buf_read) {
+ writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
+ this->resources.bch_regs + HW_BCH_CTRL_SET);
+ bch_completion = &this->bch_done;
+ init_completion(bch_completion);
+ }
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(get_dma_chan(this));
+
+ to = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
+ if (!to) {
+ dev_err(this->dev, "DMA timeout, last DMA\n");
+ gpmi_dump_info(this);
+ ret = -ETIMEDOUT;
+ goto unmap;
+ }
+
+ if (this->bch && buf_read) {
+ to = wait_for_completion_timeout(bch_completion, msecs_to_jiffies(1000));
+ if (!to) {
+ dev_err(this->dev, "BCH timeout, last DMA\n");
+ gpmi_dump_info(this);
+ ret = -ETIMEDOUT;
+ goto unmap;
+ }
+ }
+
+ writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
+ this->resources.bch_regs + HW_BCH_CTRL_CLR);
+ gpmi_clear_bch(this);
+
+ ret = 0;
+
+unmap:
+ for (i = 0; i < this->ntransfers; i++) {
+ struct gpmi_transfer *transfer = &this->transfers[i];
+
+ if (transfer->direction != DMA_NONE)
+ dma_unmap_sg(this->dev, &transfer->sgl, 1,
+ transfer->direction);
+ }
+
+ if (!ret && buf_read && !direct)
+ memcpy(buf_read, this->data_buffer_dma,
+ gpmi_raw_len_to_len(this, buf_len));
+
+ this->bch = false;
+
+out_pm:
+ pm_runtime_mark_last_busy(this->dev);
+ pm_runtime_put_autosuspend(this->dev);
+
+ return ret;
+}
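+
+/*
+ * For illustration, a typical page read reaches this function as the
+ * instruction sequence CMD(READ0), ADDR(5), CMD(READSTART), WAITRDY,
+ * DATA_IN: the first CMD is deferred and chained with the following
+ * ADDR into a single gpmi_chain_command() descriptor, and the
+ * resulting descriptor chain is then issued as one DMA job.
+ */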
+
+static const struct nand_controller_ops gpmi_nand_controller_ops = {
+ .attach_chip = gpmi_nand_attach_chip,
+ .setup_interface = gpmi_setup_interface,
+ .exec_op = gpmi_nfc_exec_op,
+};
+
+static int gpmi_nand_init(struct gpmi_nand_data *this)
+{
+ struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /* init the MTD data structures */
+ mtd->name = "gpmi-nand";
+ mtd->dev.parent = this->dev;
+
+ /* Init the nand_chip{}; we don't support a 16-bit NAND Flash bus. */
+ nand_set_controller_data(chip, this);
+ nand_set_flash_node(chip, this->pdev->dev.of_node);
+ chip->legacy.block_markbad = gpmi_block_markbad;
+ chip->badblock_pattern = &gpmi_bbt_descr;
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+ /* Set up swap_block_mark; it must be set before gpmi_set_geometry() */
+ this->swap_block_mark = !GPMI_IS_MX23(this);
+
+ /*
+ * Allocate a temporary DMA buffer for reading ID in the
+ * nand_scan_ident().
+ */
+ this->bch_geometry.payload_size = 1024;
+ this->bch_geometry.auxiliary_size = 128;
+ ret = gpmi_alloc_dma_buffer(this);
+ if (ret)
+ return ret;
+
+ nand_controller_init(&this->base);
+ this->base.ops = &gpmi_nand_controller_ops;
+ chip->controller = &this->base;
+
+ ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
+ if (ret)
+ goto err_out;
+
+ ret = nand_boot_init(this);
+ if (ret)
+ goto err_nand_cleanup;
+ ret = nand_create_bbt(chip);
+ if (ret)
+ goto err_nand_cleanup;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err_nand_cleanup;
+ return 0;
+
+err_nand_cleanup:
+ nand_cleanup(chip);
+err_out:
+ gpmi_free_dma_buffer(this);
+ return ret;
+}
+
+static const struct of_device_id gpmi_nand_id_table[] = {
+ { .compatible = "fsl,imx23-gpmi-nand", .data = &gpmi_devdata_imx23, },
+ { .compatible = "fsl,imx28-gpmi-nand", .data = &gpmi_devdata_imx28, },
+ { .compatible = "fsl,imx6q-gpmi-nand", .data = &gpmi_devdata_imx6q, },
+ { .compatible = "fsl,imx6sx-gpmi-nand", .data = &gpmi_devdata_imx6sx, },
+ { .compatible = "fsl,imx7d-gpmi-nand", .data = &gpmi_devdata_imx7d,},
+ {}
+};
+MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
+
+static int gpmi_nand_probe(struct platform_device *pdev)
+{
+ struct gpmi_nand_data *this;
+ int ret;
+
+ this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
+ if (!this)
+ return -ENOMEM;
+
+ this->devdata = of_device_get_match_data(&pdev->dev);
+ platform_set_drvdata(pdev, this);
+ this->pdev = pdev;
+ this->dev = &pdev->dev;
+
+ ret = acquire_resources(this);
+ if (ret)
+ goto exit_acquire_resources;
+
+ ret = __gpmi_enable_clk(this, true);
+ if (ret)
+ goto exit_acquire_resources;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ ret = gpmi_init(this);
+ if (ret)
+ goto exit_nfc_init;
+
+ ret = gpmi_nand_init(this);
+ if (ret)
+ goto exit_nfc_init;
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ dev_info(this->dev, "driver registered.\n");
+
+ return 0;
+
+exit_nfc_init:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ release_resources(this);
+exit_acquire_resources:
+
+ return ret;
+}
+
+static void gpmi_nand_remove(struct platform_device *pdev)
+{
+ struct gpmi_nand_data *this = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &this->nand;
+ int ret;
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ gpmi_free_dma_buffer(this);
+ release_resources(this);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gpmi_pm_suspend(struct device *dev)
+{
+ struct gpmi_nand_data *this = dev_get_drvdata(dev);
+
+ release_dma_channels(this);
+ return 0;
+}
+
+static int gpmi_pm_resume(struct device *dev)
+{
+ struct gpmi_nand_data *this = dev_get_drvdata(dev);
+ int ret;
+
+ ret = acquire_dma_channels(this);
+ if (ret < 0)
+ return ret;
+
+ /* re-init the GPMI registers */
+ ret = gpmi_init(this);
+ if (ret) {
+ dev_err(this->dev, "Error setting GPMI: %d\n", ret);
+ return ret;
+ }
+
+ /* Set flag to get timing setup restored for next exec_op */
+ if (this->hw.clk_rate)
+ this->hw.must_apply_timings = true;
+
+ /* re-init the BCH registers */
+ ret = bch_set_geometry(this);
+ if (ret) {
+ dev_err(this->dev, "Error setting BCH: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
+{
+ struct gpmi_nand_data *this = dev_get_drvdata(dev);
+
+ return __gpmi_enable_clk(this, false);
+}
+
+static int __maybe_unused gpmi_runtime_resume(struct device *dev)
+{
+ struct gpmi_nand_data *this = dev_get_drvdata(dev);
+
+ return __gpmi_enable_clk(this, true);
+}
+
+static const struct dev_pm_ops gpmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
+ SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
+};
+
+static struct platform_driver gpmi_nand_driver = {
+ .driver = {
+ .name = "gpmi-nand",
+ .pm = &gpmi_pm_ops,
+ .of_match_table = gpmi_nand_id_table,
+ },
+ .probe = gpmi_nand_probe,
+ .remove_new = gpmi_nand_remove,
+};
+module_platform_driver(gpmi_nand_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
new file mode 100644
index 0000000000..c3ff56ac62
--- /dev/null
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ */
+#ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H
+#define __DRIVERS_MTD_NAND_GPMI_NAND_H
+
+#include <linux/mtd/rawnand.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+
+#define GPMI_CLK_MAX 5 /* MX6Q needs five clocks */
+struct resources {
+ void __iomem *gpmi_regs;
+ void __iomem *bch_regs;
+ unsigned int dma_low_channel;
+ unsigned int dma_high_channel;
+ struct clk *clock[GPMI_CLK_MAX];
+};
+
+/**
+ * struct bch_geometry - BCH geometry description.
+ * @gf_len: The length of Galois Field. (e.g., 13 or 14)
+ * @ecc_strength: A number that describes the strength of the ECC
+ * algorithm.
+ * @page_size: The size, in bytes, of a physical page, including
+ * both data and OOB.
+ * @metadata_size: The size, in bytes, of the metadata.
+ * @ecc0_chunk_size: The size, in bytes, of the first ECC chunk.
+ * @eccn_chunk_size: The size, in bytes, of a single ECC chunk after
+ * the first chunk in the page.
+ * @ecc_chunk_count: The number of ECC chunks in the page.
+ * @payload_size: The size, in bytes, of the payload buffer.
+ * @auxiliary_size: The size, in bytes, of the auxiliary buffer.
+ * @auxiliary_status_offset: The offset into the auxiliary buffer at which
+ * the ECC status appears.
+ * @block_mark_byte_offset: The byte offset in the ECC-based page view at
+ * which the underlying physical block mark appears.
+ * @block_mark_bit_offset: The bit offset into the ECC-based page view at
+ * which the underlying physical block mark appears.
+ * @ecc_for_meta: A flag indicating whether there is a dedicated
+ * ECC for the metadata.
+ */
+struct bch_geometry {
+ unsigned int gf_len;
+ unsigned int ecc_strength;
+ unsigned int page_size;
+ unsigned int metadata_size;
+ unsigned int ecc0_chunk_size;
+ unsigned int eccn_chunk_size;
+ unsigned int ecc_chunk_count;
+ unsigned int payload_size;
+ unsigned int auxiliary_size;
+ unsigned int auxiliary_status_offset;
+ unsigned int block_mark_byte_offset;
+ unsigned int block_mark_bit_offset;
+ unsigned int ecc_for_meta; /* dedicated ECC for metadata */
+};
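+
+/*
+ * Example geometry (hypothetical, for illustration): on a 2048+64 byte
+ * page with 512-byte chunks, gf_len = 13 and ecc_strength = 8, each
+ * chunk carries 104 parity bits (13 bytes), so 10 bytes of metadata
+ * plus 4 * (512 + 13) chunk bytes occupy 2110 of the 2112 available
+ * bytes.
+ */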
+
+/**
+ * struct boot_rom_geometry - Boot ROM geometry description.
+ * @stride_size_in_pages: The size of a boot block stride, in pages.
+ * @search_area_stride_exponent: The logarithm to base 2 of the size of a
+ * search area in boot block strides.
+ */
+struct boot_rom_geometry {
+ unsigned int stride_size_in_pages;
+ unsigned int search_area_stride_exponent;
+};
+
+enum gpmi_type {
+ IS_MX23,
+ IS_MX28,
+ IS_MX6Q,
+ IS_MX6SX,
+ IS_MX7D,
+};
+
+struct gpmi_devdata {
+ enum gpmi_type type;
+ int bch_max_ecc_strength;
+ int max_chain_delay; /* See the SDR EDO mode */
+ const char * const *clks;
+ const int clks_count;
+};
+
+/**
+ * struct gpmi_nfc_hardware_timing - GPMI hardware timing parameters.
+ * @must_apply_timings: Whether controller timings have already been
+ * applied or not (useful only while there is
+ * support for only one chip select)
+ * @clk_rate: The clock rate that must be used to derive the
+ * following parameters
+ * @timing0: HW_GPMI_TIMING0 register
+ * @timing1: HW_GPMI_TIMING1 register
+ * @ctrl1n: HW_GPMI_CTRL1n register
+ */
+struct gpmi_nfc_hardware_timing {
+ bool must_apply_timings;
+ unsigned long clk_rate;
+ u32 timing0;
+ u32 timing1;
+ u32 ctrl1n;
+};
+
+#define GPMI_MAX_TRANSFERS 8
+
+struct gpmi_transfer {
+ u8 cmdbuf[8];
+ struct scatterlist sgl;
+ enum dma_data_direction direction;
+};
+
+struct gpmi_nand_data {
+ /* Devdata */
+ const struct gpmi_devdata *devdata;
+
+ /* System Interface */
+ struct device *dev;
+ struct platform_device *pdev;
+
+ /* Resources */
+ struct resources resources;
+
+ /* Flash Hardware */
+ struct gpmi_nfc_hardware_timing hw;
+
+ /* BCH */
+ struct bch_geometry bch_geometry;
+ struct completion bch_done;
+
+ /* NAND Boot issue */
+ bool swap_block_mark;
+ struct boot_rom_geometry rom_geometry;
+
+ /* MTD / NAND */
+ struct nand_controller base;
+ struct nand_chip nand;
+
+ struct gpmi_transfer transfers[GPMI_MAX_TRANSFERS];
+ int ntransfers;
+
+ bool bch;
+ uint32_t bch_flashlayout0;
+ uint32_t bch_flashlayout1;
+
+ char *data_buffer_dma;
+
+ void *auxiliary_virt;
+ dma_addr_t auxiliary_phys;
+
+ void *raw_buffer;
+
+ /* DMA channels */
+#define DMA_CHANS 8
+ struct dma_chan *dma_chans[DMA_CHANS];
+ struct completion dma_done;
+};
+
+/* BCH : Status Block Completion Codes */
+#define STATUS_GOOD 0x00
+#define STATUS_ERASED 0xff
+#define STATUS_UNCORRECTABLE 0xfe
+
+/* Use the devdata to distinguish different Archs. */
+#define GPMI_IS_MX23(x) ((x)->devdata->type == IS_MX23)
+#define GPMI_IS_MX28(x) ((x)->devdata->type == IS_MX28)
+#define GPMI_IS_MX6Q(x) ((x)->devdata->type == IS_MX6Q)
+#define GPMI_IS_MX6SX(x) ((x)->devdata->type == IS_MX6SX)
+#define GPMI_IS_MX7D(x) ((x)->devdata->type == IS_MX7D)
+
+#define GPMI_IS_MX6(x) (GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x) || \
+ GPMI_IS_MX7D(x))
+#define GPMI_IS_MXS(x) (GPMI_IS_MX23(x) || GPMI_IS_MX28(x))
+#endif
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h
new file mode 100644
index 0000000000..fc31fd084d
--- /dev/null
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ */
+#ifndef __GPMI_NAND_GPMI_REGS_H
+#define __GPMI_NAND_GPMI_REGS_H
+
+#define HW_GPMI_CTRL0 0x00000000
+#define HW_GPMI_CTRL0_SET 0x00000004
+#define HW_GPMI_CTRL0_CLR 0x00000008
+#define HW_GPMI_CTRL0_TOG 0x0000000c
+
+#define BP_GPMI_CTRL0_COMMAND_MODE 24
+#define BM_GPMI_CTRL0_COMMAND_MODE (3 << BP_GPMI_CTRL0_COMMAND_MODE)
+#define BF_GPMI_CTRL0_COMMAND_MODE(v) \
+ (((v) << BP_GPMI_CTRL0_COMMAND_MODE) & BM_GPMI_CTRL0_COMMAND_MODE)
+#define BV_GPMI_CTRL0_COMMAND_MODE__WRITE 0x0
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ 0x1
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ_AND_COMPARE 0x2
+#define BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY 0x3
+
+#define BM_GPMI_CTRL0_WORD_LENGTH (1 << 23)
+#define BV_GPMI_CTRL0_WORD_LENGTH__16_BIT 0x0
+#define BV_GPMI_CTRL0_WORD_LENGTH__8_BIT 0x1
+
+/*
+ * Difference in LOCK_CS between imx23 and imx28:
+ * this bit may impact the _POWER_ consumption, so some chips
+ * do not set it.
+ */
+#define MX23_BP_GPMI_CTRL0_LOCK_CS 22
+#define MX28_BP_GPMI_CTRL0_LOCK_CS 27
+#define LOCK_CS_ENABLE 0x1
+#define BF_GPMI_CTRL0_LOCK_CS(v, x) 0x0
+
+/* Difference in CS between imx23 and imx28 */
+#define BP_GPMI_CTRL0_CS 20
+#define MX23_BM_GPMI_CTRL0_CS (3 << BP_GPMI_CTRL0_CS)
+#define MX28_BM_GPMI_CTRL0_CS (7 << BP_GPMI_CTRL0_CS)
+#define BF_GPMI_CTRL0_CS(v, x) (((v) << BP_GPMI_CTRL0_CS) & \
+ (GPMI_IS_MX23((x)) \
+ ? MX23_BM_GPMI_CTRL0_CS \
+ : MX28_BM_GPMI_CTRL0_CS))
+
+#define BP_GPMI_CTRL0_ADDRESS 17
+#define BM_GPMI_CTRL0_ADDRESS (3 << BP_GPMI_CTRL0_ADDRESS)
+#define BF_GPMI_CTRL0_ADDRESS(v) \
+ (((v) << BP_GPMI_CTRL0_ADDRESS) & BM_GPMI_CTRL0_ADDRESS)
+#define BV_GPMI_CTRL0_ADDRESS__NAND_DATA 0x0
+#define BV_GPMI_CTRL0_ADDRESS__NAND_CLE 0x1
+#define BV_GPMI_CTRL0_ADDRESS__NAND_ALE 0x2
+
+#define BM_GPMI_CTRL0_ADDRESS_INCREMENT (1 << 16)
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__DISABLED 0x0
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__ENABLED 0x1
+
+#define BP_GPMI_CTRL0_XFER_COUNT 0
+#define BM_GPMI_CTRL0_XFER_COUNT (0xffff << BP_GPMI_CTRL0_XFER_COUNT)
+#define BF_GPMI_CTRL0_XFER_COUNT(v) \
+ (((v) << BP_GPMI_CTRL0_XFER_COUNT) & BM_GPMI_CTRL0_XFER_COUNT)
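+
+/*
+ * Illustrative CTRL0 composition (WORD_LENGTH and CS omitted):
+ * BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE) |
+ * BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE) |
+ * BM_GPMI_CTRL0_ADDRESS_INCREMENT | BF_GPMI_CTRL0_XFER_COUNT(6)
+ * evaluates to (0x1 << 17) | (1 << 16) | 6 = 0x00030006, i.e. "write
+ * six bytes, the first on CLE, then auto-increment to ALE".
+ */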
+
+#define HW_GPMI_COMPARE 0x00000010
+
+#define HW_GPMI_ECCCTRL 0x00000020
+#define HW_GPMI_ECCCTRL_SET 0x00000024
+#define HW_GPMI_ECCCTRL_CLR 0x00000028
+#define HW_GPMI_ECCCTRL_TOG 0x0000002c
+
+#define BP_GPMI_ECCCTRL_ECC_CMD 13
+#define BM_GPMI_ECCCTRL_ECC_CMD (3 << BP_GPMI_ECCCTRL_ECC_CMD)
+#define BF_GPMI_ECCCTRL_ECC_CMD(v) \
+ (((v) << BP_GPMI_ECCCTRL_ECC_CMD) & BM_GPMI_ECCCTRL_ECC_CMD)
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE 0x0
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE 0x1
+
+#define BM_GPMI_ECCCTRL_ENABLE_ECC (1 << 12)
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__ENABLE 0x1
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__DISABLE 0x0
+
+#define BP_GPMI_ECCCTRL_BUFFER_MASK 0
+#define BM_GPMI_ECCCTRL_BUFFER_MASK (0x1ff << BP_GPMI_ECCCTRL_BUFFER_MASK)
+#define BF_GPMI_ECCCTRL_BUFFER_MASK(v) \
+ (((v) << BP_GPMI_ECCCTRL_BUFFER_MASK) & BM_GPMI_ECCCTRL_BUFFER_MASK)
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY 0x100
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE 0x1FF
+
+#define HW_GPMI_ECCCOUNT 0x00000030
+#define HW_GPMI_PAYLOAD 0x00000040
+#define HW_GPMI_AUXILIARY 0x00000050
+#define HW_GPMI_CTRL1 0x00000060
+#define HW_GPMI_CTRL1_SET 0x00000064
+#define HW_GPMI_CTRL1_CLR 0x00000068
+#define HW_GPMI_CTRL1_TOG 0x0000006c
+
+#define BP_GPMI_CTRL1_DECOUPLE_CS 24
+#define BM_GPMI_CTRL1_DECOUPLE_CS (1 << BP_GPMI_CTRL1_DECOUPLE_CS)
+
+#define BP_GPMI_CTRL1_WRN_DLY_SEL 22
+#define BM_GPMI_CTRL1_WRN_DLY_SEL (0x3 << BP_GPMI_CTRL1_WRN_DLY_SEL)
+#define BF_GPMI_CTRL1_WRN_DLY_SEL(v) \
+ (((v) << BP_GPMI_CTRL1_WRN_DLY_SEL) & BM_GPMI_CTRL1_WRN_DLY_SEL)
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS 0x0
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_6_TO_10NS 0x1
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_7_TO_12NS 0x2
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY 0x3
+
+#define BM_GPMI_CTRL1_GANGED_RDYBUSY (1 << 19)
+#define BM_GPMI_CTRL1_BCH_MODE (1 << 18)
+
+#define BP_GPMI_CTRL1_DLL_ENABLE 17
+#define BM_GPMI_CTRL1_DLL_ENABLE (1 << BP_GPMI_CTRL1_DLL_ENABLE)
+
+#define BP_GPMI_CTRL1_HALF_PERIOD 16
+#define BM_GPMI_CTRL1_HALF_PERIOD (1 << BP_GPMI_CTRL1_HALF_PERIOD)
+
+#define BP_GPMI_CTRL1_RDN_DELAY 12
+#define BM_GPMI_CTRL1_RDN_DELAY (0xf << BP_GPMI_CTRL1_RDN_DELAY)
+#define BF_GPMI_CTRL1_RDN_DELAY(v) \
+ (((v) << BP_GPMI_CTRL1_RDN_DELAY) & BM_GPMI_CTRL1_RDN_DELAY)
+
+#define BM_GPMI_CTRL1_DEV_RESET (1 << 3)
+#define BV_GPMI_CTRL1_DEV_RESET__ENABLED 0x0
+#define BV_GPMI_CTRL1_DEV_RESET__DISABLED 0x1
+
+#define BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY (1 << 2)
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVELOW 0x0
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVEHIGH 0x1
+
+#define BM_GPMI_CTRL1_CAMERA_MODE (1 << 1)
+#define BV_GPMI_CTRL1_GPMI_MODE__NAND 0x0
+#define BV_GPMI_CTRL1_GPMI_MODE__ATA 0x1
+
+#define BM_GPMI_CTRL1_GPMI_MODE (1 << 0)
+
+#define BM_GPMI_CTRL1_CLEAR_MASK (BM_GPMI_CTRL1_WRN_DLY_SEL | \
+ BM_GPMI_CTRL1_DLL_ENABLE | \
+ BM_GPMI_CTRL1_RDN_DELAY | \
+ BM_GPMI_CTRL1_HALF_PERIOD)
+
+#define HW_GPMI_TIMING0 0x00000070
+
+#define BP_GPMI_TIMING0_ADDRESS_SETUP 16
+#define BM_GPMI_TIMING0_ADDRESS_SETUP (0xff << BP_GPMI_TIMING0_ADDRESS_SETUP)
+#define BF_GPMI_TIMING0_ADDRESS_SETUP(v) \
+ (((v) << BP_GPMI_TIMING0_ADDRESS_SETUP) & BM_GPMI_TIMING0_ADDRESS_SETUP)
+
+#define BP_GPMI_TIMING0_DATA_HOLD 8
+#define BM_GPMI_TIMING0_DATA_HOLD (0xff << BP_GPMI_TIMING0_DATA_HOLD)
+#define BF_GPMI_TIMING0_DATA_HOLD(v) \
+ (((v) << BP_GPMI_TIMING0_DATA_HOLD) & BM_GPMI_TIMING0_DATA_HOLD)
+
+#define BP_GPMI_TIMING0_DATA_SETUP 0
+#define BM_GPMI_TIMING0_DATA_SETUP (0xff << BP_GPMI_TIMING0_DATA_SETUP)
+#define BF_GPMI_TIMING0_DATA_SETUP(v) \
+ (((v) << BP_GPMI_TIMING0_DATA_SETUP) & BM_GPMI_TIMING0_DATA_SETUP)
+
+#define HW_GPMI_TIMING1 0x00000080
+#define BP_GPMI_TIMING1_BUSY_TIMEOUT 16
+#define BM_GPMI_TIMING1_BUSY_TIMEOUT (0xffff << BP_GPMI_TIMING1_BUSY_TIMEOUT)
+#define BF_GPMI_TIMING1_BUSY_TIMEOUT(v) \
+ (((v) << BP_GPMI_TIMING1_BUSY_TIMEOUT) & BM_GPMI_TIMING1_BUSY_TIMEOUT)
+
+#define HW_GPMI_TIMING2 0x00000090
+#define HW_GPMI_DATA 0x000000a0
+
+/* MX28 uses this to detect READY. */
+#define HW_GPMI_STAT 0x000000b0
+#define MX28_BP_GPMI_STAT_READY_BUSY 24
+#define MX28_BM_GPMI_STAT_READY_BUSY (0xff << MX28_BP_GPMI_STAT_READY_BUSY)
+#define MX28_BF_GPMI_STAT_READY_BUSY(v) \
+ (((v) << MX28_BP_GPMI_STAT_READY_BUSY) & MX28_BM_GPMI_STAT_READY_BUSY)
+
+/* MX23 uses this to detect READY. */
+#define HW_GPMI_DEBUG 0x000000c0
+#define MX23_BP_GPMI_DEBUG_READY0 28
+#define MX23_BM_GPMI_DEBUG_READY0 (1 << MX23_BP_GPMI_DEBUG_READY0)
+#endif
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
new file mode 100644
index 0000000000..fe291a2e5c
--- /dev/null
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -0,0 +1,869 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hisilicon NAND Flash controller driver
+ *
+ * Copyright © 2012-2014 HiSilicon Technologies Co., Ltd.
+ * http://www.hisilicon.com
+ *
+ * Author: Zhou Wang <wangzhou.bry@gmail.com>
+ * The initial developer of the original code is Zhiyong Cai
+ * <caizhiyong@huawei.com>
+ */
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/sizes.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+
+#define HINFC504_MAX_CHIP (4)
+#define HINFC504_W_LATCH (5)
+#define HINFC504_R_LATCH (7)
+#define HINFC504_RW_LATCH (3)
+
+#define HINFC504_NFC_TIMEOUT (2 * HZ)
+#define HINFC504_NFC_PM_TIMEOUT (1 * HZ)
+#define HINFC504_NFC_DMA_TIMEOUT (5 * HZ)
+#define HINFC504_CHIP_DELAY (25)
+
+#define HINFC504_REG_BASE_ADDRESS_LEN (0x100)
+#define HINFC504_BUFFER_BASE_ADDRESS_LEN (2048 + 128)
+
+#define HINFC504_ADDR_CYCLE_MASK 0x4
+
+#define HINFC504_CON 0x00
+#define HINFC504_CON_OP_MODE_NORMAL BIT(0)
+#define HINFC504_CON_PAGEISZE_SHIFT (1)
+#define HINFC504_CON_PAGESIZE_MASK (0x07)
+#define HINFC504_CON_BUS_WIDTH BIT(4)
+#define HINFC504_CON_READY_BUSY_SEL BIT(8)
+#define HINFC504_CON_ECCTYPE_SHIFT (9)
+#define HINFC504_CON_ECCTYPE_MASK (0x07)
+
+#define HINFC504_PWIDTH 0x04
+#define SET_HINFC504_PWIDTH(_w_lcnt, _r_lcnt, _rw_hcnt) \
+ ((_w_lcnt) | (((_r_lcnt) & 0x0F) << 4) | (((_rw_hcnt) & 0x0F) << 8))
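+
+/*
+ * With the latch counts defined above, SET_HINFC504_PWIDTH(5, 7, 3)
+ * packs HINFC504_W_LATCH, HINFC504_R_LATCH and HINFC504_RW_LATCH as
+ * 5 | (7 << 4) | (3 << 8) = 0x375.
+ */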
+
+#define HINFC504_CMD 0x0C
+#define HINFC504_ADDRL 0x10
+#define HINFC504_ADDRH 0x14
+#define HINFC504_DATA_NUM 0x18
+
+#define HINFC504_OP 0x1C
+#define HINFC504_OP_READ_DATA_EN BIT(1)
+#define HINFC504_OP_WAIT_READY_EN BIT(2)
+#define HINFC504_OP_CMD2_EN BIT(3)
+#define HINFC504_OP_WRITE_DATA_EN BIT(4)
+#define HINFC504_OP_ADDR_EN BIT(5)
+#define HINFC504_OP_CMD1_EN BIT(6)
+#define HINFC504_OP_NF_CS_SHIFT (7)
+#define HINFC504_OP_NF_CS_MASK (3)
+#define HINFC504_OP_ADDR_CYCLE_SHIFT (9)
+#define HINFC504_OP_ADDR_CYCLE_MASK (7)
+
+#define HINFC504_STATUS 0x20
+#define HINFC504_READY BIT(0)
+
+#define HINFC504_INTEN 0x24
+#define HINFC504_INTEN_DMA BIT(9)
+#define HINFC504_INTEN_UE BIT(6)
+#define HINFC504_INTEN_CE BIT(5)
+
+#define HINFC504_INTS 0x28
+#define HINFC504_INTS_DMA BIT(9)
+#define HINFC504_INTS_UE BIT(6)
+#define HINFC504_INTS_CE BIT(5)
+
+#define HINFC504_INTCLR 0x2C
+#define HINFC504_INTCLR_DMA BIT(9)
+#define HINFC504_INTCLR_UE BIT(6)
+#define HINFC504_INTCLR_CE BIT(5)
+
+#define HINFC504_ECC_STATUS 0x5C
+#define HINFC504_ECC_16_BIT_SHIFT 12
+
+#define HINFC504_DMA_CTRL 0x60
+#define HINFC504_DMA_CTRL_DMA_START BIT(0)
+#define HINFC504_DMA_CTRL_WE BIT(1)
+#define HINFC504_DMA_CTRL_DATA_AREA_EN BIT(2)
+#define HINFC504_DMA_CTRL_OOB_AREA_EN BIT(3)
+#define HINFC504_DMA_CTRL_BURST4_EN BIT(4)
+#define HINFC504_DMA_CTRL_BURST8_EN BIT(5)
+#define HINFC504_DMA_CTRL_BURST16_EN BIT(6)
+#define HINFC504_DMA_CTRL_ADDR_NUM_SHIFT (7)
+#define HINFC504_DMA_CTRL_ADDR_NUM_MASK (1)
+#define HINFC504_DMA_CTRL_CS_SHIFT (8)
+#define HINFC504_DMA_CTRL_CS_MASK (0x03)
+
+#define HINFC504_DMA_ADDR_DATA 0x64
+#define HINFC504_DMA_ADDR_OOB 0x68
+
+#define HINFC504_DMA_LEN 0x6C
+#define HINFC504_DMA_LEN_OOB_SHIFT (16)
+#define HINFC504_DMA_LEN_OOB_MASK (0xFFF)
+
+#define HINFC504_DMA_PARA 0x70
+#define HINFC504_DMA_PARA_DATA_RW_EN BIT(0)
+#define HINFC504_DMA_PARA_OOB_RW_EN BIT(1)
+#define HINFC504_DMA_PARA_DATA_EDC_EN BIT(2)
+#define HINFC504_DMA_PARA_OOB_EDC_EN BIT(3)
+#define HINFC504_DMA_PARA_DATA_ECC_EN BIT(4)
+#define HINFC504_DMA_PARA_OOB_ECC_EN BIT(5)
+
+#define HINFC_VERSION 0x74
+#define HINFC504_LOG_READ_ADDR 0x7C
+#define HINFC504_LOG_READ_LEN 0x80
+
+#define HINFC504_NANDINFO_LEN 0x10
+
+struct hinfc_host {
+ struct nand_chip chip;
+ struct device *dev;
+ void __iomem *iobase;
+ void __iomem *mmio;
+ struct completion cmd_complete;
+ unsigned int offset;
+ unsigned int command;
+ int chipselect;
+ unsigned int addr_cycle;
+ u32 addr_value[2];
+ u32 cache_addr_value[2];
+ char *buffer;
+ dma_addr_t dma_buffer;
+ dma_addr_t dma_oob;
+ int version;
+ unsigned int irq_status; /* interrupt status */
+};
+
+static inline unsigned int hinfc_read(struct hinfc_host *host, unsigned int reg)
+{
+ return readl(host->iobase + reg);
+}
+
+static inline void hinfc_write(struct hinfc_host *host, unsigned int value,
+ unsigned int reg)
+{
+ writel(value, host->iobase + reg);
+}
+
+static void wait_controller_finished(struct hinfc_host *host)
+{
+ unsigned long timeout = jiffies + HINFC504_NFC_TIMEOUT;
+ int val;
+
+ while (time_before(jiffies, timeout)) {
+ val = hinfc_read(host, HINFC504_STATUS);
+ if (host->command == NAND_CMD_ERASE2) {
+ /* wait for the controller to report ready */
+ while (!(val & HINFC504_READY)) {
+ usleep_range(500, 1000);
+ val = hinfc_read(host, HINFC504_STATUS);
+ }
+ return;
+ }
+
+ if (val & HINFC504_READY)
+ return;
+ }
+
+ /* command wait timed out */
+ dev_err(host->dev, "Timed out waiting for the NAND controller to execute the command.\n");
+}
+
+static void hisi_nfc_dma_transfer(struct hinfc_host *host, int todev)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned long val;
+ int ret;
+
+ hinfc_write(host, host->dma_buffer, HINFC504_DMA_ADDR_DATA);
+ hinfc_write(host, host->dma_oob, HINFC504_DMA_ADDR_OOB);
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
+ hinfc_write(host, ((mtd->oobsize & HINFC504_DMA_LEN_OOB_MASK)
+ << HINFC504_DMA_LEN_OOB_SHIFT), HINFC504_DMA_LEN);
+
+ hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
+ | HINFC504_DMA_PARA_OOB_RW_EN, HINFC504_DMA_PARA);
+ } else {
+ if (host->command == NAND_CMD_READOOB)
+ hinfc_write(host, HINFC504_DMA_PARA_OOB_RW_EN
+ | HINFC504_DMA_PARA_OOB_EDC_EN
+ | HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
+ else
+ hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
+ | HINFC504_DMA_PARA_OOB_RW_EN
+ | HINFC504_DMA_PARA_DATA_EDC_EN
+ | HINFC504_DMA_PARA_OOB_EDC_EN
+ | HINFC504_DMA_PARA_DATA_ECC_EN
+ | HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
+
+ }
+
+ val = (HINFC504_DMA_CTRL_DMA_START | HINFC504_DMA_CTRL_BURST4_EN
+ | HINFC504_DMA_CTRL_BURST8_EN | HINFC504_DMA_CTRL_BURST16_EN
+ | HINFC504_DMA_CTRL_DATA_AREA_EN | HINFC504_DMA_CTRL_OOB_AREA_EN
+ | ((host->addr_cycle == 4 ? 1 : 0)
+ << HINFC504_DMA_CTRL_ADDR_NUM_SHIFT)
+ | ((host->chipselect & HINFC504_DMA_CTRL_CS_MASK)
+ << HINFC504_DMA_CTRL_CS_SHIFT));
+
+ if (todev)
+ val |= HINFC504_DMA_CTRL_WE;
+
+ init_completion(&host->cmd_complete);
+
+ hinfc_write(host, val, HINFC504_DMA_CTRL);
+ ret = wait_for_completion_timeout(&host->cmd_complete,
+ HINFC504_NFC_DMA_TIMEOUT);
+
+ if (!ret) {
+ dev_err(host->dev, "DMA operation(irq) timeout!\n");
+ /* sanity check */
+ val = hinfc_read(host, HINFC504_DMA_CTRL);
+ if (!(val & HINFC504_DMA_CTRL_DMA_START))
+ dev_err(host->dev, "DMA is already done but without irq ACK!\n");
+ else
+ dev_err(host->dev, "DMA is really timeout!\n");
+ }
+}
+
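+/*
+ * A note on the DMA control word built above (bit positions per the
+ * defines at the top of this file): all three burst-enable bits are set,
+ * bit 7 is set when four address cycles are in use, and the chipselect
+ * lands in bits [9:8]. Writing HINFC504_DMA_CTRL_DMA_START kicks off the
+ * transfer; completion is signalled by the DMA interrupt, which calls
+ * complete() from the IRQ handler further down.
+ */
+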
+static int hisi_nfc_send_cmd_pageprog(struct hinfc_host *host)
+{
+ host->addr_value[0] &= 0xffff0000;
+
+ hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
+ hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
+ hinfc_write(host, NAND_CMD_PAGEPROG << 8 | NAND_CMD_SEQIN,
+ HINFC504_CMD);
+
+ hisi_nfc_dma_transfer(host, 1);
+
+ return 0;
+}
+
+static int hisi_nfc_send_cmd_readstart(struct hinfc_host *host)
+{
+ struct mtd_info *mtd = nand_to_mtd(&host->chip);
+
+ if ((host->addr_value[0] == host->cache_addr_value[0]) &&
+ (host->addr_value[1] == host->cache_addr_value[1]))
+ return 0;
+
+ host->addr_value[0] &= 0xffff0000;
+
+ hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
+ hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
+ hinfc_write(host, NAND_CMD_READSTART << 8 | NAND_CMD_READ0,
+ HINFC504_CMD);
+
+ hinfc_write(host, 0, HINFC504_LOG_READ_ADDR);
+ hinfc_write(host, mtd->writesize + mtd->oobsize,
+ HINFC504_LOG_READ_LEN);
+
+ hisi_nfc_dma_transfer(host, 0);
+
+ host->cache_addr_value[0] = host->addr_value[0];
+ host->cache_addr_value[1] = host->addr_value[1];
+
+ return 0;
+}
+
+static int hisi_nfc_send_cmd_erase(struct hinfc_host *host)
+{
+ hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
+ hinfc_write(host, (NAND_CMD_ERASE2 << 8) | NAND_CMD_ERASE1,
+ HINFC504_CMD);
+
+ hinfc_write(host, HINFC504_OP_WAIT_READY_EN
+ | HINFC504_OP_CMD2_EN
+ | HINFC504_OP_CMD1_EN
+ | HINFC504_OP_ADDR_EN
+ | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
+ << HINFC504_OP_NF_CS_SHIFT)
+ | ((host->addr_cycle & HINFC504_OP_ADDR_CYCLE_MASK)
+ << HINFC504_OP_ADDR_CYCLE_SHIFT),
+ HINFC504_OP);
+
+ wait_controller_finished(host);
+
+ return 0;
+}
+
+static int hisi_nfc_send_cmd_readid(struct hinfc_host *host)
+{
+ hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
+ hinfc_write(host, NAND_CMD_READID, HINFC504_CMD);
+ hinfc_write(host, 0, HINFC504_ADDRL);
+
+ hinfc_write(host, HINFC504_OP_CMD1_EN | HINFC504_OP_ADDR_EN
+ | HINFC504_OP_READ_DATA_EN
+ | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
+ << HINFC504_OP_NF_CS_SHIFT)
+ | 1 << HINFC504_OP_ADDR_CYCLE_SHIFT, HINFC504_OP);
+
+ wait_controller_finished(host);
+
+ return 0;
+}
+
+static int hisi_nfc_send_cmd_status(struct hinfc_host *host)
+{
+ hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
+ hinfc_write(host, NAND_CMD_STATUS, HINFC504_CMD);
+ hinfc_write(host, HINFC504_OP_CMD1_EN
+ | HINFC504_OP_READ_DATA_EN
+ | ((host->chipselect & HINFC504_OP_NF_CS_MASK)
+ << HINFC504_OP_NF_CS_SHIFT),
+ HINFC504_OP);
+
+ wait_controller_finished(host);
+
+ return 0;
+}
+
+static int hisi_nfc_send_cmd_reset(struct hinfc_host *host, int chipselect)
+{
+ hinfc_write(host, NAND_CMD_RESET, HINFC504_CMD);
+
+ hinfc_write(host, HINFC504_OP_CMD1_EN
+ | ((chipselect & HINFC504_OP_NF_CS_MASK)
+ << HINFC504_OP_NF_CS_SHIFT)
+ | HINFC504_OP_WAIT_READY_EN,
+ HINFC504_OP);
+
+ wait_controller_finished(host);
+
+ return 0;
+}
+
+static void hisi_nfc_select_chip(struct nand_chip *chip, int chipselect)
+{
+ struct hinfc_host *host = nand_get_controller_data(chip);
+
+ if (chipselect < 0)
+ return;
+
+ host->chipselect = chipselect;
+}
+
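+/*
+ * Byte reads are served from different buffers depending on the last
+ * command: NAND_CMD_STATUS returns the status byte latched at the start
+ * of the controller buffer (mmio), NAND_CMD_READID streams the ID bytes
+ * from the same buffer, and everything else comes from the DMA data
+ * buffer. host->offset is incremented before the ID/data cases, hence
+ * the "- 1" when indexing.
+ */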
+static uint8_t hisi_nfc_read_byte(struct nand_chip *chip)
+{
+ struct hinfc_host *host = nand_get_controller_data(chip);
+
+ if (host->command == NAND_CMD_STATUS)
+ return *(uint8_t *)(host->mmio);
+
+ host->offset++;
+
+ if (host->command == NAND_CMD_READID)
+ return *(uint8_t *)(host->mmio + host->offset - 1);
+
+ return *(uint8_t *)(host->buffer + host->offset - 1);
+}
+
+static void
+hisi_nfc_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
+{
+ struct hinfc_host *host = nand_get_controller_data(chip);
+
+ memcpy(host->buffer + host->offset, buf, len);
+ host->offset += len;
+}
+
+static void hisi_nfc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ struct hinfc_host *host = nand_get_controller_data(chip);
+
+ memcpy(buf, host->buffer + host->offset, len);
+ host->offset += len;
+}
+
+static void set_addr(struct mtd_info *mtd, int column, int page_addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct hinfc_host *host = nand_get_controller_data(chip);
+ unsigned int command = host->command;
+
+ host->addr_cycle = 0;
+ host->addr_value[0] = 0;
+ host->addr_value[1] = 0;
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+
+ host->addr_value[0] = column & 0xffff;
+ host->addr_cycle = 2;
+ }
+ if (page_addr != -1) {
+ host->addr_value[0] |= (page_addr & 0xffff)
+ << (host->addr_cycle * 8);
+ host->addr_cycle += 2;
+ if (chip->options & NAND_ROW_ADDR_3) {
+ host->addr_cycle += 1;
+ if (host->command == NAND_CMD_ERASE1)
+ host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
+ else
+ host->addr_value[1] |= ((page_addr >> 16) & 0xff);
+ }
+ }
+}
+
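+/*
+ * Worked example (hypothetical 2KB-page chip with NAND_ROW_ADDR_3):
+ * column = 0x100 and page_addr = 0x23456 yield
+ *
+ *   addr_value[0] = (0x3456 << 16) | 0x0100 = 0x34560100
+ *   addr_value[1] = (0x23456 >> 16) & 0xff  = 0x02  (non-erase commands)
+ *   addr_cycle    = 2 (column) + 2 (row) + 1 (third row byte) = 5
+ *
+ * For NAND_CMD_ERASE1 there is no column, so the third row byte goes
+ * into addr_value[0] bits [23:16] instead.
+ */
+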
+static void hisi_nfc_cmdfunc(struct nand_chip *chip, unsigned command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct hinfc_host *host = nand_get_controller_data(chip);
+ int is_cache_invalid = 1;
+ unsigned int flag = 0;
+
+ host->command = command;
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ if (command == NAND_CMD_READ0)
+ host->offset = column;
+ else
+ host->offset = column + mtd->writesize;
+
+ is_cache_invalid = 0;
+ set_addr(mtd, column, page_addr);
+ hisi_nfc_send_cmd_readstart(host);
+ break;
+
+ case NAND_CMD_SEQIN:
+ host->offset = column;
+ set_addr(mtd, column, page_addr);
+ break;
+
+ case NAND_CMD_ERASE1:
+ set_addr(mtd, column, page_addr);
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ hisi_nfc_send_cmd_pageprog(host);
+ break;
+
+ case NAND_CMD_ERASE2:
+ hisi_nfc_send_cmd_erase(host);
+ break;
+
+ case NAND_CMD_READID:
+ host->offset = column;
+ memset(host->mmio, 0, 0x10);
+ hisi_nfc_send_cmd_readid(host);
+ break;
+
+ case NAND_CMD_STATUS:
+ flag = hinfc_read(host, HINFC504_CON);
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
+ hinfc_write(host,
+ flag & ~(HINFC504_CON_ECCTYPE_MASK <<
+ HINFC504_CON_ECCTYPE_SHIFT), HINFC504_CON);
+
+ host->offset = 0;
+ memset(host->mmio, 0, 0x10);
+ hisi_nfc_send_cmd_status(host);
+ hinfc_write(host, flag, HINFC504_CON);
+ break;
+
+ case NAND_CMD_RESET:
+ hisi_nfc_send_cmd_reset(host, host->chipselect);
+ break;
+
+ default:
+ dev_err(host->dev, "Error: unsupported cmd(cmd=%x, col=%x, page=%x)\n",
+ command, column, page_addr);
+ }
+
+ if (is_cache_invalid) {
+ host->cache_addr_value[0] = ~0;
+ host->cache_addr_value[1] = ~0;
+ }
+}
+
+static irqreturn_t hinfc_irq_handle(int irq, void *devid)
+{
+ struct hinfc_host *host = devid;
+ unsigned int flag;
+
+ flag = hinfc_read(host, HINFC504_INTS);
+ /* store interrupts state */
+ host->irq_status |= flag;
+
+ if (flag & HINFC504_INTS_DMA) {
+ hinfc_write(host, HINFC504_INTCLR_DMA, HINFC504_INTCLR);
+ complete(&host->cmd_complete);
+ } else if (flag & HINFC504_INTS_CE) {
+ hinfc_write(host, HINFC504_INTCLR_CE, HINFC504_INTCLR);
+ } else if (flag & HINFC504_INTS_UE) {
+ hinfc_write(host, HINFC504_INTCLR_UE, HINFC504_INTCLR);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct hinfc_host *host = nand_get_controller_data(chip);
+ int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
+ int stat_1, stat_2;
+
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ /* errors which cannot be corrected by ECC */
+ if (host->irq_status & HINFC504_INTS_UE) {
+ mtd->ecc_stats.failed++;
+ } else if (host->irq_status & HINFC504_INTS_CE) {
+ /* TODO: need to add other ECC modes! */
+ switch (chip->ecc.strength) {
+ case 16:
+ status_ecc = hinfc_read(host, HINFC504_ECC_STATUS) >>
+ HINFC504_ECC_16_BIT_SHIFT & 0x0fff;
+ stat_2 = status_ecc & 0x3f;
+ stat_1 = status_ecc >> 6 & 0x3f;
+ stat = stat_1 + stat_2;
+ stat_max = max_t(int, stat_1, stat_2);
+ }
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(int, max_bitflips, stat_max);
+ }
+ host->irq_status = 0;
+
+ return max_bitflips;
+}
+
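+/*
+ * A sketch of the 16-bit ECC status decode above with a made-up register
+ * value: if HINFC504_ECC_STATUS reads 0x00045000, then
+ *
+ *   status_ecc = (0x00045000 >> 12) & 0x0fff = 0x045
+ *   stat_2     = 0x045 & 0x3f        = 5 corrected bitflips
+ *   stat_1     = (0x045 >> 6) & 0x3f = 1 corrected bitflip
+ *
+ * so ecc_stats.corrected grows by 6 and max_bitflips becomes 5.
+ */
+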
+static int hisi_nand_read_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct hinfc_host *host = nand_get_controller_data(chip);
+
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+
+ if (host->irq_status & HINFC504_INTS_UE) {
+ host->irq_status = 0;
+ return -EBADMSG;
+ }
+
+ host->irq_status = 0;
+ return 0;
+}
+
+static int hisi_nand_write_page_hwecc(struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ if (oob_required)
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static void hisi_nfc_host_init(struct hinfc_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ unsigned int flag = 0;
+
+ host->version = hinfc_read(host, HINFC_VERSION);
+ host->addr_cycle = 0;
+ host->addr_value[0] = 0;
+ host->addr_value[1] = 0;
+ host->cache_addr_value[0] = ~0;
+ host->cache_addr_value[1] = ~0;
+ host->chipselect = 0;
+
+ /* default: 2K page size, ECC disabled; reconfigured later as needed */
+ flag = HINFC504_CON_OP_MODE_NORMAL | HINFC504_CON_READY_BUSY_SEL
+ | ((0x001 & HINFC504_CON_PAGESIZE_MASK)
+ << HINFC504_CON_PAGESIZE_SHIFT)
+ | ((0x0 & HINFC504_CON_ECCTYPE_MASK)
+ << HINFC504_CON_ECCTYPE_SHIFT)
+ | ((chip->options & NAND_BUSWIDTH_16) ?
+ HINFC504_CON_BUS_WIDTH : 0);
+ hinfc_write(host, flag, HINFC504_CON);
+
+ memset(host->mmio, 0xff, HINFC504_BUFFER_BASE_ADDRESS_LEN);
+
+ hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
+ HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
+
+ /* enable DMA irq */
+ hinfc_write(host, HINFC504_INTEN_DMA, HINFC504_INTEN);
+}
+
+static int hisi_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ /* FIXME: add ECC bytes position */
+ return -ENOTSUPP;
+}
+
+static int hisi_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2;
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops hisi_ooblayout_ops = {
+ .ecc = hisi_ooblayout_ecc,
+ .free = hisi_ooblayout_free,
+};
+
+static int hisi_nfc_ecc_probe(struct hinfc_host *host)
+{
+ unsigned int flag;
+ int size, strength, ecc_bits;
+ struct device *dev = host->dev;
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ size = chip->ecc.size;
+ strength = chip->ecc.strength;
+ if (size != 1024) {
+ dev_err(dev, "unsupported ecc size: %d\n", size);
+ return -EINVAL;
+ }
+
+ if ((strength != 8) && (strength != 16) &&
+ (strength != 24) && (strength != 40)) {
+ dev_err(dev, "ecc size and strength do not match\n");
+ return -EINVAL;
+ }
+
+ chip->ecc.size = size;
+ chip->ecc.strength = strength;
+
+ chip->ecc.read_page = hisi_nand_read_page_hwecc;
+ chip->ecc.read_oob = hisi_nand_read_oob;
+ chip->ecc.write_page = hisi_nand_write_page_hwecc;
+
+ switch (chip->ecc.strength) {
+ case 16:
+ ecc_bits = 6;
+ if (mtd->writesize == 2048)
+ mtd_set_ooblayout(mtd, &hisi_ooblayout_ops);
+
+ /* TODO: add more page size support */
+ break;
+
+ /* TODO: add more ecc strength support */
+ default:
+ dev_err(dev, "not support strength: %d\n", chip->ecc.strength);
+ return -EINVAL;
+ }
+
+ flag = hinfc_read(host, HINFC504_CON);
+ /* add ecc type configure */
+ flag |= ((ecc_bits & HINFC504_CON_ECCTYPE_MASK)
+ << HINFC504_CON_ECCTYPE_SHIFT);
+ hinfc_write(host, flag, HINFC504_CON);
+
+ /* enable ecc irq */
+ flag = hinfc_read(host, HINFC504_INTEN) & 0xfff;
+ hinfc_write(host, flag | HINFC504_INTEN_UE | HINFC504_INTEN_CE,
+ HINFC504_INTEN);
+
+ return 0;
+}
+
+static int hisi_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct hinfc_host *host = nand_get_controller_data(chip);
+ int flag;
+
+ host->buffer = dmam_alloc_coherent(host->dev,
+ mtd->writesize + mtd->oobsize,
+ &host->dma_buffer, GFP_KERNEL);
+ if (!host->buffer)
+ return -ENOMEM;
+
+ host->dma_oob = host->dma_buffer + mtd->writesize;
+ memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
+
+ flag = hinfc_read(host, HINFC504_CON);
+ flag &= ~(HINFC504_CON_PAGESIZE_MASK << HINFC504_CON_PAGEISZE_SHIFT);
+ switch (mtd->writesize) {
+ case 2048:
+ flag |= (0x001 << HINFC504_CON_PAGESIZE_SHIFT);
+ break;
+ /*
+ * TODO: add more pagesize support,
+ * default pagesize has been set in hisi_nfc_host_init
+ */
+ default:
+ dev_err(host->dev, "NON-2KB page size nand flash\n");
+ return -EINVAL;
+ }
+ hinfc_write(host, flag, HINFC504_CON);
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
+ hisi_nfc_ecc_probe(host);
+
+ return 0;
+}
+
+static const struct nand_controller_ops hisi_nfc_controller_ops = {
+ .attach_chip = hisi_nfc_attach_chip,
+};
+
+static int hisi_nfc_probe(struct platform_device *pdev)
+{
+ int ret = 0, irq, max_chips = HINFC504_MAX_CHIP;
+ struct device *dev = &pdev->dev;
+ struct hinfc_host *host;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ struct device_node *np = dev->of_node;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+ host->dev = dev;
+
+ platform_set_drvdata(pdev, host);
+ chip = &host->chip;
+ mtd = nand_to_mtd(chip);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ host->iobase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->iobase))
+ return PTR_ERR(host->iobase);
+
+ host->mmio = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(host->mmio))
+ return PTR_ERR(host->mmio);
+
+ mtd->name = "hisi_nand";
+ mtd->dev.parent = &pdev->dev;
+
+ nand_set_controller_data(chip, host);
+ nand_set_flash_node(chip, np);
+ chip->legacy.cmdfunc = hisi_nfc_cmdfunc;
+ chip->legacy.select_chip = hisi_nfc_select_chip;
+ chip->legacy.read_byte = hisi_nfc_read_byte;
+ chip->legacy.write_buf = hisi_nfc_write_buf;
+ chip->legacy.read_buf = hisi_nfc_read_buf;
+ chip->legacy.chip_delay = HINFC504_CHIP_DELAY;
+ chip->legacy.set_features = nand_get_set_features_notsupp;
+ chip->legacy.get_features = nand_get_set_features_notsupp;
+
+ hisi_nfc_host_init(host);
+
+ ret = devm_request_irq(dev, irq, hinfc_irq_handle, 0x0, "nandc", host);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ chip->legacy.dummy_controller.ops = &hisi_nfc_controller_ops;
+ ret = nand_scan(chip, max_chips);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "Err MTD partition=%d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hisi_nfc_remove(struct platform_device *pdev)
+{
+ struct hinfc_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int hisi_nfc_suspend(struct device *dev)
+{
+ struct hinfc_host *host = dev_get_drvdata(dev);
+ unsigned long timeout = jiffies + HINFC504_NFC_PM_TIMEOUT;
+
+ while (time_before(jiffies, timeout)) {
+ if (((hinfc_read(host, HINFC504_STATUS) & 0x1) == 0x0) &&
+ (hinfc_read(host, HINFC504_DMA_CTRL) &
+ HINFC504_DMA_CTRL_DMA_START)) {
+ cond_resched();
+ return 0;
+ }
+ }
+
+ dev_err(host->dev, "nand controller suspend timeout.\n");
+
+ return -EAGAIN;
+}
+
+static int hisi_nfc_resume(struct device *dev)
+{
+ int cs;
+ struct hinfc_host *host = dev_get_drvdata(dev);
+ struct nand_chip *chip = &host->chip;
+
+ for (cs = 0; cs < nanddev_ntargets(&chip->base); cs++)
+ hisi_nfc_send_cmd_reset(host, cs);
+ hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
+ HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(hisi_nfc_pm_ops, hisi_nfc_suspend, hisi_nfc_resume);
+
+static const struct of_device_id nfc_id_table[] = {
+ { .compatible = "hisilicon,504-nfc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, nfc_id_table);
+
+static struct platform_driver hisi_nfc_driver = {
+ .driver = {
+ .name = "hisi_nand",
+ .of_match_table = nfc_id_table,
+ .pm = &hisi_nfc_pm_ops,
+ },
+ .probe = hisi_nfc_probe,
+ .remove_new = hisi_nfc_remove,
+};
+
+module_platform_driver(hisi_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhou Wang");
+MODULE_AUTHOR("Zhiyong Cai");
+MODULE_DESCRIPTION("Hisilicon Nand Flash Controller Driver");
diff --git a/drivers/mtd/nand/raw/ingenic/Kconfig b/drivers/mtd/nand/raw/ingenic/Kconfig
new file mode 100644
index 0000000000..96c5ae8b1b
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/Kconfig
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MTD_NAND_JZ4780
+ tristate "JZ4780 NAND controller"
+ depends on MIPS || COMPILE_TEST
+ depends on JZ4780_NEMC
+ help
+ Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
+ based boards, using the BCH controller for hardware error correction.
+
+if MTD_NAND_JZ4780
+
+config MTD_NAND_INGENIC_ECC
+ bool
+
+config MTD_NAND_JZ4740_ECC
+ tristate "Hardware BCH support for JZ4740 SoC"
+ select MTD_NAND_INGENIC_ECC
+ help
+ Enable this driver to support the Reed-Solomon error-correction
+ hardware present on the JZ4740 SoC from Ingenic.
+
+ This driver can also be built as a module. If so, the module
+ will be called jz4740-ecc.
+
+config MTD_NAND_JZ4725B_BCH
+ tristate "Hardware BCH support for JZ4725B SoC"
+ select MTD_NAND_INGENIC_ECC
+ help
+ Enable this driver to support the BCH error-correction hardware
+ present on the JZ4725B SoC from Ingenic.
+
+ This driver can also be built as a module. If so, the module
+ will be called jz4725b-bch.
+
+config MTD_NAND_JZ4780_BCH
+ tristate "Hardware BCH support for JZ4780 SoC"
+ select MTD_NAND_INGENIC_ECC
+ help
+ Enable this driver to support the BCH error-correction hardware
+ present on the JZ4780 SoC from Ingenic.
+
+ This driver can also be built as a module. If so, the module
+ will be called jz4780-bch.
+
+endif # MTD_NAND_JZ4780
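+
+# A hypothetical configuration fragment for a JZ4780 board that uses the
+# BCH engine, with both drivers built as modules (MTD_NAND_INGENIC_ECC is
+# selected automatically):
+#
+#   CONFIG_MTD_NAND_JZ4780=m
+#   CONFIG_MTD_NAND_JZ4780_BCH=m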
diff --git a/drivers/mtd/nand/raw/ingenic/Makefile b/drivers/mtd/nand/raw/ingenic/Makefile
new file mode 100644
index 0000000000..4c53f5e759
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MTD_NAND_JZ4780) += ingenic_nand.o
+
+ingenic_nand-y += ingenic_nand_drv.o
+ingenic_nand-$(CONFIG_MTD_NAND_INGENIC_ECC) += ingenic_ecc.o
+
+obj-$(CONFIG_MTD_NAND_JZ4740_ECC) += jz4740_ecc.o
+obj-$(CONFIG_MTD_NAND_JZ4725B_BCH) += jz4725b_bch.o
+obj-$(CONFIG_MTD_NAND_JZ4780_BCH) += jz4780_bch.o
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
new file mode 100644
index 0000000000..525c34c281
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ47xx ECC common code
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "ingenic_ecc.h"
+
+/**
+ * ingenic_ecc_calculate() - calculate ECC for a data buffer
+ * @ecc: ECC device.
+ * @params: ECC parameters.
+ * @buf: input buffer with raw data.
+ * @ecc_code: output buffer with ECC.
+ *
+ * Return: 0 on success, or -ETIMEDOUT if we timed out waiting for the
+ * ECC controller.
+ */
+int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ return ecc->ops->calculate(ecc, params, buf, ecc_code);
+}
+
+/**
+ * ingenic_ecc_correct() - detect and correct bit errors
+ * @ecc: ECC device.
+ * @params: ECC parameters.
+ * @buf: raw data read from the chip.
+ * @ecc_code: ECC read from the chip.
+ *
+ * Given the raw data and the ECC read from the NAND device, detects and
+ * corrects errors in the data.
+ *
+ * Return: the number of bit errors corrected, -EBADMSG if there are too many
+ * errors to correct or -ETIMEDOUT if we timed out waiting for the controller.
+ */
+int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
+{
+ return ecc->ops->correct(ecc, params, buf, ecc_code);
+}
+
+/**
+ * ingenic_ecc_get() - get the ECC controller device
+ * @np: ECC device tree node.
+ *
+ * Gets the ECC controller device from the specified device tree node. The
+ * device must be released with ingenic_ecc_release() when it is no longer being
+ * used.
+ *
+ * Return: a pointer to ingenic_ecc, errors are encoded into the pointer.
+ * ERR_PTR(-EPROBE_DEFER) if the device hasn't been initialised yet.
+ */
+static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct ingenic_ecc *ecc;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (!platform_get_drvdata(pdev)) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ ecc = platform_get_drvdata(pdev);
+ clk_prepare_enable(ecc->clk);
+
+ return ecc;
+}
+
+/**
+ * of_ingenic_ecc_get() - get the ECC controller from a DT node
+ * @of_node: the node that contains an ecc-engine property.
+ *
+ * Get the ecc-engine property from the given device tree
+ * node and pass it to ingenic_ecc_get to do the work.
+ *
+ * Return: a pointer to ingenic_ecc, errors are encoded into the pointer.
+ * ERR_PTR(-EPROBE_DEFER) if the device hasn't been initialised yet.
+ */
+struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *of_node)
+{
+ struct ingenic_ecc *ecc = NULL;
+ struct device_node *np;
+
+ np = of_parse_phandle(of_node, "ecc-engine", 0);
+
+ /*
+ * If the ecc-engine property is not found, check for the deprecated
+ * ingenic,bch-controller property
+ */
+ if (!np)
+ np = of_parse_phandle(of_node, "ingenic,bch-controller", 0);
+
+ if (np) {
+ ecc = ingenic_ecc_get(np);
+ of_node_put(np);
+ }
+ return ecc;
+}
+
+/**
+ * ingenic_ecc_release() - release the ECC controller device
+ * @ecc: ECC device.
+ */
+void ingenic_ecc_release(struct ingenic_ecc *ecc)
+{
+ clk_disable_unprepare(ecc->clk);
+ put_device(ecc->dev);
+}
+
+int ingenic_ecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ingenic_ecc *ecc;
+
+ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
+ if (!ecc)
+ return -ENOMEM;
+
+ ecc->ops = device_get_match_data(dev);
+ if (!ecc->ops)
+ return -EINVAL;
+
+ ecc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ecc->base))
+ return PTR_ERR(ecc->base);
+
+ ecc->ops->disable(ecc);
+
+ ecc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ecc->clk)) {
+ dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
+ return PTR_ERR(ecc->clk);
+ }
+
+ mutex_init(&ecc->lock);
+
+ ecc->dev = dev;
+ platform_set_drvdata(pdev, ecc);
+
+ return 0;
+}
+EXPORT_SYMBOL(ingenic_ecc_probe);
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
new file mode 100644
index 0000000000..017868f59f
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DRIVERS_MTD_NAND_INGENIC_ECC_INTERNAL_H__
+#define __DRIVERS_MTD_NAND_INGENIC_ECC_INTERNAL_H__
+
+#include <linux/compiler_types.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <uapi/asm-generic/errno-base.h>
+
+struct clk;
+struct device;
+struct ingenic_ecc;
+struct platform_device;
+
+/**
+ * struct ingenic_ecc_params - ECC parameters
+ * @size: data bytes per ECC step.
+ * @bytes: ECC bytes per step.
+ * @strength: number of correctable bits per ECC step.
+ */
+struct ingenic_ecc_params {
+ int size;
+ int bytes;
+ int strength;
+};
+
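+/*
+ * Example values (illustrative, not mandated by this header): a single
+ * 1024-byte ECC step protected with 8-bit BCH on the JZ4780 would be
+ * described as
+ *
+ *   struct ingenic_ecc_params params = {
+ *           .size     = 1024,
+ *           .bytes    = 14,
+ *           .strength = 8,
+ *   };
+ *
+ * where .bytes comes from the fls()-based computation in the NAND driver.
+ */
+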
+#if IS_ENABLED(CONFIG_MTD_NAND_INGENIC_ECC)
+int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code);
+int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params, u8 *buf,
+ u8 *ecc_code);
+
+void ingenic_ecc_release(struct ingenic_ecc *ecc);
+struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np);
+#else /* CONFIG_MTD_NAND_INGENIC_ECC */
+static inline int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ return -ENODEV;
+}
+
+static inline int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params, u8 *buf,
+ u8 *ecc_code)
+{
+ return -ENODEV;
+}
+
+static inline void ingenic_ecc_release(struct ingenic_ecc *ecc)
+{
+}
+
+static inline struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif /* CONFIG_MTD_NAND_INGENIC_ECC */
+
+struct ingenic_ecc_ops {
+ void (*disable)(struct ingenic_ecc *ecc);
+ int (*calculate)(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code);
+ int (*correct)(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code);
+};
+
+struct ingenic_ecc {
+ struct device *dev;
+ const struct ingenic_ecc_ops *ops;
+ void __iomem *base;
+ struct clk *clk;
+ struct mutex lock;
+};
+
+int ingenic_ecc_probe(struct platform_device *pdev);
+
+#endif /* __DRIVERS_MTD_NAND_INGENIC_ECC_INTERNAL_H__ */
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
new file mode 100644
index 0000000000..6748226b8b
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ingenic JZ47xx NAND driver
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/jz4780-nemc.h>
+
+#include "ingenic_ecc.h"
+
+#define DRV_NAME "ingenic-nand"
+
+struct jz_soc_info {
+ unsigned long data_offset;
+ unsigned long addr_offset;
+ unsigned long cmd_offset;
+ const struct mtd_ooblayout_ops *oob_layout;
+ bool oob_first;
+};
+
+struct ingenic_nand_cs {
+ unsigned int bank;
+ void __iomem *base;
+};
+
+struct ingenic_nfc {
+ struct device *dev;
+ struct ingenic_ecc *ecc;
+ const struct jz_soc_info *soc_info;
+ struct nand_controller controller;
+ unsigned int num_banks;
+ struct list_head chips;
+ struct ingenic_nand_cs cs[];
+};
+
+struct ingenic_nand {
+ struct nand_chip chip;
+ struct list_head chip_list;
+
+ struct gpio_desc *busy_gpio;
+ struct gpio_desc *wp_gpio;
+ unsigned int reading: 1;
+};
+
+static inline struct ingenic_nand *to_ingenic_nand(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct ingenic_nand, chip);
+}
+
+static inline struct ingenic_nfc *to_ingenic_nfc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct ingenic_nfc, controller);
+}
+
+static int qi_lb60_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section || !ecc->total)
+ return -ERANGE;
+
+ oobregion->length = ecc->total;
+ oobregion->offset = 12;
+
+ return 0;
+}
+
+static int qi_lb60_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - ecc->total - 12;
+ oobregion->offset = 12 + ecc->total;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = {
+ .ecc = qi_lb60_ooblayout_ecc,
+ .free = qi_lb60_ooblayout_free,
+};
+
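+/*
+ * For a hypothetical 64-byte OOB area with ecc->total == 36, the qi,lb60
+ * layout above resolves to ECC bytes at offsets [12, 48) and free bytes
+ * at [48, 64): 16 free bytes after the 12 reserved leading bytes.
+ */
+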
+static int jz4725b_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section || !ecc->total)
+ return -ERANGE;
+
+ oobregion->length = ecc->total;
+ oobregion->offset = 3;
+
+ return 0;
+}
+
+static int jz4725b_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - ecc->total - 3;
+ oobregion->offset = 3 + ecc->total;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops jz4725b_ooblayout_ops = {
+ .ecc = jz4725b_ooblayout_ecc,
+ .free = jz4725b_ooblayout_free,
+};
+
+static void ingenic_nand_ecc_hwctl(struct nand_chip *chip, int mode)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+
+ nand->reading = (mode == NAND_ECC_READ);
+}
+
+static int ingenic_nand_ecc_calculate(struct nand_chip *chip, const u8 *dat,
+ u8 *ecc_code)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+ struct ingenic_ecc_params params;
+
+ /*
+ * Don't need to generate the ECC when reading, the ECC engine does it
+ * for us as part of decoding/correction.
+ */
+ if (nand->reading)
+ return 0;
+
+ params.size = nand->chip.ecc.size;
+ params.bytes = nand->chip.ecc.bytes;
+ params.strength = nand->chip.ecc.strength;
+
+ return ingenic_ecc_calculate(nfc->ecc, &params, dat, ecc_code);
+}
+
+static int ingenic_nand_ecc_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+ struct ingenic_ecc_params params;
+
+ params.size = nand->chip.ecc.size;
+ params.bytes = nand->chip.ecc.bytes;
+ params.strength = nand->chip.ecc.strength;
+
+ return ingenic_ecc_correct(nfc->ecc, &params, dat, read_ecc);
+}
+
+static int ingenic_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct ingenic_nfc *nfc = to_ingenic_nfc(chip->controller);
+ int eccbytes;
+
+ if (chip->ecc.strength == 4) {
+ /* JZ4740 uses 9 bytes of ECC to correct maximum 4 errors */
+ chip->ecc.bytes = 9;
+ } else {
+ chip->ecc.bytes = fls((1 + 8) * chip->ecc.size) *
+ (chip->ecc.strength / 8);
+ }
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ if (!nfc->ecc) {
+ dev_err(nfc->dev, "HW ECC selected, but ECC controller not found\n");
+ return -ENODEV;
+ }
+
+ chip->ecc.hwctl = ingenic_nand_ecc_hwctl;
+ chip->ecc.calculate = ingenic_nand_ecc_calculate;
+ chip->ecc.correct = ingenic_nand_ecc_correct;
+ fallthrough;
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ dev_info(nfc->dev, "using %s (strength %d, size %d, bytes %d)\n",
+ (nfc->ecc) ? "hardware ECC" : "software ECC",
+ chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
+ break;
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ dev_info(nfc->dev, "not using ECC\n");
+ break;
+ default:
+ dev_err(nfc->dev, "ECC mode %d not supported\n",
+ chip->ecc.engine_type);
+ return -EINVAL;
+ }
+
+ /* The NAND core will generate the ECC layout for SW ECC */
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ /* Generate ECC layout. ECC codes are right aligned in the OOB area. */
+ eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes;
+
+ if (eccbytes > mtd->oobsize - 2) {
+ dev_err(nfc->dev,
+ "invalid ECC config: required %d ECC bytes, but only %d are available",
+ eccbytes, mtd->oobsize - 2);
+ return -EINVAL;
+ }
+
+ /*
+ * The generic layout for BBT markers will most likely overlap with our
+ * ECC bytes in the OOB, so move the BBT markers outside the OOB area.
+ */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (nfc->soc_info->oob_first)
+ chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+
+ /* For legacy reasons we use a different layout on the qi,lb60 board. */
+ if (of_machine_is_compatible("qi,lb60"))
+ mtd_set_ooblayout(mtd, &qi_lb60_ooblayout_ops);
+ else if (nfc->soc_info->oob_layout)
+ mtd_set_ooblayout(mtd, nfc->soc_info->oob_layout);
+ else
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+
+ return 0;
+}
+
+static int ingenic_nand_exec_instr(struct nand_chip *chip,
+ struct ingenic_nand_cs *cs,
+ const struct nand_op_instr *instr)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(chip->controller);
+ unsigned int i;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb(instr->ctx.cmd.opcode,
+ cs->base + nfc->soc_info->cmd_offset);
+ return 0;
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ writeb(instr->ctx.addr.addrs[i],
+ cs->base + nfc->soc_info->addr_offset);
+ return 0;
+ case NAND_OP_DATA_IN_INSTR:
+ if (instr->ctx.data.force_8bit ||
+ !(chip->options & NAND_BUSWIDTH_16))
+ ioread8_rep(cs->base + nfc->soc_info->data_offset,
+ instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ else
+ ioread16_rep(cs->base + nfc->soc_info->data_offset,
+ instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ return 0;
+ case NAND_OP_DATA_OUT_INSTR:
+ if (instr->ctx.data.force_8bit ||
+ !(chip->options & NAND_BUSWIDTH_16))
+ iowrite8_rep(cs->base + nfc->soc_info->data_offset,
+ instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ else
+ iowrite16_rep(cs->base + nfc->soc_info->data_offset,
+ instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ return 0;
+ case NAND_OP_WAITRDY_INSTR:
+ if (!nand->busy_gpio)
+ return nand_soft_waitrdy(chip,
+ instr->ctx.waitrdy.timeout_ms);
+
+ return nand_gpio_waitrdy(chip, nand->busy_gpio,
+ instr->ctx.waitrdy.timeout_ms);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int ingenic_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+ struct ingenic_nand_cs *cs;
+ unsigned int i;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ cs = &nfc->cs[op->cs];
+ jz4780_nemc_assert(nfc->dev, cs->bank, true);
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = ingenic_nand_exec_instr(chip, cs, &op->instrs[i]);
+ if (ret)
+ break;
+
+ if (op->instrs[i].delay_ns)
+ ndelay(op->instrs[i].delay_ns);
+ }
+ jz4780_nemc_assert(nfc->dev, cs->bank, false);
+
+ return ret;
+}
+
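+/*
+ * For reference, a READID issued by the NAND core reaches exec_op above
+ * roughly as the instruction sequence (illustrative only):
+ *
+ *   CMD(0x90) -> ADDR(0x00) -> DATA_IN(id, len)
+ *
+ * which the instruction loop turns into one writeb() to cmd_offset, one
+ * writeb() to addr_offset, then ioread8_rep() from data_offset, all with
+ * the NEMC bank asserted.
+ */
+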
+static const struct nand_controller_ops ingenic_nand_controller_ops = {
+ .attach_chip = ingenic_nand_attach_chip,
+ .exec_op = ingenic_nand_exec_op,
+};
+
+static int ingenic_nand_init_chip(struct platform_device *pdev,
+ struct ingenic_nfc *nfc,
+ struct device_node *np,
+ unsigned int chipnr)
+{
+ struct device *dev = &pdev->dev;
+ struct ingenic_nand *nand;
+ struct ingenic_nand_cs *cs;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ const __be32 *reg;
+ int ret = 0;
+
+ cs = &nfc->cs[chipnr];
+
+ reg = of_get_property(np, "reg", NULL);
+ if (!reg)
+ return -EINVAL;
+
+ cs->bank = be32_to_cpu(*reg);
+
+ jz4780_nemc_set_type(nfc->dev, cs->bank, JZ4780_NEMC_BANK_NAND);
+
+ cs->base = devm_platform_ioremap_resource(pdev, chipnr);
+ if (IS_ERR(cs->base))
+ return PTR_ERR(cs->base);
+
+ nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
+ if (!nand)
+ return -ENOMEM;
+
+ nand->busy_gpio = devm_gpiod_get_optional(dev, "rb", GPIOD_IN);
+
+ if (IS_ERR(nand->busy_gpio)) {
+ ret = PTR_ERR(nand->busy_gpio);
+ dev_err(dev, "failed to request busy GPIO: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * The rb-gpios semantics was undocumented and qi,lb60 (along with
+ * the ingenic driver) got it wrong. The active state encodes the
+ * NAND ready state, which is high level. Since there's no signal
+ * inverter on this board, it should be active-high. Let's fix that
+ * here for older DTs so we can re-use the generic nand_gpio_waitrdy()
+ * helper, and be consistent with what other drivers do.
+ */
+ if (of_machine_is_compatible("qi,lb60") &&
+ gpiod_is_active_low(nand->busy_gpio))
+ gpiod_toggle_active_low(nand->busy_gpio);
+
+ nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
+
+ if (IS_ERR(nand->wp_gpio)) {
+ ret = PTR_ERR(nand->wp_gpio);
+ dev_err(dev, "failed to request WP GPIO: %d\n", ret);
+ return ret;
+ }
+
+ chip = &nand->chip;
+ mtd = nand_to_mtd(chip);
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
+ cs->bank);
+ if (!mtd->name)
+ return -ENOMEM;
+ mtd->dev.parent = dev;
+
+ chip->options = NAND_NO_SUBPAGE_WRITE;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->controller = &nfc->controller;
+ nand_set_flash_node(chip, np);
+
+ chip->controller->ops = &ingenic_nand_controller_ops;
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&nand->chip_list, &nfc->chips);
+
+ return 0;
+}
+
+static void ingenic_nand_cleanup_chips(struct ingenic_nfc *nfc)
+{
+ struct ingenic_nand *ingenic_chip;
+ struct nand_chip *chip;
+ int ret;
+
+ while (!list_empty(&nfc->chips)) {
+ ingenic_chip = list_first_entry(&nfc->chips,
+ struct ingenic_nand, chip_list);
+ chip = &ingenic_chip->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&ingenic_chip->chip_list);
+ }
+}
+
+static int ingenic_nand_init_chips(struct ingenic_nfc *nfc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ int i = 0;
+ int ret;
+ int num_chips = of_get_child_count(dev->of_node);
+
+ if (num_chips > nfc->num_banks) {
+ dev_err(dev, "found %d chips but only %d banks\n",
+ num_chips, nfc->num_banks);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(dev->of_node, np) {
+ ret = ingenic_nand_init_chip(pdev, nfc, np, i);
+ if (ret) {
+ ingenic_nand_cleanup_chips(nfc);
+ of_node_put(np);
+ return ret;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int ingenic_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned int num_banks;
+ struct ingenic_nfc *nfc;
+ int ret;
+
+ num_banks = jz4780_nemc_num_banks(dev);
+ if (num_banks == 0) {
+ dev_err(dev, "no banks found\n");
+ return -ENODEV;
+ }
+
+ nfc = devm_kzalloc(dev, struct_size(nfc, cs, num_banks), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->soc_info = device_get_match_data(dev);
+ if (!nfc->soc_info)
+ return -EINVAL;
+
+ /*
+ * Check for ECC HW before we call nand_scan_ident, to prevent us from
+ * having to call it again if the ECC driver returns -EPROBE_DEFER.
+ */
+ nfc->ecc = of_ingenic_ecc_get(dev->of_node);
+ if (IS_ERR(nfc->ecc))
+ return PTR_ERR(nfc->ecc);
+
+ nfc->dev = dev;
+ nfc->num_banks = num_banks;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ ret = ingenic_nand_init_chips(nfc, pdev);
+ if (ret) {
+ if (nfc->ecc)
+ ingenic_ecc_release(nfc->ecc);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, nfc);
+ return 0;
+}
+
+static void ingenic_nand_remove(struct platform_device *pdev)
+{
+ struct ingenic_nfc *nfc = platform_get_drvdata(pdev);
+
+ if (nfc->ecc)
+ ingenic_ecc_release(nfc->ecc);
+
+ ingenic_nand_cleanup_chips(nfc);
+}
+
+static const struct jz_soc_info jz4740_soc_info = {
+ .data_offset = 0x00000000,
+ .cmd_offset = 0x00008000,
+ .addr_offset = 0x00010000,
+ .oob_first = true,
+};
+
+static const struct jz_soc_info jz4725b_soc_info = {
+ .data_offset = 0x00000000,
+ .cmd_offset = 0x00008000,
+ .addr_offset = 0x00010000,
+ .oob_layout = &jz4725b_ooblayout_ops,
+};
+
+static const struct jz_soc_info jz4780_soc_info = {
+ .data_offset = 0x00000000,
+ .cmd_offset = 0x00400000,
+ .addr_offset = 0x00800000,
+};
+
+static const struct of_device_id ingenic_nand_dt_match[] = {
+ { .compatible = "ingenic,jz4740-nand", .data = &jz4740_soc_info },
+ { .compatible = "ingenic,jz4725b-nand", .data = &jz4725b_soc_info },
+ { .compatible = "ingenic,jz4780-nand", .data = &jz4780_soc_info },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ingenic_nand_dt_match);
+
+static struct platform_driver ingenic_nand_driver = {
+ .probe = ingenic_nand_probe,
+ .remove_new = ingenic_nand_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = ingenic_nand_dt_match,
+ },
+};
+module_platform_driver(ingenic_nand_driver);
+
+MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
+MODULE_DESCRIPTION("Ingenic JZ47xx NAND driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
new file mode 100644
index 0000000000..2d0e0a2192
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ4725B BCH controller driver
+ *
+ * Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net>
+ *
+ * Based on jz4780_bch.c
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "ingenic_ecc.h"
+
+#define BCH_BHCR 0x0
+#define BCH_BHCSR 0x4
+#define BCH_BHCCR 0x8
+#define BCH_BHCNT 0xc
+#define BCH_BHDR 0x10
+#define BCH_BHPAR0 0x14
+#define BCH_BHINT 0x24
+#define BCH_BHERR0 0x28
+#define BCH_BHINTE 0x38
+#define BCH_BHINTES 0x3c
+#define BCH_BHINTEC 0x40
+
+#define BCH_BHCR_ENCE BIT(3)
+#define BCH_BHCR_BSEL BIT(2)
+#define BCH_BHCR_INIT BIT(1)
+#define BCH_BHCR_BCHE BIT(0)
+
+#define BCH_BHCNT_DEC_COUNT_SHIFT 16
+#define BCH_BHCNT_DEC_COUNT_MASK (0x3ff << BCH_BHCNT_DEC_COUNT_SHIFT)
+#define BCH_BHCNT_ENC_COUNT_SHIFT 0
+#define BCH_BHCNT_ENC_COUNT_MASK (0x3ff << BCH_BHCNT_ENC_COUNT_SHIFT)
+
+#define BCH_BHERR_INDEX0_SHIFT 0
+#define BCH_BHERR_INDEX0_MASK (0x1fff << BCH_BHERR_INDEX0_SHIFT)
+#define BCH_BHERR_INDEX1_SHIFT 16
+#define BCH_BHERR_INDEX1_MASK (0x1fff << BCH_BHERR_INDEX1_SHIFT)
+
+#define BCH_BHINT_ERRC_SHIFT 28
+#define BCH_BHINT_ERRC_MASK (0xf << BCH_BHINT_ERRC_SHIFT)
+#define BCH_BHINT_TERRC_SHIFT 16
+#define BCH_BHINT_TERRC_MASK (0x7f << BCH_BHINT_TERRC_SHIFT)
+#define BCH_BHINT_ALL_0 BIT(5)
+#define BCH_BHINT_ALL_F BIT(4)
+#define BCH_BHINT_DECF BIT(3)
+#define BCH_BHINT_ENCF BIT(2)
+#define BCH_BHINT_UNCOR BIT(1)
+#define BCH_BHINT_ERR BIT(0)
+
+/* Timeout for BCH calculation/correction. */
+#define BCH_TIMEOUT_US 100000
+
+static inline void jz4725b_bch_config_set(struct ingenic_ecc *bch, u32 cfg)
+{
+ writel(cfg, bch->base + BCH_BHCSR);
+}
+
+static inline void jz4725b_bch_config_clear(struct ingenic_ecc *bch, u32 cfg)
+{
+ writel(cfg, bch->base + BCH_BHCCR);
+}
+
+static int jz4725b_bch_reset(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params, bool calc_ecc)
+{
+ u32 reg, max_value;
+
+ /* Clear interrupt status. */
+ writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
+
+ /* Initialise and enable BCH. */
+ jz4725b_bch_config_clear(bch, 0x1f);
+ jz4725b_bch_config_set(bch, BCH_BHCR_BCHE);
+
+ if (params->strength == 8)
+ jz4725b_bch_config_set(bch, BCH_BHCR_BSEL);
+ else
+ jz4725b_bch_config_clear(bch, BCH_BHCR_BSEL);
+
+ if (calc_ecc) /* calculate ECC from data */
+ jz4725b_bch_config_set(bch, BCH_BHCR_ENCE);
+ else /* correct data from ECC */
+ jz4725b_bch_config_clear(bch, BCH_BHCR_ENCE);
+
+ jz4725b_bch_config_set(bch, BCH_BHCR_INIT);
+
+ max_value = BCH_BHCNT_ENC_COUNT_MASK >> BCH_BHCNT_ENC_COUNT_SHIFT;
+ if (params->size > max_value)
+ return -EINVAL;
+
+ max_value = BCH_BHCNT_DEC_COUNT_MASK >> BCH_BHCNT_DEC_COUNT_SHIFT;
+ if (params->size + params->bytes > max_value)
+ return -EINVAL;
+
+ /* Set up BCH count register. */
+ reg = params->size << BCH_BHCNT_ENC_COUNT_SHIFT;
+ reg |= (params->size + params->bytes) << BCH_BHCNT_DEC_COUNT_SHIFT;
+ writel(reg, bch->base + BCH_BHCNT);
+
+ return 0;
+}
+
+static void jz4725b_bch_disable(struct ingenic_ecc *bch)
+{
+ /* Clear interrupts */
+ writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
+
+ /* Disable the hardware */
+ jz4725b_bch_config_clear(bch, BCH_BHCR_BCHE);
+}
+
+static void jz4725b_bch_write_data(struct ingenic_ecc *bch, const u8 *buf,
+ size_t size)
+{
+ while (size--)
+ writeb(*buf++, bch->base + BCH_BHDR);
+}
+
+static void jz4725b_bch_read_parity(struct ingenic_ecc *bch, u8 *buf,
+ size_t size)
+{
+ size_t size32 = size / sizeof(u32);
+ size_t size8 = size % sizeof(u32);
+ u32 *dest32;
+ u8 *dest8;
+ u32 val, offset = 0;
+
+ dest32 = (u32 *)buf;
+ while (size32--) {
+ *dest32++ = readl_relaxed(bch->base + BCH_BHPAR0 + offset);
+ offset += sizeof(u32);
+ }
+
+ dest8 = (u8 *)dest32;
+ val = readl_relaxed(bch->base + BCH_BHPAR0 + offset);
+ switch (size8) {
+ case 3:
+ dest8[2] = (val >> 16) & 0xff;
+ fallthrough;
+ case 2:
+ dest8[1] = (val >> 8) & 0xff;
+ fallthrough;
+ case 1:
+ dest8[0] = val & 0xff;
+ break;
+ }
+}
+
+static int jz4725b_bch_wait_complete(struct ingenic_ecc *bch, unsigned int irq,
+ u32 *status)
+{
+ u32 reg;
+ int ret;
+
+ /*
+ * While we could use interrupts here and sleep until the operation
+ * completes, the controller works fairly quickly (usually a few
+ * microseconds) and so the overhead of sleeping until we get an
+ * interrupt quite noticeably decreases performance.
+ */
+ ret = readl_relaxed_poll_timeout(bch->base + BCH_BHINT, reg,
+ reg & irq, 0, BCH_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ if (status)
+ *status = reg;
+
+ writel(reg, bch->base + BCH_BHINT);
+
+ return 0;
+}
+
+static int jz4725b_calculate(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ int ret;
+
+ mutex_lock(&bch->lock);
+
+ ret = jz4725b_bch_reset(bch, params, true);
+ if (ret) {
+ dev_err(bch->dev, "Unable to init BCH with given parameters\n");
+ goto out_disable;
+ }
+
+ jz4725b_bch_write_data(bch, buf, params->size);
+
+ ret = jz4725b_bch_wait_complete(bch, BCH_BHINT_ENCF, NULL);
+ if (ret) {
+ dev_err(bch->dev, "timed out while calculating ECC\n");
+ goto out_disable;
+ }
+
+ jz4725b_bch_read_parity(bch, ecc_code, params->bytes);
+
+out_disable:
+ jz4725b_bch_disable(bch);
+ mutex_unlock(&bch->lock);
+
+ return ret;
+}
+
+static int jz4725b_correct(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
+{
+ u32 reg, errors, bit;
+ unsigned int i;
+ int ret;
+
+ mutex_lock(&bch->lock);
+
+ ret = jz4725b_bch_reset(bch, params, false);
+ if (ret) {
+ dev_err(bch->dev, "Unable to init BCH with given parameters\n");
+ goto out;
+ }
+
+ jz4725b_bch_write_data(bch, buf, params->size);
+ jz4725b_bch_write_data(bch, ecc_code, params->bytes);
+
+ ret = jz4725b_bch_wait_complete(bch, BCH_BHINT_DECF, &reg);
+ if (ret) {
+ dev_err(bch->dev, "timed out while correcting data\n");
+ goto out;
+ }
+
+ if (reg & (BCH_BHINT_ALL_F | BCH_BHINT_ALL_0)) {
+ /* Data and ECC are all 0xff or 0x00 - nothing to correct */
+ ret = 0;
+ goto out;
+ }
+
+ if (reg & BCH_BHINT_UNCOR) {
+ /* Uncorrectable ECC error */
+ ret = -EBADMSG;
+ goto out;
+ }
+
+ errors = (reg & BCH_BHINT_ERRC_MASK) >> BCH_BHINT_ERRC_SHIFT;
+
+ /* Correct any detected errors. */
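+ /*
+ * Each BHERR register packs two 13-bit error indices; each index is
+ * a bit offset into the data block. For a made-up index of 28, the
+ * XOR below flips bit 4 of buf[3] (28 >> 3 == 3, 28 & 0x7 == 4).
+ */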
+ for (i = 0; i < errors; i++) {
+ if (i & 1) {
+ bit = (reg & BCH_BHERR_INDEX1_MASK) >> BCH_BHERR_INDEX1_SHIFT;
+ } else {
+ reg = readl(bch->base + BCH_BHERR0 + (i * 4));
+ bit = (reg & BCH_BHERR_INDEX0_MASK) >> BCH_BHERR_INDEX0_SHIFT;
+ }
+
+ buf[(bit >> 3)] ^= BIT(bit & 0x7);
+ }
+
+out:
+ jz4725b_bch_disable(bch);
+ mutex_unlock(&bch->lock);
+
+ return ret;
+}
+
+static const struct ingenic_ecc_ops jz4725b_bch_ops = {
+ .disable = jz4725b_bch_disable,
+ .calculate = jz4725b_calculate,
+ .correct = jz4725b_correct,
+};
+
+static const struct of_device_id jz4725b_bch_dt_match[] = {
+ { .compatible = "ingenic,jz4725b-bch", .data = &jz4725b_bch_ops },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4725b_bch_dt_match);
+
+static struct platform_driver jz4725b_bch_driver = {
+ .probe = ingenic_ecc_probe,
+ .driver = {
+ .name = "jz4725b-bch",
+ .of_match_table = jz4725b_bch_dt_match,
+ },
+};
+module_platform_driver(jz4725b_bch_driver);
+
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ4725B BCH controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c b/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c
new file mode 100644
index 0000000000..54e377754a
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ4740 ECC controller driver
+ *
+ * Copyright (c) 2019 Paul Cercueil <paul@crapouillou.net>
+ *
+ * based on jz4740-nand.c
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "ingenic_ecc.h"
+
+#define JZ_REG_NAND_ECC_CTRL 0x00
+#define JZ_REG_NAND_DATA 0x04
+#define JZ_REG_NAND_PAR0 0x08
+#define JZ_REG_NAND_PAR1 0x0C
+#define JZ_REG_NAND_PAR2 0x10
+#define JZ_REG_NAND_IRQ_STAT 0x14
+#define JZ_REG_NAND_IRQ_CTRL 0x18
+#define JZ_REG_NAND_ERR(x) (0x1C + ((x) << 2))
+
+#define JZ_NAND_ECC_CTRL_PAR_READY BIT(4)
+#define JZ_NAND_ECC_CTRL_ENCODING BIT(3)
+#define JZ_NAND_ECC_CTRL_RS BIT(2)
+#define JZ_NAND_ECC_CTRL_RESET BIT(1)
+#define JZ_NAND_ECC_CTRL_ENABLE BIT(0)
+
+#define JZ_NAND_STATUS_ERR_COUNT (BIT(31) | BIT(30) | BIT(29))
+#define JZ_NAND_STATUS_PAD_FINISH BIT(4)
+#define JZ_NAND_STATUS_DEC_FINISH BIT(3)
+#define JZ_NAND_STATUS_ENC_FINISH BIT(2)
+#define JZ_NAND_STATUS_UNCOR_ERROR BIT(1)
+#define JZ_NAND_STATUS_ERROR BIT(0)
+
+static const uint8_t empty_block_ecc[] = {
+ 0xcd, 0x9d, 0x90, 0x58, 0xf4, 0x8b, 0xff, 0xb7, 0x6f
+};
+
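+/*
+ * As the check in jz4740_ecc_calculate() below assumes, this is the
+ * Reed-Solomon parity the hardware produces for a fully-erased ECC step
+ * (all-0xff data); matching against it lets the driver emit 0xff parity
+ * for empty pages so that later subpage writes still work.
+ */
+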
+static void jz4740_ecc_reset(struct ingenic_ecc *ecc, bool calc_ecc)
+{
+ uint32_t reg;
+
+ /* Clear interrupt status */
+ writel(0, ecc->base + JZ_REG_NAND_IRQ_STAT);
+
+ /* Initialize and enable ECC hardware */
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg |= JZ_NAND_ECC_CTRL_RESET;
+ reg |= JZ_NAND_ECC_CTRL_ENABLE;
+ reg |= JZ_NAND_ECC_CTRL_RS;
+ if (calc_ecc) /* calculate ECC from data */
+ reg |= JZ_NAND_ECC_CTRL_ENCODING;
+ else /* correct data from ECC */
+ reg &= ~JZ_NAND_ECC_CTRL_ENCODING;
+
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+}
+
+static int jz4740_ecc_calculate(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ uint32_t reg, status;
+ unsigned int timeout = 1000;
+ int i;
+
+ jz4740_ecc_reset(ecc, true);
+
+ do {
+ status = readl(ecc->base + JZ_REG_NAND_IRQ_STAT);
+ } while (!(status & JZ_NAND_STATUS_ENC_FINISH) && --timeout);
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+
+ for (i = 0; i < params->bytes; ++i)
+ ecc_code[i] = readb(ecc->base + JZ_REG_NAND_PAR0 + i);
+
+ /*
+ * If the written data is completely 0xff, we also want to write 0xff as
+ * ECC, otherwise we will get in trouble when doing subpage writes.
+ */
+ if (memcmp(ecc_code, empty_block_ecc, sizeof(empty_block_ecc)) == 0)
+ memset(ecc_code, 0xff, sizeof(empty_block_ecc));
+
+ return 0;
+}
+
+static void jz_nand_correct_data(uint8_t *buf, int index, int mask)
+{
+ int offset = index & 0x7;
+ uint16_t data;
+
+ index += (index >> 3);
+
+ data = buf[index];
+ data |= buf[index + 1] << 8;
+
+ mask ^= (data >> offset) & 0x1ff;
+ data &= ~(0x1ff << offset);
+ data |= (mask << offset);
+
+ buf[index] = data & 0xff;
+ buf[index + 1] = (data >> 8) & 0xff;
+}
+
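+/*
+ * Worked example for the 9-bit-symbol fixup above (hypothetical values):
+ * index = 10 gives offset = 2 and a byte position of 10 + (10 >> 3) = 11,
+ * so the affected 9-bit symbol spans buf[11] and buf[12] starting at bit
+ * 2, and the 9-bit mask from the error register is XORed in there.
+ */
+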
+static int jz4740_ecc_correct(struct ingenic_ecc *ecc,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
+{
+ int i, error_count, index;
+ uint32_t reg, status, error;
+ unsigned int timeout = 1000;
+
+ jz4740_ecc_reset(ecc, false);
+
+ for (i = 0; i < params->bytes; ++i)
+ writeb(ecc_code[i], ecc->base + JZ_REG_NAND_PAR0 + i);
+
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg |= JZ_NAND_ECC_CTRL_PAR_READY;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+
+ do {
+ status = readl(ecc->base + JZ_REG_NAND_IRQ_STAT);
+ } while (!(status & JZ_NAND_STATUS_DEC_FINISH) && --timeout);
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+
+ if (status & JZ_NAND_STATUS_ERROR) {
+ if (status & JZ_NAND_STATUS_UNCOR_ERROR)
+ return -EBADMSG;
+
+ error_count = (status & JZ_NAND_STATUS_ERR_COUNT) >> 29;
+
+ for (i = 0; i < error_count; ++i) {
+ error = readl(ecc->base + JZ_REG_NAND_ERR(i));
+ index = ((error >> 16) & 0x1ff) - 1;
+ if (index >= 0 && index < params->size)
+ jz_nand_correct_data(buf, index, error & 0x1ff);
+ }
+
+ return error_count;
+ }
+
+ return 0;
+}
+
+static void jz4740_ecc_disable(struct ingenic_ecc *ecc)
+{
+ u32 reg;
+
+ writel(0, ecc->base + JZ_REG_NAND_IRQ_STAT);
+ reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
+ reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+ writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
+}
+
+static const struct ingenic_ecc_ops jz4740_ecc_ops = {
+ .disable = jz4740_ecc_disable,
+ .calculate = jz4740_ecc_calculate,
+ .correct = jz4740_ecc_correct,
+};
+
+static const struct of_device_id jz4740_ecc_dt_match[] = {
+ { .compatible = "ingenic,jz4740-ecc", .data = &jz4740_ecc_ops },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4740_ecc_dt_match);
+
+static struct platform_driver jz4740_ecc_driver = {
+ .probe = ingenic_ecc_probe,
+ .driver = {
+ .name = "jz4740-ecc",
+ .of_match_table = jz4740_ecc_dt_match,
+ },
+};
+module_platform_driver(jz4740_ecc_driver);
+
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ4740 ECC controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/ingenic/jz4780_bch.c b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
new file mode 100644
index 0000000000..12b5b0484f
--- /dev/null
+++ b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ4780 BCH controller driver
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "ingenic_ecc.h"
+
+#define BCH_BHCR 0x0
+#define BCH_BHCCR 0x8
+#define BCH_BHCNT 0xc
+#define BCH_BHDR 0x10
+#define BCH_BHPAR0 0x14
+#define BCH_BHERR0 0x84
+#define BCH_BHINT 0x184
+#define BCH_BHINTES 0x188
+#define BCH_BHINTEC 0x18c
+#define BCH_BHINTE 0x190
+
+#define BCH_BHCR_BSEL_SHIFT 4
+#define BCH_BHCR_BSEL_MASK (0x7f << BCH_BHCR_BSEL_SHIFT)
+#define BCH_BHCR_ENCE BIT(2)
+#define BCH_BHCR_INIT BIT(1)
+#define BCH_BHCR_BCHE BIT(0)
+
+#define BCH_BHCNT_PARITYSIZE_SHIFT 16
+#define BCH_BHCNT_PARITYSIZE_MASK (0x7f << BCH_BHCNT_PARITYSIZE_SHIFT)
+#define BCH_BHCNT_BLOCKSIZE_SHIFT 0
+#define BCH_BHCNT_BLOCKSIZE_MASK (0x7ff << BCH_BHCNT_BLOCKSIZE_SHIFT)
+
+#define BCH_BHERR_MASK_SHIFT 16
+#define BCH_BHERR_MASK_MASK (0xffff << BCH_BHERR_MASK_SHIFT)
+#define BCH_BHERR_INDEX_SHIFT 0
+#define BCH_BHERR_INDEX_MASK (0x7ff << BCH_BHERR_INDEX_SHIFT)
+
+#define BCH_BHINT_ERRC_SHIFT 24
+#define BCH_BHINT_ERRC_MASK (0x7f << BCH_BHINT_ERRC_SHIFT)
+#define BCH_BHINT_TERRC_SHIFT 16
+#define BCH_BHINT_TERRC_MASK (0x7f << BCH_BHINT_TERRC_SHIFT)
+#define BCH_BHINT_DECF BIT(3)
+#define BCH_BHINT_ENCF BIT(2)
+#define BCH_BHINT_UNCOR BIT(1)
+#define BCH_BHINT_ERR BIT(0)
+
+#define BCH_CLK_RATE (200 * 1000 * 1000)
+
+/* Timeout for BCH calculation/correction. */
+#define BCH_TIMEOUT_US 100000
+
+static void jz4780_bch_reset(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params, bool encode)
+{
+ u32 reg;
+
+ /* Clear interrupt status. */
+ writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
+
+ /* Set up BCH count register. */
+ reg = params->size << BCH_BHCNT_BLOCKSIZE_SHIFT;
+ reg |= params->bytes << BCH_BHCNT_PARITYSIZE_SHIFT;
+ writel(reg, bch->base + BCH_BHCNT);
+
+ /* Initialise and enable BCH. */
+ reg = BCH_BHCR_BCHE | BCH_BHCR_INIT;
+ reg |= params->strength << BCH_BHCR_BSEL_SHIFT;
+ if (encode)
+ reg |= BCH_BHCR_ENCE;
+ writel(reg, bch->base + BCH_BHCR);
+}
+
+static void jz4780_bch_disable(struct ingenic_ecc *bch)
+{
+ writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
+ writel(BCH_BHCR_BCHE, bch->base + BCH_BHCCR);
+}
+
+static void jz4780_bch_write_data(struct ingenic_ecc *bch, const void *buf,
+ size_t size)
+{
+ size_t size32 = size / sizeof(u32);
+ size_t size8 = size % sizeof(u32);
+ const u32 *src32;
+ const u8 *src8;
+
+ src32 = (const u32 *)buf;
+ while (size32--)
+ writel(*src32++, bch->base + BCH_BHDR);
+
+ src8 = (const u8 *)src32;
+ while (size8--)
+ writeb(*src8++, bch->base + BCH_BHDR);
+}
+
+static void jz4780_bch_read_parity(struct ingenic_ecc *bch, void *buf,
+ size_t size)
+{
+ size_t size32 = size / sizeof(u32);
+ size_t size8 = size % sizeof(u32);
+ u32 *dest32;
+ u8 *dest8;
+ u32 val, offset = 0;
+
+ dest32 = (u32 *)buf;
+ while (size32--) {
+ *dest32++ = readl(bch->base + BCH_BHPAR0 + offset);
+ offset += sizeof(u32);
+ }
+
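+	/* Copy any trailing 1-3 bytes from the final parity word. */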
+ dest8 = (u8 *)dest32;
+ val = readl(bch->base + BCH_BHPAR0 + offset);
+ switch (size8) {
+ case 3:
+ dest8[2] = (val >> 16) & 0xff;
+ fallthrough;
+ case 2:
+ dest8[1] = (val >> 8) & 0xff;
+ fallthrough;
+ case 1:
+ dest8[0] = val & 0xff;
+ break;
+ }
+}
+
+static bool jz4780_bch_wait_complete(struct ingenic_ecc *bch, unsigned int irq,
+ u32 *status)
+{
+ u32 reg;
+ int ret;
+
+ /*
+ * While we could use interrupts here and sleep until the operation
+ * completes, the controller works fairly quickly (usually a few
+ * microseconds) and so the overhead of sleeping until we get an
+ * interrupt quite noticeably decreases performance.
+ */
+ ret = readl_poll_timeout(bch->base + BCH_BHINT, reg,
+ (reg & irq) == irq, 0, BCH_TIMEOUT_US);
+ if (ret)
+ return false;
+
+ if (status)
+ *status = reg;
+
+ writel(reg, bch->base + BCH_BHINT);
+ return true;
+}
+
+static int jz4780_calculate(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ const u8 *buf, u8 *ecc_code)
+{
+ int ret = 0;
+
+ mutex_lock(&bch->lock);
+
+ jz4780_bch_reset(bch, params, true);
+ jz4780_bch_write_data(bch, buf, params->size);
+
+ if (jz4780_bch_wait_complete(bch, BCH_BHINT_ENCF, NULL)) {
+ jz4780_bch_read_parity(bch, ecc_code, params->bytes);
+ } else {
+ dev_err(bch->dev, "timed out while calculating ECC\n");
+ ret = -ETIMEDOUT;
+ }
+
+ jz4780_bch_disable(bch);
+ mutex_unlock(&bch->lock);
+ return ret;
+}
+
+static int jz4780_correct(struct ingenic_ecc *bch,
+ struct ingenic_ecc_params *params,
+ u8 *buf, u8 *ecc_code)
+{
+ u32 reg, mask, index;
+ int i, ret, count;
+
+ mutex_lock(&bch->lock);
+
+ jz4780_bch_reset(bch, params, false);
+ jz4780_bch_write_data(bch, buf, params->size);
+ jz4780_bch_write_data(bch, ecc_code, params->bytes);
+
+ if (!jz4780_bch_wait_complete(bch, BCH_BHINT_DECF, &reg)) {
+ dev_err(bch->dev, "timed out while correcting data\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (reg & BCH_BHINT_UNCOR) {
+ dev_warn(bch->dev, "uncorrectable ECC error\n");
+ ret = -EBADMSG;
+ goto out;
+ }
+
+ /* Correct any detected errors. */
+ if (reg & BCH_BHINT_ERR) {
+ count = (reg & BCH_BHINT_ERRC_MASK) >> BCH_BHINT_ERRC_SHIFT;
+ ret = (reg & BCH_BHINT_TERRC_MASK) >> BCH_BHINT_TERRC_SHIFT;
+
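+		/*
+		 * Each BHERR register reports a 16-bit data word index and an
+		 * XOR mask of the bits to flip within that word.
+		 */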
+ for (i = 0; i < count; i++) {
+ reg = readl(bch->base + BCH_BHERR0 + (i * 4));
+ mask = (reg & BCH_BHERR_MASK_MASK) >>
+ BCH_BHERR_MASK_SHIFT;
+ index = (reg & BCH_BHERR_INDEX_MASK) >>
+ BCH_BHERR_INDEX_SHIFT;
+ buf[(index * 2) + 0] ^= mask;
+ buf[(index * 2) + 1] ^= mask >> 8;
+ }
+ } else {
+ ret = 0;
+ }
+
+out:
+ jz4780_bch_disable(bch);
+ mutex_unlock(&bch->lock);
+ return ret;
+}
+
+static int jz4780_bch_probe(struct platform_device *pdev)
+{
+ struct ingenic_ecc *bch;
+ int ret;
+
+ ret = ingenic_ecc_probe(pdev);
+ if (ret)
+ return ret;
+
+ bch = platform_get_drvdata(pdev);
+ clk_set_rate(bch->clk, BCH_CLK_RATE);
+
+ return 0;
+}
+
+static const struct ingenic_ecc_ops jz4780_bch_ops = {
+ .disable = jz4780_bch_disable,
+ .calculate = jz4780_calculate,
+ .correct = jz4780_correct,
+};
+
+static const struct of_device_id jz4780_bch_dt_match[] = {
+ { .compatible = "ingenic,jz4780-bch", .data = &jz4780_bch_ops },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4780_bch_dt_match);
+
+static struct platform_driver jz4780_bch_driver = {
+ .probe = jz4780_bch_probe,
+ .driver = {
+ .name = "jz4780-bch",
+ .of_match_table = jz4780_bch_dt_match,
+ },
+};
+module_platform_driver(jz4780_bch_driver);
+
+MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
+MODULE_DESCRIPTION("Ingenic JZ4780 BCH error correction driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
new file mode 100644
index 0000000000..f0ad2308f6
--- /dev/null
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -0,0 +1,742 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2020 Intel Corporation. */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand.h>
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/units.h>
+#include <asm/unaligned.h>
+
+#define EBU_CLC 0x000
+#define EBU_CLC_RST 0x00000000u
+
+#define EBU_ADDR_SEL(n) (0x020 + (n) * 4)
+/* Address bits 26:22 (5 bits) are compared against ADDR_SELx */
+#define EBU_ADDR_MASK(x) ((x) << 4)
+#define EBU_ADDR_SEL_REGEN 0x1
+
+#define EBU_BUSCON(n) (0x060 + (n) * 4)
+#define EBU_BUSCON_CMULT_V4 0x1
+#define EBU_BUSCON_RECOVC(n) ((n) << 2)
+#define EBU_BUSCON_HOLDC(n) ((n) << 4)
+#define EBU_BUSCON_WAITRDC(n) ((n) << 6)
+#define EBU_BUSCON_WAITWRC(n) ((n) << 8)
+#define EBU_BUSCON_BCGEN_CS 0x0
+#define EBU_BUSCON_SETUP_EN BIT(22)
+#define EBU_BUSCON_ALEC 0xC000
+
+#define EBU_CON 0x0B0
+#define EBU_CON_NANDM_EN BIT(0)
+#define EBU_CON_NANDM_DIS 0x0
+#define EBU_CON_CSMUX_E_EN BIT(1)
+#define EBU_CON_ALE_P_LOW BIT(2)
+#define EBU_CON_CLE_P_LOW BIT(3)
+#define EBU_CON_CS_P_LOW BIT(4)
+#define EBU_CON_SE_P_LOW BIT(5)
+#define EBU_CON_WP_P_LOW BIT(6)
+#define EBU_CON_PRE_P_LOW BIT(7)
+#define EBU_CON_IN_CS_S(n) ((n) << 8)
+#define EBU_CON_OUT_CS_S(n) ((n) << 10)
+#define EBU_CON_LAT_EN_CS_P ((0x3D) << 18)
+
+#define EBU_WAIT 0x0B4
+#define EBU_WAIT_RDBY BIT(0)
+#define EBU_WAIT_WR_C BIT(3)
+
+#define HSNAND_CTL1 0x110
+#define HSNAND_CTL1_ADDR_SHIFT 24
+
+#define HSNAND_CTL2 0x114
+#define HSNAND_CTL2_ADDR_SHIFT 8
+#define HSNAND_CTL2_CYC_N_V5 (0x2 << 16)
+
+#define HSNAND_INT_MSK_CTL 0x124
+#define HSNAND_INT_MSK_CTL_WR_C BIT(4)
+
+#define HSNAND_INT_STA 0x128
+#define HSNAND_INT_STA_WR_C BIT(4)
+
+#define HSNAND_CTL 0x130
+#define HSNAND_CTL_ENABLE_ECC BIT(0)
+#define HSNAND_CTL_GO BIT(2)
+#define HSNAND_CTL_CE_SEL_CS(n) BIT(3 + (n))
+#define HSNAND_CTL_RW_READ 0x0
+#define HSNAND_CTL_RW_WRITE BIT(10)
+#define HSNAND_CTL_ECC_OFF_V8TH BIT(11)
+#define HSNAND_CTL_CKFF_EN 0x0
+#define HSNAND_CTL_MSG_EN BIT(17)
+
+#define HSNAND_PARA0 0x13c
+#define HSNAND_PARA0_PAGE_V8192 0x3
+#define HSNAND_PARA0_PIB_V256 (0x3 << 4)
+#define HSNAND_PARA0_BYP_EN_NP 0x0
+#define HSNAND_PARA0_BYP_DEC_NP 0x0
+#define HSNAND_PARA0_TYPE_ONFI BIT(18)
+#define HSNAND_PARA0_ADEP_EN BIT(21)
+
+#define HSNAND_CMSG_0 0x150
+#define HSNAND_CMSG_1 0x154
+
+#define HSNAND_ALE_OFFS BIT(2)
+#define HSNAND_CLE_OFFS BIT(3)
+#define HSNAND_CS_OFFS BIT(4)
+
+#define HSNAND_ECC_OFFSET 0x008
+
+#define MAX_CS 2
+
+#define USEC_PER_SEC 1000000L
+
+struct ebu_nand_cs {
+ void __iomem *chipaddr;
+ u32 addr_sel;
+};
+
+struct ebu_nand_controller {
+ struct nand_controller controller;
+ struct nand_chip chip;
+ struct device *dev;
+ void __iomem *ebu;
+ void __iomem *hsnand;
+ struct dma_chan *dma_tx;
+ struct dma_chan *dma_rx;
+ struct completion dma_access_complete;
+ struct clk *clk;
+ u32 nd_para0;
+ u8 cs_num;
+ struct ebu_nand_cs cs[MAX_CS];
+};
+
+static inline struct ebu_nand_controller *nand_to_ebu(struct nand_chip *chip)
+{
+ return container_of(chip, struct ebu_nand_controller, chip);
+}
+
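+/*
+ * Note: despite the parameter name, the timeout is handed to
+ * readl_poll_timeout() in microseconds.
+ */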
+static int ebu_nand_waitrdy(struct nand_chip *chip, int timeout_ms)
+{
+ struct ebu_nand_controller *ctrl = nand_to_ebu(chip);
+ u32 status;
+
+ return readl_poll_timeout(ctrl->ebu + EBU_WAIT, status,
+ (status & EBU_WAIT_RDBY) ||
+ (status & EBU_WAIT_WR_C), 20, timeout_ms);
+}
+
+static u8 ebu_nand_readb(struct nand_chip *chip)
+{
+ struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
+ u8 cs_num = ebu_host->cs_num;
+ u8 val;
+
+ val = readb(ebu_host->cs[cs_num].chipaddr + HSNAND_CS_OFFS);
+ ebu_nand_waitrdy(chip, 1000);
+ return val;
+}
+
+static void ebu_nand_writeb(struct nand_chip *chip, u32 offset, u8 value)
+{
+ struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
+ u8 cs_num = ebu_host->cs_num;
+
+ writeb(value, ebu_host->cs[cs_num].chipaddr + offset);
+ ebu_nand_waitrdy(chip, 1000);
+}
+
+static void ebu_read_buf(struct nand_chip *chip, u_char *buf, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = ebu_nand_readb(chip);
+}
+
+static void ebu_write_buf(struct nand_chip *chip, const u_char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ ebu_nand_writeb(chip, HSNAND_CS_OFFS, buf[i]);
+}
+
+static void ebu_nand_disable(struct nand_chip *chip)
+{
+ struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
+
+ writel(0, ebu_host->ebu + EBU_CON);
+}
+
+static void ebu_select_chip(struct nand_chip *chip)
+{
+ struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
+ void __iomem *nand_con = ebu_host->ebu + EBU_CON;
+ u32 cs = ebu_host->cs_num;
+
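+	/* Enable NAND mode and route the CS/latch signals to the selected CS. */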
+ writel(EBU_CON_NANDM_EN | EBU_CON_CSMUX_E_EN | EBU_CON_CS_P_LOW |
+ EBU_CON_SE_P_LOW | EBU_CON_WP_P_LOW | EBU_CON_PRE_P_LOW |
+ EBU_CON_IN_CS_S(cs) | EBU_CON_OUT_CS_S(cs) |
+ EBU_CON_LAT_EN_CS_P, nand_con);
+}
+
+static int ebu_nand_set_timings(struct nand_chip *chip, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct ebu_nand_controller *ctrl = nand_to_ebu(chip);
+ unsigned int rate = clk_get_rate(ctrl->clk) / HZ_PER_MHZ;
+ unsigned int period = DIV_ROUND_UP(USEC_PER_SEC, rate);
+ const struct nand_sdr_timings *timings;
+ u32 trecov, thold, twrwait, trdwait;
+ u32 reg = 0;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ trecov = DIV_ROUND_UP(max(timings->tREA_max, timings->tREH_min),
+ period);
+ reg |= EBU_BUSCON_RECOVC(trecov);
+
+ thold = DIV_ROUND_UP(max(timings->tDH_min, timings->tDS_min), period);
+ reg |= EBU_BUSCON_HOLDC(thold);
+
+ trdwait = DIV_ROUND_UP(max(timings->tRC_min, timings->tREH_min),
+ period);
+ reg |= EBU_BUSCON_WAITRDC(trdwait);
+
+ twrwait = DIV_ROUND_UP(max(timings->tWC_min, timings->tWH_min), period);
+ reg |= EBU_BUSCON_WAITWRC(twrwait);
+
+ reg |= EBU_BUSCON_CMULT_V4 | EBU_BUSCON_BCGEN_CS | EBU_BUSCON_ALEC |
+ EBU_BUSCON_SETUP_EN;
+
+ writel(reg, ctrl->ebu + EBU_BUSCON(ctrl->cs_num));
+
+ return 0;
+}
+
+static int ebu_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = HSNAND_ECC_OFFSET;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int ebu_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = chip->ecc.total + HSNAND_ECC_OFFSET;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops ebu_nand_ooblayout_ops = {
+ .ecc = ebu_nand_ooblayout_ecc,
+ .free = ebu_nand_ooblayout_free,
+};
+
+static void ebu_dma_rx_callback(void *cookie)
+{
+ struct ebu_nand_controller *ebu_host = cookie;
+
+ dmaengine_terminate_async(ebu_host->dma_rx);
+
+ complete(&ebu_host->dma_access_complete);
+}
+
+static void ebu_dma_tx_callback(void *cookie)
+{
+ struct ebu_nand_controller *ebu_host = cookie;
+
+ dmaengine_terminate_async(ebu_host->dma_tx);
+
+ complete(&ebu_host->dma_access_complete);
+}
+
+static int ebu_dma_start(struct ebu_nand_controller *ebu_host, u32 dir,
+ const u8 *buf, u32 len)
+{
+ struct dma_async_tx_descriptor *tx;
+ struct completion *dma_completion;
+ dma_async_tx_callback callback;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
+ unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ dma_addr_t buf_dma;
+ int ret;
+ u32 timeout;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ chan = ebu_host->dma_rx;
+ dma_completion = &ebu_host->dma_access_complete;
+ callback = ebu_dma_rx_callback;
+ } else {
+ chan = ebu_host->dma_tx;
+ dma_completion = &ebu_host->dma_access_complete;
+ callback = ebu_dma_tx_callback;
+ }
+
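+	/*
+	 * dir is an enum dma_transfer_direction, but DMA_MEM_TO_DEV and
+	 * DMA_DEV_TO_MEM share the numeric values of DMA_TO_DEVICE and
+	 * DMA_FROM_DEVICE, so the dma_map/unmap calls below see the correct
+	 * direction.
+	 */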
+ buf_dma = dma_map_single(chan->device->dev, (void *)buf, len, dir);
+ if (dma_mapping_error(chan->device->dev, buf_dma)) {
+ dev_err(ebu_host->dev, "Failed to map DMA buffer\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ tx = dmaengine_prep_slave_single(chan, buf_dma, len, dir, flags);
+ if (!tx) {
+ ret = -ENXIO;
+ goto err_unmap;
+ }
+
+ tx->callback = callback;
+ tx->callback_param = ebu_host;
+ cookie = tx->tx_submit(tx);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(ebu_host->dev, "dma_submit_error %d\n", cookie);
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ init_completion(dma_completion);
+ dma_async_issue_pending(chan);
+
+	/* Wait for the DMA to finish the data transfer. */
+ timeout = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
+ if (!timeout) {
+ dev_err(ebu_host->dev, "I/O Error in DMA RX (status %d)\n",
+ dmaengine_tx_status(chan, cookie, NULL));
+ dmaengine_terminate_sync(chan);
+ ret = -ETIMEDOUT;
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(ebu_host->dev, buf_dma, len, dir);
+
+ return ret;
+}
+
+static void ebu_nand_trigger(struct ebu_nand_controller *ebu_host,
+ int page, u32 cmd)
+{
+ unsigned int val;
+
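+	/* The page address is split: low byte into CTL1, upper bytes into CTL2. */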
+ val = cmd | (page & 0xFF) << HSNAND_CTL1_ADDR_SHIFT;
+ writel(val, ebu_host->hsnand + HSNAND_CTL1);
+ val = (page & 0xFFFF00) >> 8 | HSNAND_CTL2_CYC_N_V5;
+ writel(val, ebu_host->hsnand + HSNAND_CTL2);
+
+ writel(ebu_host->nd_para0, ebu_host->hsnand + HSNAND_PARA0);
+
+ /* clear first, will update later */
+ writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_0);
+ writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_1);
+
+ writel(HSNAND_INT_MSK_CTL_WR_C,
+ ebu_host->hsnand + HSNAND_INT_MSK_CTL);
+
+ if (!cmd)
+ val = HSNAND_CTL_RW_READ;
+ else
+ val = HSNAND_CTL_RW_WRITE;
+
+ writel(HSNAND_CTL_MSG_EN | HSNAND_CTL_CKFF_EN |
+ HSNAND_CTL_ECC_OFF_V8TH | HSNAND_CTL_CE_SEL_CS(ebu_host->cs_num) |
+ HSNAND_CTL_ENABLE_ECC | HSNAND_CTL_GO | val,
+ ebu_host->hsnand + HSNAND_CTL);
+}
+
+static int ebu_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
+ int ret, reg_data;
+
+ ebu_nand_trigger(ebu_host, page, NAND_CMD_READ0);
+
+ ret = ebu_dma_start(ebu_host, DMA_DEV_TO_MEM, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required)
+ chip->ecc.read_oob(chip, page);
+
+ reg_data = readl(ebu_host->hsnand + HSNAND_CTL);
+ reg_data &= ~HSNAND_CTL_GO;
+ writel(reg_data, ebu_host->hsnand + HSNAND_CTL);
+
+ return 0;
+}
+
+static int ebu_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
+ void __iomem *int_sta = ebu_host->hsnand + HSNAND_INT_STA;
+ int reg_data, ret, val;
+ u32 reg;
+
+ ebu_nand_trigger(ebu_host, page, NAND_CMD_SEQIN);
+
+ ret = ebu_dma_start(ebu_host, DMA_MEM_TO_DEV, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ reg = get_unaligned_le32(chip->oob_poi);
+ writel(reg, ebu_host->hsnand + HSNAND_CMSG_0);
+
+ reg = get_unaligned_le32(chip->oob_poi + 4);
+ writel(reg, ebu_host->hsnand + HSNAND_CMSG_1);
+ }
+
+ ret = readl_poll_timeout_atomic(int_sta, val, !(val & HSNAND_INT_STA_WR_C),
+ 10, 1000);
+ if (ret)
+ return ret;
+
+ reg_data = readl(ebu_host->hsnand + HSNAND_CTL);
+ reg_data &= ~HSNAND_CTL_GO;
+ writel(reg_data, ebu_host->hsnand + HSNAND_CTL);
+
+ return 0;
+}
+
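+/*
+ * Supported ECC strengths: indices 0-3 apply to 512-byte steps, 4-7 to
+ * 1024-byte steps. The matching index is later programmed into PARA0.
+ */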
+static const u8 ecc_strength[] = { 1, 1, 4, 8, 24, 32, 40, 60, };
+
+static int ebu_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
+ u32 ecc_steps, ecc_bytes, ecc_total, pagesize, pg_per_blk;
+ u32 ecc_strength_ds = chip->ecc.strength;
+ u32 ecc_size = chip->ecc.size;
+ u32 writesize = mtd->writesize;
+ u32 blocksize = mtd->erasesize;
+ int bch_algo, start, val;
+
+	/* Default to an ECC size of 512 bytes */
+	if (!chip->ecc.size)
+		chip->ecc.size = 512;
+	ecc_size = chip->ecc.size;
+
+ switch (ecc_size) {
+ case 512:
+ start = 1;
+ if (!ecc_strength_ds)
+ ecc_strength_ds = 4;
+ break;
+ case 1024:
+ start = 4;
+ if (!ecc_strength_ds)
+ ecc_strength_ds = 32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+	/* BCH ECC algorithm settings: number of correctable bits per 512B/1024B step */
+ bch_algo = round_up(start + 1, 4);
+ for (val = start; val < bch_algo; val++) {
+ if (ecc_strength_ds == ecc_strength[val])
+ break;
+ }
+ if (val == bch_algo)
+ return -EINVAL;
+
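+	/*
+	 * Parity size is strength * fls(8 * step_size) bits, rounded up to
+	 * whole bytes; strength 8 is special-cased to 14 bytes.
+	 */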
+ if (ecc_strength_ds == 8)
+ ecc_bytes = 14;
+ else
+ ecc_bytes = DIV_ROUND_UP(ecc_strength_ds * fls(8 * ecc_size), 8);
+
+ ecc_steps = writesize / ecc_size;
+ ecc_total = ecc_steps * ecc_bytes;
+ if ((ecc_total + 8) > mtd->oobsize)
+ return -ERANGE;
+
+ chip->ecc.total = ecc_total;
+ pagesize = fls(writesize >> 11);
+ if (pagesize > HSNAND_PARA0_PAGE_V8192)
+ return -ERANGE;
+
+ pg_per_blk = fls((blocksize / writesize) >> 6) / 8;
+ if (pg_per_blk > HSNAND_PARA0_PIB_V256)
+ return -ERANGE;
+
+ ebu_host->nd_para0 = pagesize | pg_per_blk | HSNAND_PARA0_BYP_EN_NP |
+ HSNAND_PARA0_BYP_DEC_NP | HSNAND_PARA0_ADEP_EN |
+ HSNAND_PARA0_TYPE_ONFI | (val << 29);
+
+ mtd_set_ooblayout(mtd, &ebu_nand_ooblayout_ops);
+ chip->ecc.read_page = ebu_nand_read_page_hwecc;
+ chip->ecc.write_page = ebu_nand_write_page_hwecc;
+
+ return 0;
+}
+
+static int ebu_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id;
+ int i, timeout_ms, ret = 0;
+
+ if (check_only)
+ return 0;
+
+ ebu_select_chip(chip);
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ebu_nand_writeb(chip, HSNAND_CLE_OFFS | HSNAND_CS_OFFS,
+ instr->ctx.cmd.opcode);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ ebu_nand_writeb(chip,
+ HSNAND_ALE_OFFS | HSNAND_CS_OFFS,
+ instr->ctx.addr.addrs[i]);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ ebu_read_buf(chip, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ ebu_write_buf(chip, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
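+			/* ->waitrdy timeouts are in ms; convert for the µs poll. */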
+ timeout_ms = instr->ctx.waitrdy.timeout_ms * 1000;
+ ret = ebu_nand_waitrdy(chip, timeout_ms);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static const struct nand_controller_ops ebu_nand_controller_ops = {
+ .attach_chip = ebu_nand_attach_chip,
+ .setup_interface = ebu_nand_set_timings,
+ .exec_op = ebu_nand_exec_op,
+};
+
+static void ebu_dma_cleanup(struct ebu_nand_controller *ebu_host)
+{
+ if (ebu_host->dma_rx)
+ dma_release_channel(ebu_host->dma_rx);
+
+ if (ebu_host->dma_tx)
+ dma_release_channel(ebu_host->dma_tx);
+}
+
+static int ebu_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ebu_nand_controller *ebu_host;
+ struct device_node *chip_np;
+ struct nand_chip *nand;
+ struct mtd_info *mtd;
+ struct resource *res;
+ char *resname;
+ int ret;
+ u32 cs;
+
+ ebu_host = devm_kzalloc(dev, sizeof(*ebu_host), GFP_KERNEL);
+ if (!ebu_host)
+ return -ENOMEM;
+
+ ebu_host->dev = dev;
+ nand_controller_init(&ebu_host->controller);
+
+ ebu_host->ebu = devm_platform_ioremap_resource_byname(pdev, "ebunand");
+ if (IS_ERR(ebu_host->ebu))
+ return PTR_ERR(ebu_host->ebu);
+
+ ebu_host->hsnand = devm_platform_ioremap_resource_byname(pdev, "hsnand");
+ if (IS_ERR(ebu_host->hsnand))
+ return PTR_ERR(ebu_host->hsnand);
+
+ chip_np = of_get_next_child(dev->of_node, NULL);
+ if (!chip_np)
+ return dev_err_probe(dev, -EINVAL,
+ "Could not find child node for the NAND chip\n");
+
+ ret = of_property_read_u32(chip_np, "reg", &cs);
+ if (ret) {
+ dev_err(dev, "failed to get chip select: %d\n", ret);
+ goto err_of_node_put;
+ }
+ if (cs >= MAX_CS) {
+ dev_err(dev, "got invalid chip select: %d\n", cs);
+ ret = -EINVAL;
+ goto err_of_node_put;
+ }
+
+ ebu_host->cs_num = cs;
+
+ resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
+ if (!resname) {
+ ret = -ENOMEM;
+ goto err_of_node_put;
+ }
+
+ ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev,
+ resname);
+ if (IS_ERR(ebu_host->cs[cs].chipaddr)) {
+ ret = PTR_ERR(ebu_host->cs[cs].chipaddr);
+ goto err_of_node_put;
+ }
+
+ ebu_host->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(ebu_host->clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(ebu_host->clk),
+ "failed to get and enable clock\n");
+ goto err_of_node_put;
+ }
+
+ ebu_host->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(ebu_host->dma_tx)) {
+ ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
+ "failed to request DMA tx chan!.\n");
+ goto err_of_node_put;
+ }
+
+ ebu_host->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(ebu_host->dma_rx)) {
+ ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_rx),
+ "failed to request DMA rx chan!.\n");
+ ebu_host->dma_rx = NULL;
+ goto err_cleanup_dma;
+ }
+
+ resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
+ if (!resname) {
+ ret = -ENOMEM;
+ goto err_cleanup_dma;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
+ if (!res) {
+ ret = -EINVAL;
+ goto err_cleanup_dma;
+ }
+ ebu_host->cs[cs].addr_sel = res->start;
+ writel(ebu_host->cs[cs].addr_sel | EBU_ADDR_MASK(5) | EBU_ADDR_SEL_REGEN,
+ ebu_host->ebu + EBU_ADDR_SEL(cs));
+
+ nand_set_flash_node(&ebu_host->chip, chip_np);
+
+ mtd = nand_to_mtd(&ebu_host->chip);
+ if (!mtd->name) {
+ dev_err(ebu_host->dev, "NAND label property is mandatory\n");
+ ret = -EINVAL;
+ goto err_cleanup_dma;
+ }
+
+ mtd->dev.parent = dev;
+ ebu_host->dev = dev;
+
+ platform_set_drvdata(pdev, ebu_host);
+ nand_set_controller_data(&ebu_host->chip, ebu_host);
+
+ nand = &ebu_host->chip;
+ nand->controller = &ebu_host->controller;
+ nand->controller->ops = &ebu_nand_controller_ops;
+
+ /* Scan to find existence of the device */
+ ret = nand_scan(&ebu_host->chip, 1);
+ if (ret)
+ goto err_cleanup_dma;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err_clean_nand;
+
+ return 0;
+
+err_clean_nand:
+ nand_cleanup(&ebu_host->chip);
+err_cleanup_dma:
+ ebu_dma_cleanup(ebu_host);
+err_of_node_put:
+ of_node_put(chip_np);
+
+ return ret;
+}
+
+static void ebu_nand_remove(struct platform_device *pdev)
+{
+ struct ebu_nand_controller *ebu_host = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(&ebu_host->chip));
+ WARN_ON(ret);
+ nand_cleanup(&ebu_host->chip);
+ ebu_nand_disable(&ebu_host->chip);
+ ebu_dma_cleanup(ebu_host);
+}
+
+static const struct of_device_id ebu_nand_match[] = {
+ { .compatible = "intel,lgm-ebunand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ebu_nand_match);
+
+static struct platform_driver ebu_nand_driver = {
+ .probe = ebu_nand_probe,
+ .remove_new = ebu_nand_remove,
+ .driver = {
+ .name = "intel-nand-controller",
+ .of_match_table = ebu_nand_match,
+ },
+
+};
+module_platform_driver(ebu_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
+MODULE_DESCRIPTION("Intel's LGM External Bus NAND Controller driver");
diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h
new file mode 100644
index 0000000000..e9932da18b
--- /dev/null
+++ b/drivers/mtd/nand/raw/internals.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 - Bootlin
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ *
+ * Header containing internal definitions to be used only by core files.
+ * NAND controller drivers should not include this file.
+ */
+
+#ifndef __LINUX_RAWNAND_INTERNALS
+#define __LINUX_RAWNAND_INTERNALS
+
+#include <linux/mtd/rawnand.h>
+
+/*
+ * NAND Flash Manufacturer ID Codes
+ */
+#define NAND_MFR_AMD 0x01
+#define NAND_MFR_ATO 0x9b
+#define NAND_MFR_EON 0x92
+#define NAND_MFR_ESMT 0xc8
+#define NAND_MFR_FUJITSU 0x04
+#define NAND_MFR_HYNIX 0xad
+#define NAND_MFR_INTEL 0x89
+#define NAND_MFR_MACRONIX 0xc2
+#define NAND_MFR_MICRON 0x2c
+#define NAND_MFR_NATIONAL 0x8f
+#define NAND_MFR_RENESAS 0x07
+#define NAND_MFR_SAMSUNG 0xec
+#define NAND_MFR_SANDISK 0x45
+#define NAND_MFR_STMICRO 0x20
+/* Kioxia is the new name of Toshiba Memory. */
+#define NAND_MFR_TOSHIBA 0x98
+#define NAND_MFR_WINBOND 0xef
+
+/**
+ * struct nand_manufacturer_ops - NAND Manufacturer operations
+ * @detect: detect the NAND memory organization and capabilities
+ * @init: initialize all vendor specific fields (like the ->read_retry()
+ * implementation) if any.
+ * @cleanup: the ->init() function may have allocated resources, ->cleanup()
+ * is here to let vendor specific code release those resources.
+ * @fixup_onfi_param_page: apply vendor specific fixups to the ONFI parameter
+ * page. This is called after the checksum is verified.
+ */
+struct nand_manufacturer_ops {
+ void (*detect)(struct nand_chip *chip);
+ int (*init)(struct nand_chip *chip);
+ void (*cleanup)(struct nand_chip *chip);
+ void (*fixup_onfi_param_page)(struct nand_chip *chip,
+ struct nand_onfi_params *p);
+};
+
+/**
+ * struct nand_manufacturer_desc - NAND Flash Manufacturer descriptor
+ * @name: Manufacturer name
+ * @id: manufacturer ID code of device.
+ * @ops: manufacturer operations
+ */
+struct nand_manufacturer_desc {
+ int id;
+ char *name;
+ const struct nand_manufacturer_ops *ops;
+};
+
+
+extern struct nand_flash_dev nand_flash_ids[];
+
+extern const struct nand_manufacturer_ops amd_nand_manuf_ops;
+extern const struct nand_manufacturer_ops esmt_nand_manuf_ops;
+extern const struct nand_manufacturer_ops hynix_nand_manuf_ops;
+extern const struct nand_manufacturer_ops macronix_nand_manuf_ops;
+extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
+extern const struct nand_manufacturer_ops samsung_nand_manuf_ops;
+extern const struct nand_manufacturer_ops sandisk_nand_manuf_ops;
+extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
+
+/* MLC pairing schemes */
+extern const struct mtd_pairing_scheme dist3_pairing_scheme;
+
+/* Core functions */
+const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id);
+int nand_bbm_get_next_page(struct nand_chip *chip, int page);
+int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs);
+int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
+ int allowbbt);
+void onfi_fill_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ enum nand_interface_type type,
+ unsigned int timing_mode);
+unsigned int
+onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings);
+unsigned int
+onfi_find_closest_nvddr_mode(const struct nand_nvddr_timings *spec_timings);
+int nand_choose_best_sdr_timings(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ struct nand_sdr_timings *spec_timings);
+int nand_choose_best_nvddr_timings(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ struct nand_nvddr_timings *spec_timings);
+const struct nand_interface_config *nand_get_reset_interface_config(void);
+int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
+int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
+int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page);
+int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page);
+int nand_exit_status_op(struct nand_chip *chip);
+int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
+ unsigned int len);
+void nand_decode_ext_id(struct nand_chip *chip);
+void panic_nand_wait(struct nand_chip *chip, unsigned long timeo);
+void sanitize_string(uint8_t *s, size_t len);
+
+static inline bool nand_has_exec_op(struct nand_chip *chip)
+{
+ if (!chip->controller || !chip->controller->ops ||
+ !chip->controller->ops->exec_op)
+ return false;
+
+ return true;
+}
+
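+/*
+ * Without ->exec_op() the operation cannot be pre-checked, so it is
+ * optimistically reported as supported.
+ */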
+static inline int nand_check_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ if (!nand_has_exec_op(chip))
+ return 0;
+
+ return chip->controller->ops->exec_op(chip, op, true);
+}
+
+static inline int nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ if (!nand_has_exec_op(chip))
+ return -ENOTSUPP;
+
+ if (WARN_ON(op->cs >= nanddev_ntargets(&chip->base)))
+ return -EINVAL;
+
+ return chip->controller->ops->exec_op(chip, op, false);
+}
+
+static inline bool nand_controller_can_setup_interface(struct nand_chip *chip)
+{
+ if (!chip->controller || !chip->controller->ops ||
+ !chip->controller->ops->setup_interface)
+ return false;
+
+ if (chip->options & NAND_KEEP_TIMINGS)
+ return false;
+
+ return true;
+}
+
+/* BBT functions */
+int nand_markbad_bbt(struct nand_chip *chip, loff_t offs);
+int nand_isreserved_bbt(struct nand_chip *chip, loff_t offs);
+int nand_isbad_bbt(struct nand_chip *chip, loff_t offs, int allowbbt);
+
+/* Legacy */
+void nand_legacy_set_defaults(struct nand_chip *chip);
+void nand_legacy_adjust_cmdfunc(struct nand_chip *chip);
+int nand_legacy_check_hooks(struct nand_chip *chip);
+
+/* ONFI functions */
+u16 onfi_crc16(u16 crc, u8 const *p, size_t len);
+int nand_onfi_detect(struct nand_chip *chip);
+
+/* JEDEC functions */
+int nand_jedec_detect(struct nand_chip *chip);
+
+#endif /* __LINUX_RAWNAND_INTERNALS */
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
new file mode 100644
index 0000000000..488fd45261
--- /dev/null
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -0,0 +1,902 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for NAND MLC Controller in LPC32xx
+ *
+ * Author: Roland Stigge <stigge@antcom.de>
+ *
+ * Copyright © 2011 WORK Microwave GmbH
+ * Copyright © 2011, 2012 Roland Stigge
+ *
+ * NAND Flash Controller Operation:
+ * - Read: Auto Decode
+ * - Write: Auto Encode
+ * - Tested Page Sizes: 2048, 4096
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mtd/lpc32xx_mlc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+
+#define DRV_NAME "lpc32xx_mlc"
+
+/**********************************************************************
+* MLC NAND controller register offsets
+**********************************************************************/
+
+#define MLC_BUFF(x) (x + 0x00000)
+#define MLC_DATA(x) (x + 0x08000)
+#define MLC_CMD(x) (x + 0x10000)
+#define MLC_ADDR(x) (x + 0x10004)
+#define MLC_ECC_ENC_REG(x) (x + 0x10008)
+#define MLC_ECC_DEC_REG(x) (x + 0x1000C)
+#define MLC_ECC_AUTO_ENC_REG(x) (x + 0x10010)
+#define MLC_ECC_AUTO_DEC_REG(x) (x + 0x10014)
+#define MLC_RPR(x) (x + 0x10018)
+#define MLC_WPR(x) (x + 0x1001C)
+#define MLC_RUBP(x) (x + 0x10020)
+#define MLC_ROBP(x) (x + 0x10024)
+#define MLC_SW_WP_ADD_LOW(x) (x + 0x10028)
+#define MLC_SW_WP_ADD_HIG(x) (x + 0x1002C)
+#define MLC_ICR(x) (x + 0x10030)
+#define MLC_TIME_REG(x) (x + 0x10034)
+#define MLC_IRQ_MR(x) (x + 0x10038)
+#define MLC_IRQ_SR(x) (x + 0x1003C)
+#define MLC_LOCK_PR(x) (x + 0x10044)
+#define MLC_ISR(x) (x + 0x10048)
+#define MLC_CEH(x) (x + 0x1004C)
+
+/**********************************************************************
+* MLC_CMD bit definitions
+**********************************************************************/
+#define MLCCMD_RESET 0xFF
+
+/**********************************************************************
+* MLC_ICR bit definitions
+**********************************************************************/
+#define MLCICR_WPROT (1 << 3)
+#define MLCICR_LARGEBLOCK (1 << 2)
+#define MLCICR_LONGADDR (1 << 1)
+#define MLCICR_16BIT (1 << 0) /* unsupported by LPC32x0! */
+
+/**********************************************************************
+* MLC_TIME_REG bit definitions
+**********************************************************************/
+#define MLCTIMEREG_TCEA_DELAY(n) (((n) & 0x03) << 24)
+#define MLCTIMEREG_BUSY_DELAY(n) (((n) & 0x1F) << 19)
+#define MLCTIMEREG_NAND_TA(n) (((n) & 0x07) << 16)
+#define MLCTIMEREG_RD_HIGH(n) (((n) & 0x0F) << 12)
+#define MLCTIMEREG_RD_LOW(n) (((n) & 0x0F) << 8)
+#define MLCTIMEREG_WR_HIGH(n) (((n) & 0x0F) << 4)
+#define MLCTIMEREG_WR_LOW(n) (((n) & 0x0F) << 0)
+
+/**********************************************************************
+* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
+**********************************************************************/
+#define MLCIRQ_NAND_READY (1 << 5)
+#define MLCIRQ_CONTROLLER_READY (1 << 4)
+#define MLCIRQ_DECODE_FAILURE (1 << 3)
+#define MLCIRQ_DECODE_ERROR (1 << 2)
+#define MLCIRQ_ECC_READY (1 << 1)
+#define MLCIRQ_WRPROT_FAULT (1 << 0)
+
+/**********************************************************************
+* MLC_LOCK_PR bit definitions
+**********************************************************************/
+#define MLCLOCKPR_MAGIC 0xA25E
+
+/**********************************************************************
+* MLC_ISR bit definitions
+**********************************************************************/
+#define MLCISR_DECODER_FAILURE (1 << 6)
+#define MLCISR_ERRORS ((1 << 4) | (1 << 5))
+#define MLCISR_ERRORS_DETECTED (1 << 3)
+#define MLCISR_ECC_READY (1 << 2)
+#define MLCISR_CONTROLLER_READY (1 << 1)
+#define MLCISR_NAND_READY (1 << 0)
+
+/**********************************************************************
+* MLC_CEH bit definitions
+**********************************************************************/
+#define MLCCEH_NORMAL (1 << 0)
+
+struct lpc32xx_nand_cfg_mlc {
+ uint32_t tcea_delay;
+ uint32_t busy_delay;
+ uint32_t nand_ta;
+ uint32_t rd_high;
+ uint32_t rd_low;
+ uint32_t wr_high;
+ uint32_t wr_low;
+ struct mtd_partition *parts;
+ unsigned num_parts;
+};
+
+static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+ if (section >= nand_chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = ((section + 1) * 16) - nand_chip->ecc.bytes;
+ oobregion->length = nand_chip->ecc.bytes;
+
+ return 0;
+}
+
+static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+ if (section >= nand_chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = 16 * section;
+ oobregion->length = 16 - nand_chip->ecc.bytes;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
+ .ecc = lpc32xx_ooblayout_ecc,
+ .free = lpc32xx_ooblayout_free,
+};
+
+static struct nand_bbt_descr lpc32xx_nand_bbt = {
+ .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
+ NAND_BBT_WRITE,
+ .pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
+ .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
+ NAND_BBT_WRITE,
+ .pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+struct lpc32xx_nand_host {
+ struct platform_device *pdev;
+ struct nand_chip nand_chip;
+ struct lpc32xx_mlc_platform_data *pdata;
+ struct clk *clk;
+ struct gpio_desc *wp_gpio;
+ void __iomem *io_base;
+ int irq;
+ struct lpc32xx_nand_cfg_mlc *ncfg;
+ struct completion comp_nand;
+ struct completion comp_controller;
+ uint32_t llptr;
+	/* Physical address of the OOB data buffer */
+	dma_addr_t oob_buf_phy;
+	/* Virtual address of the OOB data buffer */
+	uint8_t *oob_buf;
+ /* Physical address of DMA base address */
+ dma_addr_t io_base_phy;
+
+ struct completion comp_dma;
+ struct dma_chan *dma_chan;
+ struct dma_slave_config dma_slave_config;
+ struct scatterlist sgl;
+ uint8_t *dma_buf;
+ uint8_t *dummy_buf;
+	int mlcsubpages; /* number of 512-byte subpages */
+};
+
+/*
+ * Activate/Deactivate DMA Operation:
+ *
+ * Using the PL080 DMA Controller for transferring the 512 byte subpages
+ * instead of doing readl() / writel() in a loop slows it down significantly.
+ * Measurements via getnstimeofday() of 512 byte subpage reads reveal:
+ *
+ * - readl() of 128 x 32 bits in a loop: ~20us
+ * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
+ * - DMA read of 512 bytes (32 bit, no bursts): ~100us
+ *
+ * These numbers cover the transfer itself; in the DMA case, only the
+ * wait_for_completion() is measured (DMA setup is _not_ included).
+ *
+ * Note that the 512 byte subpage transfer is done directly from/to a
+ * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
+ * 2048 byte page) is spent waiting for the NAND IRQ anyway, while the NAND
+ * controller transfers data between its internal buffer and the NAND chip.
+ *
+ * Therefore, using the PL080 DMA is disabled by default, for now.
+ *
+ */
+static int use_dma;
+
+static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
+{
+ uint32_t clkrate, tmp;
+
+ /* Reset MLC controller */
+ writel(MLCCMD_RESET, MLC_CMD(host->io_base));
+ udelay(1000);
+
+ /* Get base clock for MLC block */
+ clkrate = clk_get_rate(host->clk);
+ if (clkrate == 0)
+ clkrate = 104000000;
+
+	/* Unlock MLC_ICR
+	 * (among other registers; it is relocked automatically) */
+ writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
+
+ /* Configure MLC Controller: Large Block, 5 Byte Address */
+ tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
+ writel(tmp, MLC_ICR(host->io_base));
+
+	/* Unlock MLC_TIME_REG
+	 * (among other registers; it is relocked automatically) */
+ writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
+
+ /* Compute clock setup values, see LPC and NAND manual */
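+	/*
+	 * The timing properties are given in the DT as frequencies in Hz
+	 * (reciprocal delays), so the cycle count is clkrate divided by the
+	 * property value, +1 where the result must be rounded up.
+	 */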
+ tmp = 0;
+ tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
+ tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
+ tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
+ tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
+ tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
+ tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
+ tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
+ writel(tmp, MLC_TIME_REG(host->io_base));
+
+ /* Enable IRQ for CONTROLLER_READY and NAND_READY */
+ writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
+ MLC_IRQ_MR(host->io_base));
+
+ /* Normal nCE operation: nCE controlled by controller */
+ writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
+}
+
+/*
+ * Hardware specific access to control lines
+ */
+static void lpc32xx_nand_cmd_ctrl(struct nand_chip *nand_chip, int cmd,
+ unsigned int ctrl)
+{
+ struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
+
+ if (cmd != NAND_CMD_NONE) {
+ if (ctrl & NAND_CLE)
+ writel(cmd, MLC_CMD(host->io_base));
+ else
+ writel(cmd, MLC_ADDR(host->io_base));
+ }
+}
+
+/*
+ * Read Device Ready (NAND device _and_ controller ready)
+ */
+static int lpc32xx_nand_device_ready(struct nand_chip *nand_chip)
+{
+ struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
+
+ if ((readb(MLC_ISR(host->io_base)) &
+ (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
+ (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
+ return 1;
+
+ return 0;
+}
+
+static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
+{
+ uint8_t sr;
+
+ /* Clear interrupt flag by reading status */
+ sr = readb(MLC_IRQ_SR(host->io_base));
+ if (sr & MLCIRQ_NAND_READY)
+ complete(&host->comp_nand);
+ if (sr & MLCIRQ_CONTROLLER_READY)
+ complete(&host->comp_controller);
+
+ return IRQ_HANDLED;
+}
+
+static int lpc32xx_waitfunc_nand(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
+ goto exit;
+
+ wait_for_completion(&host->comp_nand);
+
+ while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
+ /* Seems to be delayed sometimes by controller */
+ dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
+ cpu_relax();
+ }
+
+exit:
+ return NAND_STATUS_READY;
+}
+
+static int lpc32xx_waitfunc_controller(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
+ goto exit;
+
+ wait_for_completion(&host->comp_controller);
+
+ while (!(readb(MLC_ISR(host->io_base)) &
+ MLCISR_CONTROLLER_READY)) {
+ dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
+ cpu_relax();
+ }
+
+exit:
+ return NAND_STATUS_READY;
+}
+
+static int lpc32xx_waitfunc(struct nand_chip *chip)
+{
+ lpc32xx_waitfunc_nand(chip);
+ lpc32xx_waitfunc_controller(chip);
+
+ return NAND_STATUS_READY;
+}
+
+/*
+ * Enable NAND write protect
+ */
+static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
+{
+ if (host->wp_gpio)
+ gpiod_set_value_cansleep(host->wp_gpio, 1);
+}
+
+/*
+ * Disable NAND write protect
+ */
+static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
+{
+ if (host->wp_gpio)
+ gpiod_set_value_cansleep(host->wp_gpio, 0);
+}
+
+static void lpc32xx_dma_complete_func(void *completion)
+{
+ complete(completion);
+}
+
+static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
+ enum dma_transfer_direction dir)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ struct dma_async_tx_descriptor *desc;
+ int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int res;
+
+ sg_init_one(&host->sgl, mem, len);
+
+ res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ if (res != 1) {
+ dev_err(mtd->dev.parent, "Failed to map sg list\n");
+ return -ENXIO;
+ }
+ desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
+ flags);
+ if (!desc) {
+ dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
+ goto out1;
+ }
+
+ init_completion(&host->comp_dma);
+ desc->callback = lpc32xx_dma_complete_func;
+ desc->callback_param = &host->comp_dma;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(host->dma_chan);
+
+ wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));
+
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ return 0;
+out1:
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ return -ENXIO;
+}
+
+static int lpc32xx_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ int i, j;
+ uint8_t *oobbuf = chip->oob_poi;
+ uint32_t mlc_isr;
+ int res;
+ uint8_t *dma_buf;
+ bool dma_mapped;
+
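+	/*
+	 * Buffers in lowmem can be used for DMA directly; anything above
+	 * high_memory is bounced through the preallocated DMA buffer.
+	 */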
+ if ((void *)buf <= high_memory) {
+ dma_buf = buf;
+ dma_mapped = true;
+ } else {
+ dma_buf = host->dma_buf;
+ dma_mapped = false;
+ }
+
+ /* Writing Command and Address */
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ /* For all sub-pages */
+ for (i = 0; i < host->mlcsubpages; i++) {
+ /* Start Auto Decode Command */
+ writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));
+
+ /* Wait for Controller Ready */
+ lpc32xx_waitfunc_controller(chip);
+
+ /* Check ECC Error status */
+ mlc_isr = readl(MLC_ISR(host->io_base));
+ if (mlc_isr & MLCISR_DECODER_FAILURE) {
+ mtd->ecc_stats.failed++;
+ dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
+ } else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
+ mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
+ }
+
+ /* Read 512 + 16 Bytes */
+ if (use_dma) {
+ res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
+ DMA_DEV_TO_MEM);
+ if (res)
+ return res;
+ } else {
+ for (j = 0; j < (512 >> 2); j++) {
+ *((uint32_t *)(buf)) =
+ readl(MLC_BUFF(host->io_base));
+ buf += 4;
+ }
+ }
+ for (j = 0; j < (16 >> 2); j++) {
+ *((uint32_t *)(oobbuf)) =
+ readl(MLC_BUFF(host->io_base));
+ oobbuf += 4;
+ }
+ }
+
+ if (use_dma && !dma_mapped)
+ memcpy(buf, dma_buf, mtd->writesize);
+
+ return 0;
+}
+
+static int lpc32xx_write_page_lowlevel(struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ const uint8_t *oobbuf = chip->oob_poi;
+ uint8_t *dma_buf = (uint8_t *)buf;
+ int res;
+ int i, j;
+
+ if (use_dma && (void *)buf >= high_memory) {
+ dma_buf = host->dma_buf;
+ memcpy(dma_buf, buf, mtd->writesize);
+ }
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ for (i = 0; i < host->mlcsubpages; i++) {
+ /* Start Encode */
+ writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
+
+ /* Write 512 + 6 Bytes to Buffer */
+ if (use_dma) {
+ res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
+ DMA_MEM_TO_DEV);
+ if (res)
+ return res;
+ } else {
+ for (j = 0; j < (512 >> 2); j++) {
+ writel(*((uint32_t *)(buf)),
+ MLC_BUFF(host->io_base));
+ buf += 4;
+ }
+ }
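+		/*
+		 * Write the 6 free OOB bytes; the remaining 10 bytes of each
+		 * 16-byte OOB chunk are filled by the hardware ECC encoder.
+		 */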
+ writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
+ oobbuf += 4;
+ writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
+ oobbuf += 12;
+
+ /* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
+ writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));
+
+ /* Wait for Controller Ready */
+ lpc32xx_waitfunc_controller(chip);
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int lpc32xx_read_oob(struct nand_chip *chip, int page)
+{
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ /* Read whole page - necessary with MLC controller! */
+ lpc32xx_read_page(chip, host->dummy_buf, 1, page);
+
+ return 0;
+}
+
+static int lpc32xx_write_oob(struct nand_chip *chip, int page)
+{
+	/* No-op: raw OOB writes conflict with the automatic LPC MLC ECC engine! */
+ return 0;
+}
+
+/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
+static void lpc32xx_ecc_enable(struct nand_chip *chip, int mode)
+{
+ /* Always enabled! */
+}
+
+static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
+{
+ struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
+ dma_cap_mask_t mask;
+
+ if (!host->pdata || !host->pdata->dma_filter) {
+ dev_err(mtd->dev.parent, "no DMA platform data\n");
+ return -ENOENT;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
+ "nand-mlc");
+ if (!host->dma_chan) {
+ dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Set direction to a sensible value even if the dmaengine driver
+ * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
+ * driver criticizes it as "alien transfer direction".
+ */
+ host->dma_slave_config.direction = DMA_DEV_TO_MEM;
+ host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.src_maxburst = 128;
+ host->dma_slave_config.dst_maxburst = 128;
+ /* DMA controller does flow control: */
+ host->dma_slave_config.device_fc = false;
+ host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
+ host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
+ if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
+ dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
+ goto out1;
+ }
+
+ return 0;
+out1:
+ dma_release_channel(host->dma_chan);
+ return -ENXIO;
+}
+
+static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
+{
+ struct lpc32xx_nand_cfg_mlc *ncfg;
+ struct device_node *np = dev->of_node;
+
+ ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
+ if (!ncfg)
+ return NULL;
+
+ of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
+ of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
+ of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
+ of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
+ of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
+ of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
+ of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);
+
+ if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
+ !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
+ !ncfg->wr_low) {
+ dev_err(dev, "chip parameters not specified correctly\n");
+ return NULL;
+ }
+
+ return ncfg;
+}
+
+static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ struct device *dev = &host->pdev->dev;
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ host->dma_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
+ if (!host->dma_buf)
+ return -ENOMEM;
+
+ host->dummy_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
+ if (!host->dummy_buf)
+ return -ENOMEM;
+
+ chip->ecc.size = 512;
+ chip->ecc.hwctl = lpc32xx_ecc_enable;
+ chip->ecc.read_page_raw = lpc32xx_read_page;
+ chip->ecc.read_page = lpc32xx_read_page;
+ chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
+ chip->ecc.write_page = lpc32xx_write_page_lowlevel;
+ chip->ecc.write_oob = lpc32xx_write_oob;
+ chip->ecc.read_oob = lpc32xx_read_oob;
+ chip->ecc.strength = 4;
+ chip->ecc.bytes = 10;
+
+ mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
+ host->mlcsubpages = mtd->writesize / 512;
+
+ return 0;
+}
+
+static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
+ .attach_chip = lpc32xx_nand_attach_chip,
+};
+
+/*
+ * Probe for NAND controller
+ */
+static int lpc32xx_nand_probe(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ struct resource *rc;
+ int res;
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->pdev = pdev;
+
+ host->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &rc);
+ if (IS_ERR(host->io_base))
+ return PTR_ERR(host->io_base);
+
+ host->io_base_phy = rc->start;
+
+ nand_chip = &host->nand_chip;
+ mtd = nand_to_mtd(nand_chip);
+ if (pdev->dev.of_node)
+ host->ncfg = lpc32xx_parse_dt(&pdev->dev);
+ if (!host->ncfg) {
+ dev_err(&pdev->dev,
+ "Missing or bad NAND config from device tree\n");
+ return -ENOENT;
+ }
+
+ /* Start with WP disabled, if available */
+ host->wp_gpio = gpiod_get_optional(&pdev->dev, NULL, GPIOD_OUT_LOW);
+ res = PTR_ERR_OR_ZERO(host->wp_gpio);
+ if (res) {
+ if (res != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "WP GPIO is not available: %d\n",
+ res);
+ return res;
+ }
+
+ gpiod_set_consumer_name(host->wp_gpio, "NAND WP");
+
+ host->pdata = dev_get_platdata(&pdev->dev);
+
+ /* link the private data structures */
+ nand_set_controller_data(nand_chip, host);
+ nand_set_flash_node(nand_chip, pdev->dev.of_node);
+ mtd->dev.parent = &pdev->dev;
+
+ /* Get NAND clock */
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "Clock initialization failure\n");
+ res = -ENOENT;
+ goto free_gpio;
+ }
+ res = clk_prepare_enable(host->clk);
+ if (res)
+ goto put_clk;
+
+ nand_chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+ nand_chip->legacy.dev_ready = lpc32xx_nand_device_ready;
+ nand_chip->legacy.chip_delay = 25; /* us */
+ nand_chip->legacy.IO_ADDR_R = MLC_DATA(host->io_base);
+ nand_chip->legacy.IO_ADDR_W = MLC_DATA(host->io_base);
+
+ /* Init NAND controller */
+ lpc32xx_nand_setup(host);
+
+ platform_set_drvdata(pdev, host);
+
+ /* Initialize function pointers */
+ nand_chip->legacy.waitfunc = lpc32xx_waitfunc;
+
+ nand_chip->options = NAND_NO_SUBPAGE_WRITE;
+ nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+ nand_chip->bbt_td = &lpc32xx_nand_bbt;
+ nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;
+
+ if (use_dma) {
+ res = lpc32xx_dma_setup(host);
+ if (res) {
+ res = -EIO;
+ goto unprepare_clk;
+ }
+ }
+
+ /* initially clear interrupt status */
+ readb(MLC_IRQ_SR(host->io_base));
+
+ init_completion(&host->comp_nand);
+ init_completion(&host->comp_controller);
+
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0) {
+ res = -EINVAL;
+ goto release_dma_chan;
+ }
+
+ if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
+ IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
+ dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
+ res = -ENXIO;
+ goto release_dma_chan;
+ }
+
+ /*
+ * Scan to find existence of the device and get the type of NAND device:
+ * SMALL block or LARGE block.
+ */
+ nand_chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
+ res = nand_scan(nand_chip, 1);
+ if (res)
+ goto free_irq;
+
+ mtd->name = DRV_NAME;
+
+ res = mtd_device_register(mtd, host->ncfg->parts,
+ host->ncfg->num_parts);
+ if (res)
+ goto cleanup_nand;
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(nand_chip);
+free_irq:
+ free_irq(host->irq, host);
+release_dma_chan:
+ if (use_dma)
+ dma_release_channel(host->dma_chan);
+unprepare_clk:
+ clk_disable_unprepare(host->clk);
+put_clk:
+ clk_put(host->clk);
+free_gpio:
+ lpc32xx_wp_enable(host);
+ gpiod_put(host->wp_gpio);
+
+ return res;
+}
+
+/*
+ * Remove NAND device
+ */
+static void lpc32xx_nand_remove(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->nand_chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ free_irq(host->irq, host);
+ if (use_dma)
+ dma_release_channel(host->dma_chan);
+
+ clk_disable_unprepare(host->clk);
+ clk_put(host->clk);
+
+ lpc32xx_wp_enable(host);
+ gpiod_put(host->wp_gpio);
+}
+
+static int lpc32xx_nand_resume(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ int ret;
+
+ /* Re-enable NAND clock */
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ return ret;
+
+ /* Fresh init of NAND controller */
+ lpc32xx_nand_setup(host);
+
+ /* Disable write protect */
+ lpc32xx_wp_disable(host);
+
+ return 0;
+}
+
+static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+ /* Enable write protect for safety */
+ lpc32xx_wp_enable(host);
+
+ /* Disable clock */
+ clk_disable_unprepare(host->clk);
+ return 0;
+}
+
+static const struct of_device_id lpc32xx_nand_match[] = {
+ { .compatible = "nxp,lpc3220-mlc" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
+
+static struct platform_driver lpc32xx_nand_driver = {
+ .probe = lpc32xx_nand_probe,
+ .remove_new = lpc32xx_nand_remove,
+ .resume = pm_ptr(lpc32xx_nand_resume),
+ .suspend = pm_ptr(lpc32xx_nand_suspend),
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = lpc32xx_nand_match,
+ },
+};
+
+module_platform_driver(lpc32xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
new file mode 100644
index 0000000000..1c5fa855b9
--- /dev/null
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -0,0 +1,1023 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NXP LPC32XX NAND SLC driver
+ *
+ * Authors:
+ * Kevin Wells <kevin.wells@nxp.com>
+ * Roland Stigge <stigge@antcom.de>
+ *
+ * Copyright © 2011 NXP Semiconductors
+ * Copyright © 2012 Roland Stigge
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/mtd/lpc32xx_slc.h>
+
+#define LPC32XX_MODNAME "lpc32xx-nand"
+
+/**********************************************************************
+* SLC NAND controller register offsets
+**********************************************************************/
+
+#define SLC_DATA(x) (x + 0x000)
+#define SLC_ADDR(x) (x + 0x004)
+#define SLC_CMD(x) (x + 0x008)
+#define SLC_STOP(x) (x + 0x00C)
+#define SLC_CTRL(x) (x + 0x010)
+#define SLC_CFG(x) (x + 0x014)
+#define SLC_STAT(x) (x + 0x018)
+#define SLC_INT_STAT(x) (x + 0x01C)
+#define SLC_IEN(x) (x + 0x020)
+#define SLC_ISR(x) (x + 0x024)
+#define SLC_ICR(x) (x + 0x028)
+#define SLC_TAC(x) (x + 0x02C)
+#define SLC_TC(x) (x + 0x030)
+#define SLC_ECC(x) (x + 0x034)
+#define SLC_DMA_DATA(x) (x + 0x038)
+
+/**********************************************************************
+* slc_ctrl register definitions
+**********************************************************************/
+#define SLCCTRL_SW_RESET (1 << 2) /* Reset the NAND controller bit */
+#define SLCCTRL_ECC_CLEAR (1 << 1) /* Reset ECC bit */
+#define SLCCTRL_DMA_START (1 << 0) /* Start DMA channel bit */
+
+/**********************************************************************
+* slc_cfg register definitions
+**********************************************************************/
+#define SLCCFG_CE_LOW (1 << 5) /* Force CE low bit */
+#define SLCCFG_DMA_ECC (1 << 4) /* Enable DMA ECC bit */
+#define SLCCFG_ECC_EN (1 << 3) /* ECC enable bit */
+#define SLCCFG_DMA_BURST (1 << 2) /* DMA burst bit */
+#define SLCCFG_DMA_DIR (1 << 1) /* DMA write(0)/read(1) bit */
+#define SLCCFG_WIDTH (1 << 0) /* External device width, 0=8bit */
+
+/**********************************************************************
+* slc_stat register definitions
+**********************************************************************/
+#define SLCSTAT_DMA_FIFO (1 << 2) /* DMA FIFO has data bit */
+#define SLCSTAT_SLC_FIFO (1 << 1) /* SLC FIFO has data bit */
+#define SLCSTAT_NAND_READY (1 << 0) /* NAND device is ready bit */
+
+/**********************************************************************
+* slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
+**********************************************************************/
+#define SLCSTAT_INT_TC (1 << 1) /* Transfer count bit */
+#define SLCSTAT_INT_RDY_EN (1 << 0) /* Ready interrupt bit */
+
+/**********************************************************************
+* slc_tac register definitions
+**********************************************************************/
+/* Computation of clock cycles on basis of controller and device clock rates */
+#define SLCTAC_CLOCKS(c, n, s) (min_t(u32, DIV_ROUND_UP(c, n) - 1, 0xF) << s)
+
+/* Clock setting for RDY write sample wait time in 2*n clocks */
+#define SLCTAC_WDR(n) (((n) & 0xF) << 28)
+/* Write pulse width in clock cycles, 1 to 16 clocks */
+#define SLCTAC_WWIDTH(c, n) (SLCTAC_CLOCKS(c, n, 24))
+/* Write hold time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_WHOLD(c, n) (SLCTAC_CLOCKS(c, n, 20))
+/* Write setup time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_WSETUP(c, n) (SLCTAC_CLOCKS(c, n, 16))
+/* Clock setting for RDY read sample wait time in 2*n clocks */
+#define SLCTAC_RDR(n) (((n) & 0xF) << 12)
+/* Read pulse width in clock cycles, 1 to 16 clocks */
+#define SLCTAC_RWIDTH(c, n) (SLCTAC_CLOCKS(c, n, 8))
+/* Read hold time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_RHOLD(c, n) (SLCTAC_CLOCKS(c, n, 4))
+/* Read setup time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_RSETUP(c, n) (SLCTAC_CLOCKS(c, n, 0))
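+
+/*
+ * Worked example (illustrative values, not taken from any board file): with
+ * the default 133.25 MHz base clock and nxp,wwidth = 40000000 (a 40 MHz
+ * write pulse rate), SLCTAC_WWIDTH(133250000, 40000000) evaluates to
+ * DIV_ROUND_UP(133250000, 40000000) - 1 = 3; the 4-bit field appears to
+ * encode cycles - 1, so this selects a 4-clock write pulse.
+ */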
+
+/**********************************************************************
+* slc_ecc register definitions
+**********************************************************************/
+/* ECC line parity fetch macro */
+#define SLCECC_TO_LINEPAR(n) (((n) >> 6) & 0x7FFF)
+#define SLCECC_TO_COLPAR(n) ((n) & 0x3F)
+
+/*
+ * DMA requires storage space for the DMA local buffer and the hardware ECC
+ * storage area. The DMA local buffer is only used when the caller's buffer
+ * is not directly DMA-able (e.g. a vmalloc'ed buffer).
+ */
+#define LPC32XX_DMA_DATA_SIZE 4096
+#define LPC32XX_ECC_SAVE_SIZE ((4096 / 256) * 4)
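+/* ECC save area: one 32-bit word per 256-byte subpage of a worst-case 4 kiB page */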
+
+/* Number of bytes used for ECC stored in NAND per 256 bytes */
+#define LPC32XX_SLC_DEV_ECC_BYTES 3
+
+/*
+ * If the NAND base clock frequency can't be fetched, this frequency will be
+ * used instead as the base. This rate is used to setup the timing registers
+ * used for NAND accesses.
+ */
+#define LPC32XX_DEF_BUS_RATE 133250000
+
+/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
+#define LPC32XX_DMA_TIMEOUT 100
+
+/*
+ * NAND ECC Layout for small page NAND devices
+ * Note: For large and huge page devices, the default layouts are used
+ */
+static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = 6;
+ oobregion->offset = 10;
+
+ return 0;
+}
+
+static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 0;
+ oobregion->length = 4;
+ } else {
+ oobregion->offset = 6;
+ oobregion->length = 4;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
+ .ecc = lpc32xx_ooblayout_ecc,
+ .free = lpc32xx_ooblayout_free,
+};
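+
+/*
+ * Illustration of the resulting small-page (16-byte) OOB map, derived from
+ * the two callbacks above:
+ *
+ *   bytes  0- 3: free (holds the BBT marker pattern)
+ *   bytes  4- 5: not exposed (byte 5 is typically the factory bad block marker)
+ *   bytes  6- 9: free (byte 6 doubles as the BBT version, see descriptors below)
+ *   bytes 10-15: ECC (2 subpages x 3 bytes each)
+ */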
+
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+/*
+ * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
+ * Note: Large page devices use the default layout
+ */
+static struct nand_bbt_descr bbt_smallpage_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+/*
+ * NAND platform configuration structure
+ */
+struct lpc32xx_nand_cfg_slc {
+ uint32_t wdr_clks;
+ uint32_t wwidth;
+ uint32_t whold;
+ uint32_t wsetup;
+ uint32_t rdr_clks;
+ uint32_t rwidth;
+ uint32_t rhold;
+ uint32_t rsetup;
+ struct mtd_partition *parts;
+ unsigned num_parts;
+};
+
+struct lpc32xx_nand_host {
+ struct nand_chip nand_chip;
+ struct lpc32xx_slc_platform_data *pdata;
+ struct clk *clk;
+ struct gpio_desc *wp_gpio;
+ void __iomem *io_base;
+ struct lpc32xx_nand_cfg_slc *ncfg;
+
+ struct completion comp;
+ struct dma_chan *dma_chan;
+ uint32_t dma_buf_len;
+ struct dma_slave_config dma_slave_config;
+ struct scatterlist sgl;
+
+ /*
+ * DMA and CPU addresses of ECC work area and data buffer
+ */
+ uint32_t *ecc_buf;
+ uint8_t *data_buf;
+ dma_addr_t io_base_dma;
+};
+
+static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
+{
+ uint32_t clkrate, tmp;
+
+ /* Reset SLC controller */
+ writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
+ udelay(1000);
+
+ /* Basic setup */
+ writel(0, SLC_CFG(host->io_base));
+ writel(0, SLC_IEN(host->io_base));
+ writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
+ SLC_ICR(host->io_base));
+
+ /* Get base clock for SLC block */
+ clkrate = clk_get_rate(host->clk);
+ if (clkrate == 0)
+ clkrate = LPC32XX_DEF_BUS_RATE;
+
+ /* Compute clock setup values */
+ tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
+ SLCTAC_WWIDTH(clkrate, host->ncfg->wwidth) |
+ SLCTAC_WHOLD(clkrate, host->ncfg->whold) |
+ SLCTAC_WSETUP(clkrate, host->ncfg->wsetup) |
+ SLCTAC_RDR(host->ncfg->rdr_clks) |
+ SLCTAC_RWIDTH(clkrate, host->ncfg->rwidth) |
+ SLCTAC_RHOLD(clkrate, host->ncfg->rhold) |
+ SLCTAC_RSETUP(clkrate, host->ncfg->rsetup);
+ writel(tmp, SLC_TAC(host->io_base));
+}
+
+/*
+ * Hardware specific access to control lines
+ */
+static void lpc32xx_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ uint32_t tmp;
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ /* Does CE state need to be changed? */
+ tmp = readl(SLC_CFG(host->io_base));
+ if (ctrl & NAND_NCE)
+ tmp |= SLCCFG_CE_LOW;
+ else
+ tmp &= ~SLCCFG_CE_LOW;
+ writel(tmp, SLC_CFG(host->io_base));
+
+ if (cmd != NAND_CMD_NONE) {
+ if (ctrl & NAND_CLE)
+ writel(cmd, SLC_CMD(host->io_base));
+ else
+ writel(cmd, SLC_ADDR(host->io_base));
+ }
+}
+
+/*
+ * Read the Device Ready pin
+ */
+static int lpc32xx_nand_device_ready(struct nand_chip *chip)
+{
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ int rdy = 0;
+
+ if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
+ rdy = 1;
+
+ return rdy;
+}
+
+/*
+ * Enable NAND write protect
+ */
+static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
+{
+ if (host->wp_gpio)
+ gpiod_set_value_cansleep(host->wp_gpio, 1);
+}
+
+/*
+ * Disable NAND write protect
+ */
+static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
+{
+ if (host->wp_gpio)
+ gpiod_set_value_cansleep(host->wp_gpio, 0);
+}
+
+/*
+ * Prepares SLC for transfers with H/W ECC enabled
+ */
+static void lpc32xx_nand_ecc_enable(struct nand_chip *chip, int mode)
+{
+	/* Hardware ECC is enabled automatically as needed */
+}
+
+/*
+ * Calculates the ECC for the data
+ */
+static int lpc32xx_nand_ecc_calculate(struct nand_chip *chip,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ /*
+ * ECC is calculated automatically in hardware during syndrome read
+ * and write operations, so it doesn't need to be calculated here.
+ */
+ return 0;
+}
+
+/*
+ * Read a single byte from NAND device
+ */
+static uint8_t lpc32xx_nand_read_byte(struct nand_chip *chip)
+{
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ return (uint8_t)readl(SLC_DATA(host->io_base));
+}
+
+/*
+ * Simple device read without ECC
+ */
+static void lpc32xx_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
+{
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ /* Direct device read with no ECC */
+ while (len-- > 0)
+ *buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
+}
+
+/*
+ * Simple device write without ECC
+ */
+static void lpc32xx_nand_write_buf(struct nand_chip *chip, const uint8_t *buf,
+ int len)
+{
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ /* Direct device write with no ECC */
+ while (len-- > 0)
+ writel((uint32_t)*buf++, SLC_DATA(host->io_base));
+}
+
+/*
+ * Read the OOB data from the device without ECC using FIFO method
+ */
+static int lpc32xx_nand_read_oob_syndrome(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+}
+
+/*
+ * Write the OOB data to the device without ECC using FIFO method
+ */
+static int lpc32xx_nand_write_oob_syndrome(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
+}
+
+/*
+ * Fills in the ECC fields in the OOB buffer with the hardware generated ECC
+ */
+static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
+{
+ int i;
+
+ for (i = 0; i < (count * 3); i += 3) {
+ uint32_t ce = ecc[i / 3];
+ ce = ~(ce << 2) & 0xFFFFFF;
+ spare[i + 2] = (uint8_t)(ce & 0xFF);
+ ce >>= 8;
+ spare[i + 1] = (uint8_t)(ce & 0xFF);
+ ce >>= 8;
+ spare[i] = (uint8_t)(ce & 0xFF);
+ }
+}
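+
+/*
+ * Example of the transform above: an ECC word of 0 read from the controller
+ * becomes ~(0 << 2) & 0xFFFFFF = 0xFFFFFF, so the three spare bytes are
+ * written as 0xFF 0xFF 0xFF - consistent with the all-0xFF OOB of an erased
+ * page (assuming the engine yields 0 for all-0xFF data).
+ */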
+
+static void lpc32xx_dma_complete_func(void *completion)
+{
+ complete(completion);
+}
+
+static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
+ void *mem, int len, enum dma_transfer_direction dir)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ struct dma_async_tx_descriptor *desc;
+ int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int res;
+
+ host->dma_slave_config.direction = dir;
+ host->dma_slave_config.src_addr = dma;
+ host->dma_slave_config.dst_addr = dma;
+ host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.src_maxburst = 4;
+ host->dma_slave_config.dst_maxburst = 4;
+ /* DMA controller does flow control: */
+ host->dma_slave_config.device_fc = false;
+ if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
+ dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
+ return -ENXIO;
+ }
+
+ sg_init_one(&host->sgl, mem, len);
+
+ res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ if (res != 1) {
+ dev_err(mtd->dev.parent, "Failed to map sg list\n");
+ return -ENXIO;
+ }
+ desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
+ flags);
+ if (!desc) {
+ dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
+ goto out1;
+ }
+
+ init_completion(&host->comp);
+ desc->callback = lpc32xx_dma_complete_func;
+ desc->callback_param = &host->comp;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(host->dma_chan);
+
+ wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));
+
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+
+ return 0;
+out1:
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ return -ENXIO;
+}
+
+/*
+ * DMA read/write transfers with ECC support
+ */
+static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
+ int read)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ int i, status = 0;
+ unsigned long timeout;
+ int res;
+ enum dma_transfer_direction dir =
+ read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+ uint8_t *dma_buf;
+ bool dma_mapped;
+
+ if ((void *)buf <= high_memory) {
+ dma_buf = buf;
+ dma_mapped = true;
+ } else {
+ dma_buf = host->data_buf;
+ dma_mapped = false;
+ if (!read)
+ memcpy(host->data_buf, buf, mtd->writesize);
+ }
+
+ if (read) {
+ writel(readl(SLC_CFG(host->io_base)) |
+ SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
+ SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
+ } else {
+ writel((readl(SLC_CFG(host->io_base)) |
+ SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
+ ~SLCCFG_DMA_DIR,
+ SLC_CFG(host->io_base));
+ }
+
+ /* Clear initial ECC */
+ writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));
+
+ /* Transfer size is data area only */
+ writel(mtd->writesize, SLC_TC(host->io_base));
+
+ /* Start transfer in the NAND controller */
+ writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
+ SLC_CTRL(host->io_base));
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ /* Data */
+ res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
+ dma_buf + i * chip->ecc.size,
+ mtd->writesize / chip->ecc.steps, dir);
+ if (res)
+ return res;
+
+ /* Always _read_ ECC */
+ if (i == chip->ecc.steps - 1)
+ break;
+ if (!read) /* ECC availability delayed on write */
+ udelay(10);
+ res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
+ &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
+ if (res)
+ return res;
+ }
+
+ /*
+ * According to NXP, the DMA can be finished here, but the NAND
+ * controller may still have buffered data. After porting to using the
+ * dmaengine DMA driver (amba-pl080), the condition (DMA_FIFO empty)
+ * appears to be always true, according to tests. Keeping the check for
+ * safety reasons for now.
+ */
+ if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
+ dev_warn(mtd->dev.parent, "FIFO not empty!\n");
+ timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
+ while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
+ time_before(jiffies, timeout))
+ cpu_relax();
+ if (!time_before(jiffies, timeout)) {
+ dev_err(mtd->dev.parent, "FIFO held data too long\n");
+ status = -EIO;
+ }
+ }
+
+ /* Read last calculated ECC value */
+ if (!read)
+ udelay(10);
+ host->ecc_buf[chip->ecc.steps - 1] =
+ readl(SLC_ECC(host->io_base));
+
+ /* Flush DMA */
+ dmaengine_terminate_all(host->dma_chan);
+
+ if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
+ readl(SLC_TC(host->io_base))) {
+ /* Something is left in the FIFO, something is wrong */
+ dev_err(mtd->dev.parent, "DMA FIFO failure\n");
+ status = -EIO;
+ }
+
+ /* Stop DMA & HW ECC */
+ writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
+ SLC_CTRL(host->io_base));
+ writel(readl(SLC_CFG(host->io_base)) &
+ ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
+ SLCCFG_DMA_BURST), SLC_CFG(host->io_base));
+
+ if (!dma_mapped && read)
+ memcpy(buf, host->data_buf, mtd->writesize);
+
+ return status;
+}
+
+/*
+ * Read the data and OOB data from the device, use ECC correction with the
+ * data, disable ECC for the OOB data
+ */
+static int lpc32xx_nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ struct mtd_oob_region oobregion = { };
+ int stat, i, status, error;
+ uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
+
+ /* Issue read command */
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ /* Read data and oob, calculate ECC */
+ status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
+
+ /* Get OOB data */
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ /* Convert to stored ECC format */
+ lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);
+
+ /* Pointer to ECC data retrieved from NAND spare area */
+ error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ if (error)
+ return error;
+
+ oobecc = chip->oob_poi + oobregion.offset;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ stat = chip->ecc.correct(chip, buf, oobecc,
+ &tmpecc[i * chip->ecc.bytes]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+
+ buf += chip->ecc.size;
+ oobecc += chip->ecc.bytes;
+ }
+
+ return status;
+}
+
+/*
+ * Read the data and OOB data from the device, no ECC correction with the
+ * data or OOB data
+ */
+static int lpc32xx_nand_read_page_raw_syndrome(struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Issue read command */
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ /* Raw reads can just use the FIFO interface */
+ chip->legacy.read_buf(chip, buf, chip->ecc.size * chip->ecc.steps);
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/*
+ * Write the data and OOB data to the device, use ECC with the data,
+ * disable ECC for the OOB data
+ */
+static int lpc32xx_nand_write_page_syndrome(struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ struct mtd_oob_region oobregion = { };
+ uint8_t *pb;
+ int error;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ /* Write data, calculate ECC on outbound data */
+ error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
+ if (error)
+ return error;
+
+ /*
+ * The calculated ECC needs some manual work done to it before
+ * committing it to NAND. Process the calculated ECC and place
+	 * the resultant values directly into the OOB buffer.
+	 */
+ error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ if (error)
+ return error;
+
+ pb = chip->oob_poi + oobregion.offset;
+ lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
+
+ /* Write ECC data to device */
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return nand_prog_page_end_op(chip);
+}
+
+/*
+ * Write the data and OOB data to the device, no ECC correction with the
+ * data or OOB data
+ */
+static int lpc32xx_nand_write_page_raw_syndrome(struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Raw writes can just use the FIFO interface */
+ nand_prog_page_begin_op(chip, page, 0, buf,
+ chip->ecc.size * chip->ecc.steps);
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
+{
+ struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
+ dma_cap_mask_t mask;
+
+ if (!host->pdata || !host->pdata->dma_filter) {
+ dev_err(mtd->dev.parent, "no DMA platform data\n");
+ return -ENOENT;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
+ "nand-slc");
+ if (!host->dma_chan) {
+ dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
+{
+ struct lpc32xx_nand_cfg_slc *ncfg;
+ struct device_node *np = dev->of_node;
+
+ ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
+ if (!ncfg)
+ return NULL;
+
+ of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
+ of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
+ of_property_read_u32(np, "nxp,whold", &ncfg->whold);
+ of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
+ of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
+ of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
+ of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
+ of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);
+
+ if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
+ !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
+ !ncfg->rhold || !ncfg->rsetup) {
+ dev_err(dev, "chip parameters not specified correctly\n");
+ return NULL;
+ }
+
+ return ncfg;
+}
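+
+/*
+ * A device tree fragment matching the parser above might look as follows
+ * (the unit address and timing values are illustrative only and must be
+ * adapted to the actual flash chip):
+ *
+ *	flash@20020000 {
+ *		compatible = "nxp,lpc3220-slc";
+ *		reg = <0x20020000 0x1000>;
+ *		nxp,wdr-clks = <14>;
+ *		nxp,wwidth = <40000000>;
+ *		nxp,whold = <100000000>;
+ *		nxp,wsetup = <100000000>;
+ *		nxp,rdr-clks = <14>;
+ *		nxp,rwidth = <40000000>;
+ *		nxp,rhold = <66666666>;
+ *		nxp,rsetup = <100000000>;
+ *	};
+ */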
+
+static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ /* OOB and ECC CPU and DMA work areas */
+ host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
+
+ /*
+ * Small page FLASH has a unique OOB layout, but large and huge
+ * page FLASH use the standard layout. Small page FLASH uses a
+ * custom BBT marker layout.
+ */
+ if (mtd->writesize <= 512)
+ mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
+
+ chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
+ /* These sizes remain the same regardless of page size */
+ chip->ecc.size = 256;
+ chip->ecc.strength = 1;
+ chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
+ chip->ecc.prepad = 0;
+ chip->ecc.postpad = 0;
+ chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
+ chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
+ chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
+ chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
+ chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
+ chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
+ chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
+ chip->ecc.correct = rawnand_sw_hamming_correct;
+ chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
+
+ /*
+ * Use a custom BBT marker setup for small page FLASH that
+ * won't interfere with the ECC layout. Large and huge page
+ * FLASH use the standard layout.
+ */
+ if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
+ mtd->writesize <= 512) {
+ chip->bbt_td = &bbt_smallpage_main_descr;
+ chip->bbt_md = &bbt_smallpage_mirror_descr;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
+ .attach_chip = lpc32xx_nand_attach_chip,
+};
+
+/*
+ * Probe for NAND controller
+ */
+static int lpc32xx_nand_probe(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct resource *rc;
+ int res;
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &rc);
+ if (IS_ERR(host->io_base))
+ return PTR_ERR(host->io_base);
+
+ host->io_base_dma = rc->start;
+ if (pdev->dev.of_node)
+ host->ncfg = lpc32xx_parse_dt(&pdev->dev);
+ if (!host->ncfg) {
+ dev_err(&pdev->dev,
+ "Missing or bad NAND config from device tree\n");
+ return -ENOENT;
+ }
+
+ /* Start with WP disabled, if available */
+ host->wp_gpio = gpiod_get_optional(&pdev->dev, NULL, GPIOD_OUT_LOW);
+ res = PTR_ERR_OR_ZERO(host->wp_gpio);
+ if (res) {
+ if (res != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "WP GPIO is not available: %d\n",
+ res);
+ return res;
+ }
+
+ gpiod_set_consumer_name(host->wp_gpio, "NAND WP");
+
+ host->pdata = dev_get_platdata(&pdev->dev);
+
+ chip = &host->nand_chip;
+ mtd = nand_to_mtd(chip);
+ nand_set_controller_data(chip, host);
+ nand_set_flash_node(chip, pdev->dev.of_node);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
+
+ /* Get NAND clock */
+ host->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "Clock failure\n");
+ res = -ENOENT;
+ goto enable_wp;
+ }
+
+ /* Set NAND IO addresses and command/ready functions */
+ chip->legacy.IO_ADDR_R = SLC_DATA(host->io_base);
+ chip->legacy.IO_ADDR_W = SLC_DATA(host->io_base);
+ chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+ chip->legacy.dev_ready = lpc32xx_nand_device_ready;
+ chip->legacy.chip_delay = 20; /* 20us command delay time */
+
+ /* Init NAND controller */
+ lpc32xx_nand_setup(host);
+
+ platform_set_drvdata(pdev, host);
+
+ /* NAND callbacks for LPC32xx SLC hardware */
+ chip->legacy.read_byte = lpc32xx_nand_read_byte;
+ chip->legacy.read_buf = lpc32xx_nand_read_buf;
+ chip->legacy.write_buf = lpc32xx_nand_write_buf;
+
+ /*
+ * Allocate a large enough buffer for a single huge page plus
+ * extra space for the spare area and ECC storage area
+ */
+ host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
+ host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
+ GFP_KERNEL);
+ if (host->data_buf == NULL) {
+ res = -ENOMEM;
+ goto enable_wp;
+ }
+
+ res = lpc32xx_nand_dma_setup(host);
+ if (res) {
+ res = -EIO;
+ goto enable_wp;
+ }
+
+ /* Find NAND device */
+ chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
+ res = nand_scan(chip, 1);
+ if (res)
+ goto release_dma;
+
+ mtd->name = "nxp_lpc3220_slc";
+ res = mtd_device_register(mtd, host->ncfg->parts,
+ host->ncfg->num_parts);
+ if (res)
+ goto cleanup_nand;
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(chip);
+release_dma:
+ dma_release_channel(host->dma_chan);
+enable_wp:
+ lpc32xx_wp_enable(host);
+
+ return res;
+}
+
+/*
+ * Remove NAND device.
+ */
+static void lpc32xx_nand_remove(struct platform_device *pdev)
+{
+ uint32_t tmp;
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->nand_chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ dma_release_channel(host->dma_chan);
+
+ /* Force CE high */
+ tmp = readl(SLC_CTRL(host->io_base));
+ tmp &= ~SLCCFG_CE_LOW;
+ writel(tmp, SLC_CTRL(host->io_base));
+
+ lpc32xx_wp_enable(host);
+}
+
+static int lpc32xx_nand_resume(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ int ret;
+
+ /* Re-enable NAND clock */
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ return ret;
+
+ /* Fresh init of NAND controller */
+ lpc32xx_nand_setup(host);
+
+ /* Disable write protect */
+ lpc32xx_wp_disable(host);
+
+ return 0;
+}
+
+static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
+{
+ uint32_t tmp;
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+ /* Force CE high */
+ tmp = readl(SLC_CTRL(host->io_base));
+ tmp &= ~SLCCFG_CE_LOW;
+ writel(tmp, SLC_CTRL(host->io_base));
+
+ /* Enable write protect for safety */
+ lpc32xx_wp_enable(host);
+
+ /* Disable clock */
+ clk_disable_unprepare(host->clk);
+
+ return 0;
+}
+
+static const struct of_device_id lpc32xx_nand_match[] = {
+ { .compatible = "nxp,lpc3220-slc" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
+
+static struct platform_driver lpc32xx_nand_driver = {
+ .probe = lpc32xx_nand_probe,
+ .remove_new = lpc32xx_nand_remove,
+ .resume = pm_ptr(lpc32xx_nand_resume),
+ .suspend = pm_ptr(lpc32xx_nand_suspend),
+ .driver = {
+ .name = LPC32XX_MODNAME,
+ .of_match_table = lpc32xx_nand_match,
+ },
+};
+
+module_platform_driver(lpc32xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
new file mode 100644
index 0000000000..b841a81cb1
--- /dev/null
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -0,0 +1,3200 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell NAND flash controller driver
+ *
+ * Copyright (C) 2017 Marvell
+ * Author: Miquel RAYNAL <miquel.raynal@free-electrons.com>
+ *
+ *
+ * This NAND controller driver handles two versions of the hardware,
+ * one is called NFCv1 and is available on PXA SoCs and the other is
+ * called NFCv2 and is available on Armada SoCs.
+ *
+ * The main visible difference is that NFCv1 only has Hamming ECC
+ * capabilities, while NFCv2 also embeds a BCH ECC engine. Also, DMA
+ * is not used with NFCv2.
+ *
+ * The ECC layouts are depicted in details in Marvell AN-379, but here
+ * is a brief description.
+ *
+ * When using Hamming, the data is split in 512B chunks (either 1, 2
+ * or 4) and each chunk will have its own ECC "digest" of 6B at the
+ * beginning of the OOB area and eventually the remaining free OOB
+ * bytes (also called "spare" bytes in the driver). This engine
+ * corrects up to 1 bit per chunk and reliably detects an error if
+ * there are at most 2 bitflips. Here is the page layout used by the
+ * controller when Hamming is chosen:
+ *
+ * +-------------------------------------------------------------+
+ * | Data 1 | ... | Data N | ECC 1 | ... | ECCN | Free OOB bytes |
+ * +-------------------------------------------------------------+
+ *
+ * When using the BCH engine, there are N identical (data + free OOB +
+ * ECC) sections and potentially an extra one to deal with
+ * configurations where the chosen (data + free OOB + ECC) sizes do
+ * not align with the page (data + OOB) size. ECC bytes are always
+ * 30B per ECC chunk. Here is the page layout used by the controller
+ * when BCH is chosen:
+ *
+ * +-----------------------------------------
+ * | Data 1 | Free OOB bytes 1 | ECC 1 | ...
+ * +-----------------------------------------
+ *
+ * -------------------------------------------
+ * ... | Data N | Free OOB bytes N | ECC N |
+ * -------------------------------------------
+ *
+ * --------------------------------------------+
+ * Last Data | Last Free OOB bytes | Last ECC |
+ * --------------------------------------------+
+ *
+ * In both cases, the layout seen by the user is always: all data
+ * first, then all free OOB bytes and finally all ECC bytes. With BCH,
+ * ECC bytes are 30B long and are padded with 0xFF to align on 32
+ * bytes.
+ *
+ * The controller has certain limitations that are handled by the
+ * driver:
+ * - It can only read 2k at a time. To overcome this limitation, the
+ * driver issues data cycles on the bus, without issuing new
+ * CMD + ADDR cycles. The Marvell term is "naked" operations.
+ * - The ECC strength in BCH mode cannot be tuned. It is fixed 16
+ * bits. What can be tuned is the ECC block size as long as it
+ * stays between 512B and 2kiB. It's usually chosen based on the
+ * chip ECC requirements. For instance, using 2kiB ECC chunks
+ * provides 4b/512B correctability.
+ * - The controller will always treat data bytes, free OOB bytes
+ * and ECC bytes in that order, no matter what the real layout is
+ * (which is usually all data then all OOB bytes). The
+ * marvell_nfc_layouts array below contains the currently
+ * supported layouts.
+ * - Because of these weird layouts, the Bad Block Markers can be
+ * located in data section. In this case, the NAND_BBT_NO_OOB_BBM
+ * option must be set to prevent scanning/writing bad block
+ * markers.
+ */
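+
+/*
+ * Concrete example of the BCH case (values from the marvell_nfc_layouts
+ * table below): a 4 kiB page at 4-bit/512B strength uses two full chunks of
+ * 2048B data + 32B spare + 30B ECC, i.e. 2 * (2048 + 32 + 30) = 4220 bytes,
+ * which fits a 4096 + 128 byte (page + OOB) device with 4 bytes left over.
+ */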
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <asm/unaligned.h>
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
+#include <linux/platform_data/mtd-nand-pxa3xx.h>
+
+/* Data FIFO granularity; FIFO reads/writes must be a multiple of this length */
+#define FIFO_DEPTH 8
+#define FIFO_REP(x) (x / sizeof(u32))
+#define BCH_SEQ_READS (32 / FIFO_DEPTH)
+/* The NFC does not support transferring chunks larger than this at a time */
+#define MAX_CHUNK_SIZE 2112
+/* NFCv1 cannot read more than 7 bytes of ID */
+#define NFCV1_READID_LEN 7
+/* Polling is done at a pace of POLL_PERIOD us until POLL_TIMEOUT is reached */
+#define POLL_PERIOD 0
+#define POLL_TIMEOUT 100000
+/* Interrupt maximum wait period in ms */
+#define IRQ_TIMEOUT 1000
+/* Latency in clock cycles between SoC pins and NFC logic */
+#define MIN_RD_DEL_CNT 3
+/* Maximum number of contiguous address cycles */
+#define MAX_ADDRESS_CYC_NFCV1 5
+#define MAX_ADDRESS_CYC_NFCV2 7
+/* System control registers/bits to enable the NAND controller on some SoCs */
+#define GENCONF_SOC_DEVICE_MUX 0x208
+#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
+#define GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST BIT(20)
+#define GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST BIT(21)
+#define GENCONF_SOC_DEVICE_MUX_NFC_INT_EN BIT(25)
+#define GENCONF_SOC_DEVICE_MUX_NFC_DEVBUS_ARB_EN BIT(27)
+#define GENCONF_CLK_GATING_CTRL 0x220
+#define GENCONF_CLK_GATING_CTRL_ND_GATE BIT(2)
+#define GENCONF_ND_CLK_CTRL 0x700
+#define GENCONF_ND_CLK_CTRL_EN BIT(0)
+
+/* NAND controller data flash control register */
+#define NDCR 0x00
+#define NDCR_ALL_INT GENMASK(11, 0)
+#define NDCR_CS1_CMDDM BIT(7)
+#define NDCR_CS0_CMDDM BIT(8)
+#define NDCR_RDYM BIT(11)
+#define NDCR_ND_ARB_EN BIT(12)
+#define NDCR_RA_START BIT(15)
+#define NDCR_RD_ID_CNT(x) (min_t(unsigned int, x, 0x7) << 16)
+#define NDCR_PAGE_SZ(x) (x >= 2048 ? BIT(24) : 0)
+#define NDCR_DWIDTH_M BIT(26)
+#define NDCR_DWIDTH_C BIT(27)
+#define NDCR_ND_RUN BIT(28)
+#define NDCR_DMA_EN BIT(29)
+#define NDCR_ECC_EN BIT(30)
+#define NDCR_SPARE_EN BIT(31)
+#define NDCR_GENERIC_FIELDS_MASK (~(NDCR_RA_START | NDCR_PAGE_SZ(2048) | \
+ NDCR_DWIDTH_M | NDCR_DWIDTH_C))
+
+/* NAND interface timing parameter 0 register */
+#define NDTR0 0x04
+#define NDTR0_TRP(x) ((min_t(unsigned int, x, 0xF) & 0x7) << 0)
+#define NDTR0_TRH(x) (min_t(unsigned int, x, 0x7) << 3)
+#define NDTR0_ETRP(x) ((min_t(unsigned int, x, 0xF) & 0x8) << 3)
+#define NDTR0_SEL_NRE_EDGE BIT(7)
+#define NDTR0_TWP(x) (min_t(unsigned int, x, 0x7) << 8)
+#define NDTR0_TWH(x) (min_t(unsigned int, x, 0x7) << 11)
+#define NDTR0_TCS(x) (min_t(unsigned int, x, 0x7) << 16)
+#define NDTR0_TCH(x) (min_t(unsigned int, x, 0x7) << 19)
+#define NDTR0_RD_CNT_DEL(x) (min_t(unsigned int, x, 0xF) << 22)
+#define NDTR0_SELCNTR BIT(26)
+#define NDTR0_TADL(x) (min_t(unsigned int, x, 0x1F) << 27)
+
+/* NAND interface timing parameter 1 register */
+#define NDTR1 0x0C
+#define NDTR1_TAR(x) (min_t(unsigned int, x, 0xF) << 0)
+#define NDTR1_TWHR(x) (min_t(unsigned int, x, 0xF) << 4)
+#define NDTR1_TRHW(x) (min_t(unsigned int, x / 16, 0x3) << 8)
+#define NDTR1_PRESCALE BIT(14)
+#define NDTR1_WAIT_MODE BIT(15)
+#define NDTR1_TR(x) (min_t(unsigned int, x, 0xFFFF) << 16)
+
+/* NAND controller status register */
+#define NDSR 0x14
+#define NDSR_WRCMDREQ BIT(0)
+#define NDSR_RDDREQ BIT(1)
+#define NDSR_WRDREQ BIT(2)
+#define NDSR_CORERR BIT(3)
+#define NDSR_UNCERR BIT(4)
+#define NDSR_CMDD(cs) BIT(8 - cs)
+#define NDSR_RDY(rb) BIT(11 + rb)
+#define NDSR_ERRCNT(x) ((x >> 16) & 0x1F)
+
+/* NAND ECC control register */
+#define NDECCCTRL 0x28
+#define NDECCCTRL_BCH_EN BIT(0)
+
+/* NAND controller data buffer register */
+#define NDDB 0x40
+
+/* NAND controller command buffer 0 register */
+#define NDCB0 0x48
+#define NDCB0_CMD1(x) ((x & 0xFF) << 0)
+#define NDCB0_CMD2(x) ((x & 0xFF) << 8)
+#define NDCB0_ADDR_CYC(x) ((x & 0x7) << 16)
+#define NDCB0_ADDR_GET_NUM_CYC(x) (((x) >> 16) & 0x7)
+#define NDCB0_DBC BIT(19)
+#define NDCB0_CMD_TYPE(x) ((x & 0x7) << 21)
+#define NDCB0_CSEL BIT(24)
+#define NDCB0_RDY_BYP BIT(27)
+#define NDCB0_LEN_OVRD BIT(28)
+#define NDCB0_CMD_XTYPE(x) ((x & 0x7) << 29)
+
+/* NAND controller command buffer 1 register */
+#define NDCB1 0x4C
+#define NDCB1_COLS(x) ((x & 0xFFFF) << 0)
+#define NDCB1_ADDRS_PAGE(x) (x << 16)
+
+/* NAND controller command buffer 2 register */
+#define NDCB2 0x50
+#define NDCB2_ADDR5_PAGE(x) (((x >> 16) & 0xFF) << 0)
+#define NDCB2_ADDR5_CYC(x) ((x & 0xFF) << 0)
+
+/* NAND controller command buffer 3 register */
+#define NDCB3 0x54
+#define NDCB3_ADDR6_CYC(x) ((x & 0xFF) << 16)
+#define NDCB3_ADDR7_CYC(x) ((x & 0xFF) << 24)
+
+/* NAND controller command buffer 0 register 'type' and 'xtype' fields */
+#define TYPE_READ 0
+#define TYPE_WRITE 1
+#define TYPE_ERASE 2
+#define TYPE_READ_ID 3
+#define TYPE_STATUS 4
+#define TYPE_RESET 5
+#define TYPE_NAKED_CMD 6
+#define TYPE_NAKED_ADDR 7
+#define TYPE_MASK 7
+#define XTYPE_MONOLITHIC_RW 0
+#define XTYPE_LAST_NAKED_RW 1
+#define XTYPE_FINAL_COMMAND 3
+#define XTYPE_READ 4
+#define XTYPE_WRITE_DISPATCH 4
+#define XTYPE_NAKED_RW 5
+#define XTYPE_COMMAND_DISPATCH 6
+#define XTYPE_MASK 7
+
+/**
+ * struct marvell_hw_ecc_layout - layout of Marvell ECC
+ *
+ * The Marvell ECC engine works differently from the others: in order to limit
+ * the size of the IP, hardware engineers chose to set a fixed strength of 16
+ * bits per subpage, and depending on the strength needed by the NAND chip, a
+ * particular layout mixing data/spare/ECC is defined, with a possible last
+ * chunk smaller than the others.
+ *
+ * @writesize:		Full page size to which the layout applies
+ * @chunk:		Desired ECC chunk size to which the layout applies
+ * @strength:		Desired ECC strength (per chunk size bytes) to which the
+ *			layout applies
+ * @nchunks: Total number of chunks
+ * @full_chunk_cnt: Number of full-sized chunks, which is the number of
+ * repetitions of the pattern:
+ * (data_bytes + spare_bytes + ecc_bytes).
+ * @data_bytes: Number of data bytes per chunk
+ * @spare_bytes: Number of spare bytes per chunk
+ * @ecc_bytes: Number of ecc bytes per chunk
+ * @last_data_bytes: Number of data bytes in the last chunk
+ * @last_spare_bytes: Number of spare bytes in the last chunk
+ * @last_ecc_bytes: Number of ecc bytes in the last chunk
+ */
+struct marvell_hw_ecc_layout {
+ /* Constraints */
+ int writesize;
+ int chunk;
+ int strength;
+ /* Corresponding layout */
+ int nchunks;
+ int full_chunk_cnt;
+ int data_bytes;
+ int spare_bytes;
+ int ecc_bytes;
+ int last_data_bytes;
+ int last_spare_bytes;
+ int last_ecc_bytes;
+};
+
+#define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb) \
+ { \
+ .writesize = ws, \
+ .chunk = dc, \
+ .strength = ds, \
+ .nchunks = nc, \
+ .full_chunk_cnt = fcc, \
+ .data_bytes = db, \
+ .spare_bytes = sb, \
+ .ecc_bytes = eb, \
+ .last_data_bytes = ldb, \
+ .last_spare_bytes = lsb, \
+ .last_ecc_bytes = leb, \
+ }
+
+/* Layouts explained in AN-379_Marvell_SoC_NFC_ECC */
+static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
+	MARVELL_LAYOUT(  512,  512,  1,  1,  1,  512,  8,  8,    0,   0,  0),
+	MARVELL_LAYOUT( 2048,  512,  1,  1,  1, 2048, 40, 24,    0,   0,  0),
+	MARVELL_LAYOUT( 2048,  512,  4,  1,  1, 2048, 32, 30,    0,   0,  0),
+	MARVELL_LAYOUT( 2048,  512,  8,  2,  1, 1024,  0, 30, 1024,  32, 30),
+	MARVELL_LAYOUT( 2048,  512,  8,  2,  1, 1024,  0, 30, 1024,  64, 30),
+	MARVELL_LAYOUT( 2048,  512, 12,  3,  2,  704,  0, 30,  640,   0, 30),
+	MARVELL_LAYOUT( 2048,  512, 16,  5,  4,  512,  0, 30,    0,  32, 30),
+	MARVELL_LAYOUT( 4096,  512,  4,  2,  2, 2048, 32, 30,    0,   0,  0),
+	MARVELL_LAYOUT( 4096,  512,  8,  5,  4, 1024,  0, 30,    0,  64, 30),
+	MARVELL_LAYOUT( 4096,  512, 12,  6,  5,  704,  0, 30,  576,  32, 30),
+	MARVELL_LAYOUT( 4096,  512, 16,  9,  8,  512,  0, 30,    0,  32, 30),
+	MARVELL_LAYOUT( 8192,  512,  4,  4,  4, 2048,  0, 30,    0,   0,  0),
+	MARVELL_LAYOUT( 8192,  512,  8,  9,  8, 1024,  0, 30,    0, 160, 30),
+	MARVELL_LAYOUT( 8192,  512, 12, 12, 11,  704,  0, 30,  448,  64, 30),
+	MARVELL_LAYOUT( 8192,  512, 16, 17, 16,  512,  0, 30,    0,  32, 30),
+};
+
+/**
+ * struct marvell_nand_chip_sel - CS line description
+ *
+ * The NAND Flash Controller has up to 4 CE and 2 RB pins. The CE selection
+ * is made by a field in the NDCB0 register and another field in the NDCB2
+ * register. The datasheet describes the logic with an error: the ADDR5 field
+ * is declared once at the beginning of NDCB2 and again at its end. Because
+ * the ADDR5 field of NDCB2 may be used by other bytes, it would be more
+ * logical to use the last bits of this field instead of the first ones.
+ *
+ * @cs: Wanted CE lane.
+ * @ndcb0_csel: Value of the NDCB0 register with or without the flag
+ * selecting the wanted CE lane. This is set once when
+ * the Device Tree is probed.
+ * @rb: Ready/Busy pin for the flash chip
+ */
+struct marvell_nand_chip_sel {
+ unsigned int cs;
+ u32 ndcb0_csel;
+ unsigned int rb;
+};
+
+/**
+ * struct marvell_nand_chip - stores NAND chip device related information
+ *
+ * @chip: Base NAND chip structure
+ * @node: Used to store NAND chips into a list
+ * @layout: NAND layout when using hardware ECC
+ * @ndcr: Controller register value for this NAND chip
+ * @ndtr0: Timing registers 0 value for this NAND chip
+ * @ndtr1: Timing registers 1 value for this NAND chip
+ * @addr_cyc:		Number of cycles needed to pass the column address
+ * @selected_die: Current active CS
+ * @nsels: Number of CS lines required by the NAND chip
+ * @sels: Array of CS lines descriptions
+ */
+struct marvell_nand_chip {
+ struct nand_chip chip;
+ struct list_head node;
+ const struct marvell_hw_ecc_layout *layout;
+ u32 ndcr;
+ u32 ndtr0;
+ u32 ndtr1;
+ int addr_cyc;
+ int selected_die;
+ unsigned int nsels;
+ struct marvell_nand_chip_sel sels[];
+};
+
+static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct marvell_nand_chip, chip);
+}
+
+static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
+ *nand)
+{
+ return &nand->sels[nand->selected_die];
+}
+
+/**
+ * struct marvell_nfc_caps - NAND controller capabilities for distinction
+ * between compatible strings
+ *
+ * @max_cs_nb: Number of Chip Select lines available
+ * @max_rb_nb: Number of Ready/Busy lines available
+ * @need_system_controller: Indicates if the SoC needs to have access to the
+ *                      system controller (i.e. to enable the NAND controller)
+ * @legacy_of_bindings:	Indicates if DT parsing must be done using the
+ *			old-fashioned (legacy) bindings
+ * @is_nfcv2:		NFCv2 has numerous enhancements compared to NFCv1, i.e.
+ *			a BCH error detection and correction algorithm and an
+ *			added NDCB3 register
+ * @use_dma: Use dma for data transfers
+ * @max_mode_number: Maximum timing mode supported by the controller
+ */
+struct marvell_nfc_caps {
+ unsigned int max_cs_nb;
+ unsigned int max_rb_nb;
+ bool need_system_controller;
+ bool legacy_of_bindings;
+ bool is_nfcv2;
+ bool use_dma;
+ unsigned int max_mode_number;
+};
+
+/**
+ * struct marvell_nfc - stores Marvell NAND controller information
+ *
+ * @controller: Base controller structure
+ * @dev: Parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @core_clk: Core clock
+ * @reg_clk: Registers clock
+ * @complete: Completion object to wait for NAND controller events
+ * @assigned_cs: Bitmask describing already assigned CS lines
+ * @chips: List containing all the NAND chips attached to
+ * this NAND controller
+ * @selected_chip: Currently selected target chip
+ * @caps: NAND controller capabilities for each compatible string
+ * @use_dma:		Whether DMA is used
+ * @dma_chan: DMA channel (NFCv1 only)
+ * @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only)
+ */
+struct marvell_nfc {
+ struct nand_controller controller;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *core_clk;
+ struct clk *reg_clk;
+ struct completion complete;
+ unsigned long assigned_cs;
+ struct list_head chips;
+ struct nand_chip *selected_chip;
+ const struct marvell_nfc_caps *caps;
+
+ /* DMA (NFCv1 only) */
+ bool use_dma;
+ struct dma_chan *dma_chan;
+ u8 *dma_buf;
+};
+
+static inline struct marvell_nfc *to_marvell_nfc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct marvell_nfc, controller);
+}
+
+/**
+ * struct marvell_nfc_timings - NAND controller timings expressed in NAND
+ * Controller clock cycles
+ *
+ * @tRP: ND_nRE pulse width
+ * @tRH: ND_nRE high duration
+ * @tWP: ND_nWE pulse time
+ * @tWH: ND_nWE high duration
+ * @tCS: Enable signal setup time
+ * @tCH: Enable signal hold time
+ * @tADL: Address to write data delay
+ * @tAR: ND_ALE low to ND_nRE low delay
+ * @tWHR: ND_nWE high to ND_nRE low for status read
+ * @tRHW: ND_nRE high duration, read to write delay
+ * @tR: ND_nWE high to ND_nRE low for read
+ */
+struct marvell_nfc_timings {
+ /* NDTR0 fields */
+ unsigned int tRP;
+ unsigned int tRH;
+ unsigned int tWP;
+ unsigned int tWH;
+ unsigned int tCS;
+ unsigned int tCH;
+ unsigned int tADL;
+ /* NDTR1 fields */
+ unsigned int tAR;
+ unsigned int tWHR;
+ unsigned int tRHW;
+ unsigned int tR;
+};
+
+/**
+ * TO_CYCLES() - Converts a duration into a number of clock cycles.
+ *
+ * @ps: Duration in pico-seconds
+ * @period_ns: Clock period in nano-seconds
+ *
+ * Convert the duration to nanoseconds, then divide by the clock period and
+ * return the resulting number of cycles.
+ */
+#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns))
+#define TO_CYCLES64(ps, period_ns) (DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
+ period_ns))
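+
+/*
+ * Example: a 30 ns (30000 ps) pulse on a 100 MHz controller clock
+ * (period_ns = 10) gives TO_CYCLES(30000, 10) = DIV_ROUND_UP(30, 10) = 3
+ * clock cycles.
+ */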
+
+/**
+ * struct marvell_nfc_op - filled during the parsing of the ->exec_op()
+ * subop subset of instructions.
+ *
+ * @ndcb: Array of values written to NDCBx registers
+ * @cle_ale_delay_ns: Optional delay after the last CMD or ADDR cycle
+ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
+ * @rdy_delay_ns: Optional delay after waiting for the RB pin
+ * @data_delay_ns: Optional delay after the data xfer
+ * @data_instr_idx: Index of the data instruction in the subop
+ * @data_instr: Pointer to the data instruction in the subop
+ */
+struct marvell_nfc_op {
+ u32 ndcb[4];
+ unsigned int cle_ale_delay_ns;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+ unsigned int data_delay_ns;
+ unsigned int data_instr_idx;
+ const struct nand_op_instr *data_instr;
+};
+
+/*
+ * Internal helper to conditionally apply a delay (from the above structure,
+ * most of the time).
+ */
+static void cond_delay(unsigned int ns)
+{
+ if (!ns)
+ return;
+
+ if (ns < 10000)
+ ndelay(ns);
+ else
+ udelay(DIV_ROUND_UP(ns, 1000));
+}
+
+/*
+ * The controller has many flags that could generate interrupts; most of them
+ * are disabled and polling is used instead. For the very slow signals, using
+ * interrupts can reduce the CPU load.
+ */
+static void marvell_nfc_disable_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ /* Writing 1 disables the interrupt */
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg | int_mask, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ /* Writing 0 enables the interrupt */
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
+}
+
+static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDSR);
+ writel_relaxed(int_mask, nfc->regs + NDSR);
+
+ return reg & int_mask;
+}
+
+static void marvell_nfc_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr;
+
+ /*
+	 * Callers of this function do not verify if the NAND is using a 16-bit
+	 * or an 8-bit bus for normal operations, so we need to take care of
+	 * that here by leaving the configuration unchanged if the NAND does
+	 * not have the NAND_BUSWIDTH_16 flag set.
+ */
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ return;
+
+ ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (force_8bit)
+ ndcr &= ~(NDCR_DWIDTH_M | NDCR_DWIDTH_C);
+ else
+ ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ writel_relaxed(ndcr, nfc->regs + NDCR);
+}
+
+static int marvell_nfc_wait_ndrun(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 val;
+ int ret;
+
+ /*
+	 * The command is being processed; wait for the ND_RUN bit to be
+	 * cleared by the NFC. If it is not, we must clear it by hand.
+ */
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDCR, val,
+ (val & NDCR_ND_RUN) == 0,
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on NAND controller run mode\n");
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Any time a command has to be sent to the controller, the following sequence
+ * has to be followed:
+ * - call marvell_nfc_prepare_cmd()
+ *      -> activate the ND_RUN bit, which effectively starts the job
+ *      -> wait for the signal indicating the NFC is waiting for a command
+ * - send the command (cmd and address cycles)
+ * - eventually send or receive the data
+ * - call marvell_nfc_end_cmd() with the corresponding flag
+ *      -> wait for the flag to be triggered or cancel the job with a timeout
+ *
+ * The following helpers factor out common code so that the
+ * specialized functions responsible for executing the actual NAND
+ * operations do not have to replicate the same code blocks.
+ */
+static int marvell_nfc_prepare_cmd(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr, val;
+ int ret;
+
+ /* Poll ND_RUN and clear NDSR before issuing any command */
+ ret = marvell_nfc_wait_ndrun(chip);
+ if (ret) {
+ dev_err(nfc->dev, "Last operation did not succeed\n");
+ return ret;
+ }
+
+ ndcr = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(readl(nfc->regs + NDSR), nfc->regs + NDSR);
+
+ /* Assert ND_RUN bit and wait the NFC to be ready */
+ writel_relaxed(ndcr | NDCR_ND_RUN, nfc->regs + NDCR);
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
+ val & NDSR_WRCMDREQ,
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+		dev_err(nfc->dev, "Timeout on WRCMDREQ\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Command may be written, clear WRCMDREQ status bit */
+ writel_relaxed(NDSR_WRCMDREQ, nfc->regs + NDSR);
+
+ return 0;
+}
+
+static void marvell_nfc_send_cmd(struct nand_chip *chip,
+ struct marvell_nfc_op *nfc_op)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ dev_dbg(nfc->dev, "\nNDCR: 0x%08x\n"
+ "NDCB0: 0x%08x\nNDCB1: 0x%08x\nNDCB2: 0x%08x\nNDCB3: 0x%08x\n",
+ (u32)readl_relaxed(nfc->regs + NDCR), nfc_op->ndcb[0],
+ nfc_op->ndcb[1], nfc_op->ndcb[2], nfc_op->ndcb[3]);
+
+ writel_relaxed(to_nand_sel(marvell_nand)->ndcb0_csel | nfc_op->ndcb[0],
+ nfc->regs + NDCB0);
+ writel_relaxed(nfc_op->ndcb[1], nfc->regs + NDCB0);
+ writel(nfc_op->ndcb[2], nfc->regs + NDCB0);
+
+ /*
+ * Write NDCB0 four times only if LEN_OVRD is set or if ADDR6 or ADDR7
+ * fields are used (only available on NFCv2).
+ */
+ if (nfc_op->ndcb[0] & NDCB0_LEN_OVRD ||
+ NDCB0_ADDR_GET_NUM_CYC(nfc_op->ndcb[0]) >= 6) {
+ if (!WARN_ON_ONCE(!nfc->caps->is_nfcv2))
+ writel(nfc_op->ndcb[3], nfc->regs + NDCB0);
+ }
+}
+
+static int marvell_nfc_end_cmd(struct nand_chip *chip, int flag,
+ const char *label)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
+ val & flag,
+ POLL_PERIOD, POLL_TIMEOUT);
+
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on %s (NDSR: 0x%08x)\n",
+ label, val);
+ if (nfc->dma_chan)
+ dmaengine_terminate_all(nfc->dma_chan);
+ return ret;
+ }
+
+ /*
+	 * The DMA path uses this helper to poll the CMDD bits without having
+	 * them cleared.
+ */
+ if (nfc->use_dma && (readl_relaxed(nfc->regs + NDCR) & NDCR_DMA_EN))
+ return 0;
+
+ writel_relaxed(flag, nfc->regs + NDSR);
+
+ return 0;
+}
+
+static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ int cs_flag = NDSR_CMDD(to_nand_sel(marvell_nand)->ndcb0_csel);
+
+ return marvell_nfc_end_cmd(chip, cs_flag, "CMDD");
+}
+
+static int marvell_nfc_poll_status(struct marvell_nfc *nfc, u32 mask,
+ u32 expected_val, unsigned long timeout_ms)
+{
+ unsigned long limit;
+ u32 st;
+
+ limit = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ st = readl_relaxed(nfc->regs + NDSR);
+ if (st & NDSR_RDY(1))
+ st |= NDSR_RDY(0);
+
+ if ((st & mask) == expected_val)
+ return 0;
+
+ cpu_relax();
+ } while (time_after(limit, jiffies));
+
+ return -ETIMEDOUT;
+}
+
+static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 pending;
+ int ret;
+
+ /* Timeout is expressed in ms */
+ if (!timeout_ms)
+ timeout_ms = IRQ_TIMEOUT;
+
+ if (mtd->oops_panic_write) {
+ ret = marvell_nfc_poll_status(nfc, NDSR_RDY(0),
+ NDSR_RDY(0),
+ timeout_ms);
+ } else {
+ init_completion(&nfc->complete);
+
+ marvell_nfc_enable_int(nfc, NDCR_RDYM);
+ ret = wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(timeout_ms));
+ marvell_nfc_disable_int(nfc, NDCR_RDYM);
+ }
+ pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
+
+ /*
+ * In case the interrupt was not served in the required time frame,
+	 * check if the ISR was not served or if something actually went wrong.
+ */
+ if (!ret && !pending) {
+ dev_err(nfc->dev, "Timeout waiting for RB signal\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_select_target(struct nand_chip *chip,
+ unsigned int die_nr)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr_generic;
+
+ /*
+ * Reset the NDCR register to a clean state for this particular chip,
+ * also clear ND_RUN bit.
+ */
+ ndcr_generic = readl_relaxed(nfc->regs + NDCR) &
+ NDCR_GENERIC_FIELDS_MASK & ~NDCR_ND_RUN;
+ writel_relaxed(ndcr_generic | marvell_nand->ndcr, nfc->regs + NDCR);
+
+ /* Also reset the interrupt status register */
+ marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+
+ if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
+ return;
+
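+	/* Update the timing registers only when switching chip or die */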
+ writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
+ writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
+
+ nfc->selected_chip = chip;
+ marvell_nand->selected_die = die_nr;
+}
+
+static irqreturn_t marvell_nfc_isr(int irq, void *dev_id)
+{
+ struct marvell_nfc *nfc = dev_id;
+ u32 st = readl_relaxed(nfc->regs + NDSR);
+ u32 ien = (~readl_relaxed(nfc->regs + NDCR)) & NDCR_ALL_INT;
+
+	/*
+	 * The RDY interrupt mask is a single bit in NDCR while there are two
+	 * status bits in NDSR (RDY[cs0/cs2] and RDY[cs1/cs3]).
+	 */
+ if (st & NDSR_RDY(1))
+ st |= NDSR_RDY(0);
+
+ if (!(st & ien))
+ return IRQ_NONE;
+
+ marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);
+
+ if (st & (NDSR_RDY(0) | NDSR_RDY(1)))
+ complete(&nfc->complete);
+
+ return IRQ_HANDLED;
+}
+
+/* HW ECC related functions */
+static void marvell_nfc_enable_hw_ecc(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (!(ndcr & NDCR_ECC_EN)) {
+ writel_relaxed(ndcr | NDCR_ECC_EN, nfc->regs + NDCR);
+
+ /*
+ * When enabling BCH, set threshold to 0 to always know the
+ * number of corrected bitflips.
+ */
+ if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
+ writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
+ }
+}
+
+static void marvell_nfc_disable_hw_ecc(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (ndcr & NDCR_ECC_EN) {
+ writel_relaxed(ndcr & ~NDCR_ECC_EN, nfc->regs + NDCR);
+ if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+ }
+}
+
+/* DMA related helpers */
+static void marvell_nfc_enable_dma(struct marvell_nfc *nfc)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg | NDCR_DMA_EN, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_disable_dma(struct marvell_nfc *nfc)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg & ~NDCR_DMA_EN, nfc->regs + NDCR);
+}
+
+/* Read/write PIO/DMA accessors */
+static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
+ enum dma_data_direction direction,
+ unsigned int len)
+{
+ unsigned int dma_len = min_t(int, ALIGN(len, 32), MAX_CHUNK_SIZE);
+ struct dma_async_tx_descriptor *tx;
+ struct scatterlist sg;
+ dma_cookie_t cookie;
+ int ret;
+
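+	/*
+	 * dma_len is rounded up to the 32-byte DMA granularity; the dedicated
+	 * bounce buffer allocated in marvell_nfc_init_dma() absorbs the
+	 * padding.
+	 */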
+ marvell_nfc_enable_dma(nfc);
+ /* Prepare the DMA transfer */
+ sg_init_one(&sg, nfc->dma_buf, dma_len);
+ ret = dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ if (!ret) {
+ dev_err(nfc->dev, "Could not map DMA S/G list\n");
+ return -ENXIO;
+ }
+
+ tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
+ direction == DMA_FROM_DEVICE ?
+ DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
+ dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ return -ENXIO;
+ }
+
+ /* Do the task and wait for it to finish */
+ cookie = dmaengine_submit(tx);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ return -EIO;
+
+ dma_async_issue_pending(nfc->dma_chan);
+ ret = marvell_nfc_wait_cmdd(nfc->selected_chip);
+ dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ marvell_nfc_disable_dma(nfc);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout waiting for DMA (status: %d)\n",
+ dmaengine_tx_status(nfc->dma_chan, cookie, NULL));
+ dmaengine_terminate_all(nfc->dma_chan);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_xfer_data_in_pio(struct marvell_nfc *nfc, u8 *in,
+ unsigned int len)
+{
+ unsigned int last_len = len % FIFO_DEPTH;
+ unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
+ int i;
+
+ for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
+ ioread32_rep(nfc->regs + NDDB, in + i, FIFO_REP(FIFO_DEPTH));
+
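+	/*
+	 * The FIFO is drained in full bursts only: bounce the trailing
+	 * partial chunk through a temporary buffer so 'in' is not overflowed.
+	 */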
+ if (last_len) {
+ u8 tmp_buf[FIFO_DEPTH];
+
+ ioread32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
+ memcpy(in + last_full_offset, tmp_buf, last_len);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_xfer_data_out_pio(struct marvell_nfc *nfc, const u8 *out,
+ unsigned int len)
+{
+ unsigned int last_len = len % FIFO_DEPTH;
+ unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
+ int i;
+
+ for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
+ iowrite32_rep(nfc->regs + NDDB, out + i, FIFO_REP(FIFO_DEPTH));
+
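+	/*
+	 * Pad the trailing partial chunk through a temporary buffer: only the
+	 * first last_len bytes pushed into the FIFO are meaningful, the rest
+	 * is padding.
+	 */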
+ if (last_len) {
+ u8 tmp_buf[FIFO_DEPTH];
+
+ memcpy(tmp_buf, out + last_full_offset, last_len);
+ iowrite32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
+ u8 *data, int data_len,
+ u8 *spare, int spare_len,
+ u8 *ecc, int ecc_len,
+ unsigned int *max_bitflips)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bf;
+
+ /*
+ * Blank pages (all 0xFF) that have not been written may be recognized
+ * as bad if bitflips occur, so whenever an uncorrectable error occurs,
+ * check if the entire page (with ECC bytes) is actually blank or not.
+ */
+ if (!data)
+ data_len = 0;
+ if (!spare)
+ spare_len = 0;
+ if (!ecc)
+ ecc_len = 0;
+
+ bf = nand_check_erased_ecc_chunk(data, data_len, ecc, ecc_len,
+ spare, spare_len, chip->ecc.strength);
+ if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ return;
+ }
+
+ /* Update the stats and max_bitflips */
+ mtd->ecc_stats.corrected += bf;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
+}
+
+/*
+ * Check whether a chunk is correct according to the hardware ECC engine.
+ * mtd->ecc_stats.corrected is updated, as well as max_bitflips. However,
+ * mtd->ecc_stats.failed is not: the function instead returns a non-zero
+ * value indicating that the emptiness of the subpage must be checked
+ * before actually declaring the subpage as "corrupted".
+ */
+static int marvell_nfc_hw_ecc_check_bitflips(struct nand_chip *chip,
+ unsigned int *max_bitflips)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int bf = 0;
+ u32 ndsr;
+
+ ndsr = readl_relaxed(nfc->regs + NDSR);
+
+ /* Check uncorrectable error flag */
+ if (ndsr & NDSR_UNCERR) {
+ writel_relaxed(ndsr, nfc->regs + NDSR);
+
+		/*
+		 * Do not increment ->ecc_stats.failed now; instead, return a
+		 * non-zero value to indicate that this chunk was apparently
+		 * bad and should be checked for emptiness. If the chunk (with
+		 * its ECC bytes) is not declared empty, the calling function
+		 * must increment the failure count.
+		 */
+ return -EBADMSG;
+ }
+
+ /* Check correctable error flag */
+ if (ndsr & NDSR_CORERR) {
+ writel_relaxed(ndsr, nfc->regs + NDSR);
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
+ bf = NDSR_ERRCNT(ndsr);
+ else
+ bf = 1;
+ }
+
+ /* Update the stats and max_bitflips */
+ mtd->ecc_stats.corrected += bf;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
+
+ return 0;
+}
+
+/* Hamming read helpers */
+static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
+ u8 *data_buf, u8 *oob_buf,
+ bool raw, int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_DBC |
+ NDCB0_CMD1(NAND_CMD_READ0) |
+ NDCB0_CMD2(NAND_CMD_READSTART),
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ };
+ unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ int ret;
+
+ /* NFCv2 needs more information about the operation being executed */
+ if (nfc->caps->is_nfcv2)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (data/oob)");
+ if (ret)
+ return ret;
+
+	/*
+	 * Read the page then the OOB area. Unlike what is shown in current
+	 * documentation, spare bytes are protected by the ECC engine and must
+	 * lie at the beginning of the OOB area, otherwise running this driver
+	 * on legacy systems will prevent the discovery of the BBM/BBT.
+	 */
+ if (nfc->use_dma) {
+ marvell_nfc_xfer_data_dma(nfc, DMA_FROM_DEVICE,
+ lt->data_bytes + oob_bytes);
+ memcpy(data_buf, nfc->dma_buf, lt->data_bytes);
+ memcpy(oob_buf, nfc->dma_buf + lt->data_bytes, oob_bytes);
+ } else {
+ marvell_nfc_xfer_data_in_pio(nfc, data_buf, lt->data_bytes);
+ marvell_nfc_xfer_data_in_pio(nfc, oob_buf, oob_bytes);
+ }
+
+ ret = marvell_nfc_wait_cmdd(chip);
+ return ret;
+}
+
+static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ marvell_nfc_select_target(chip, chip->cur_cs);
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+static int marvell_nfc_hw_ecc_hmg_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ unsigned int full_sz = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int max_bitflips = 0, ret;
+ u8 *raw_buf;
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+ marvell_nfc_enable_hw_ecc(chip);
+ marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
+ page);
+ ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (!ret)
+ return max_bitflips;
+
+ /*
+ * When ECC failures are detected, check if the full page has been
+ * written or not. Ignore the failure if it is actually empty.
+ */
+ raw_buf = kmalloc(full_sz, GFP_KERNEL);
+ if (!raw_buf)
+ return -ENOMEM;
+
+ marvell_nfc_hw_ecc_hmg_do_read_page(chip, raw_buf, raw_buf +
+ lt->data_bytes, true, page);
+ marvell_nfc_check_empty_chunk(chip, raw_buf, full_sz, NULL, 0, NULL, 0,
+ &max_bitflips);
+ kfree(raw_buf);
+
+ return max_bitflips;
+}
+
+/*
+ * Because the spare area in Hamming layouts is not protected by the ECC
+ * engine (even if it appears before the ECC bytes when reading), the
+ * ->read_oob_raw() function also stands for ->read_oob().
+ */
+static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct nand_chip *chip, int page)
+{
+ u8 *buf = nand_get_data_buf(chip);
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+/* Hamming write helpers */
+static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
+ const u8 *data_buf,
+ const u8 *oob_buf, bool raw,
+ int page)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_CMD1(NAND_CMD_SEQIN) |
+ NDCB0_CMD2(NAND_CMD_PAGEPROG) |
+ NDCB0_DBC,
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ };
+ unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ u8 status;
+ int ret;
+
+ /* NFCv2 needs more information about the operation being executed */
+ if (nfc->caps->is_nfcv2)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
+ "WRDREQ while loading FIFO (data)");
+ if (ret)
+ return ret;
+
+ /* Write the page then the OOB area */
+ if (nfc->use_dma) {
+ memcpy(nfc->dma_buf, data_buf, lt->data_bytes);
+ memcpy(nfc->dma_buf + lt->data_bytes, oob_buf, oob_bytes);
+ marvell_nfc_xfer_data_dma(nfc, DMA_TO_DEVICE, lt->data_bytes +
+ lt->ecc_bytes + lt->spare_bytes);
+ } else {
+ marvell_nfc_xfer_data_out_pio(nfc, data_buf, lt->data_bytes);
+ marvell_nfc_xfer_data_out_pio(nfc, oob_buf, oob_bytes);
+ }
+
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ ret = marvell_nfc_wait_op(chip,
+ PSEC_TO_MSEC(sdr->tPROG_max));
+ if (ret)
+ return ret;
+
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ marvell_nfc_select_target(chip, chip->cur_cs);
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+static int marvell_nfc_hw_ecc_hmg_write_page(struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+ marvell_nfc_enable_hw_ecc(chip);
+ ret = marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ false, page);
+ marvell_nfc_disable_hw_ecc(chip);
+
+ return ret;
+}
+
+/*
+ * Because the spare area in Hamming layouts is not protected by the ECC
+ * engine (even if it appears before the ECC bytes when reading), the
+ * ->write_oob_raw() function also stands for ->write_oob().
+ */
+static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct nand_chip *chip,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *buf = nand_get_data_buf(chip);
+
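+	/* Fill the data area with 0xFF so only the OOB bytes are effectively programmed */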
+ memset(buf, 0xFF, mtd->writesize);
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+/* BCH read helpers */
+static int marvell_nfc_hw_ecc_bch_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ u8 *oob = chip->oob_poi;
+ int chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int ecc_len = lt->ecc_bytes;
+ int chunk;
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
+ if (oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* Update last chunk length */
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ ecc_len = lt->last_ecc_bytes;
+ }
+
+		/* Read data bytes */
+ nand_change_read_column_op(chip, chunk * chunk_size,
+ buf + (lt->data_bytes * chunk),
+ data_len, false);
+
+ /* Read spare bytes */
+ nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
+ spare_len, false, false);
+
+ /* Read ECC bytes */
+ nand_read_data_op(chip, oob + ecc_offset +
+ (ALIGN(lt->ecc_bytes, 32) * chunk),
+ ecc_len, false, false);
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
+ u8 *data, unsigned int data_len,
+ u8 *spare, unsigned int spare_len,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int i, ret;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_LEN_OVRD,
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ .ndcb[3] = data_len + spare_len,
+ };
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return;
+
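+	/* Only the first chunk carries the READ0/READSTART command pair */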
+ if (chunk == 0)
+ nfc_op.ndcb[0] |= NDCB0_DBC |
+ NDCB0_CMD1(NAND_CMD_READ0) |
+ NDCB0_CMD2(NAND_CMD_READSTART);
+
+	/*
+	 * Trigger the monolithic read on the first chunk, then naked reads on
+	 * intermediate chunks and finally a last naked read on the last chunk.
+	 */
+ if (chunk == 0)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+ else if (chunk < lt->nchunks - 1)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
+ else
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+
+	/*
+	 * According to the datasheet, when reading from NDDB
+	 * with BCH enabled, after each 32-byte read, we
+	 * have to make sure that the NDSR.RDDREQ bit is set.
+	 *
+	 * Drain the FIFO, 8 32-bit reads at a time, and skip
+	 * the polling on the last read.
+	 *
+	 * Length is a multiple of 32 bytes, hence a multiple of 8 too.
+	 */
+ for (i = 0; i < data_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
+ marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (data)");
+ marvell_nfc_xfer_data_in_pio(nfc, data,
+ FIFO_DEPTH * BCH_SEQ_READS);
+ data += FIFO_DEPTH * BCH_SEQ_READS;
+ }
+
+ for (i = 0; i < spare_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
+ marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (OOB)");
+ marvell_nfc_xfer_data_in_pio(nfc, spare,
+ FIFO_DEPTH * BCH_SEQ_READS);
+ spare += FIFO_DEPTH * BCH_SEQ_READS;
+ }
+}
+
+static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip,
+ u8 *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int data_len = lt->data_bytes, spare_len = lt->spare_bytes;
+ u8 *data = buf, *spare = chip->oob_poi;
+ int max_bitflips = 0;
+ u32 failure_mask = 0;
+ int chunk, ret;
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
+	/*
+	 * With BCH, the OOB area is not fully used (and thus not entirely
+	 * read), so unexpected bytes could show up at the end of the OOB
+	 * buffer if it is not explicitly erased.
+	 */
+ if (oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ marvell_nfc_enable_hw_ecc(chip);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* Update length for the last chunk */
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ }
+
+ /* Read the chunk and detect number of bitflips */
+ marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
+ spare, spare_len, page);
+ ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
+ if (ret)
+ failure_mask |= BIT(chunk);
+
+ data += data_len;
+ spare += spare_len;
+ }
+
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (!failure_mask)
+ return max_bitflips;
+
+	/*
+	 * Please note that dumping the ECC bytes during a normal read with OOB
+	 * would add a significant overhead as the ECC bytes are "consumed" by
+	 * the controller in normal mode and must be re-read in raw mode. To
+	 * avoid hurting performance, we prefer not to include them. The user
+	 * should re-read the page in raw mode if the ECC bytes are required.
+	 */
+
+	/*
+	 * In case there is any subpage read error, we usually re-read only ECC
+	 * bytes in raw mode and check if the whole page is empty. In this case,
+	 * it is normal that the ECC check failed and we just ignore the error.
+	 *
+	 * However, it has been empirically observed that for some layouts (e.g.
+	 * 2k page, 8b strength per 512B chunk), the controller tries to correct
+	 * bits and may itself create bitflips in the erased area. To overcome
+	 * this strange behavior, the whole page is re-read in raw mode, not
+	 * only the ECC bytes.
+	 */
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ int data_off_in_page, spare_off_in_page, ecc_off_in_page;
+ int data_off, spare_off, ecc_off;
+ int data_len, spare_len, ecc_len;
+
+ /* No failure reported for this chunk, move to the next one */
+ if (!(failure_mask & BIT(chunk)))
+ continue;
+
+ data_off_in_page = chunk * (lt->data_bytes + lt->spare_bytes +
+ lt->ecc_bytes);
+ spare_off_in_page = data_off_in_page +
+ (chunk < lt->full_chunk_cnt ? lt->data_bytes :
+ lt->last_data_bytes);
+ ecc_off_in_page = spare_off_in_page +
+ (chunk < lt->full_chunk_cnt ? lt->spare_bytes :
+ lt->last_spare_bytes);
+
+ data_off = chunk * lt->data_bytes;
+ spare_off = chunk * lt->spare_bytes;
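+		/*
+		 * The "+ 2" below is assumed to mirror the
+		 * ALIGN(lt->ecc_bytes, 32) spacing used by the raw accessors
+		 * (exact when each chunk carries 30 ECC bytes).
+		 */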
+ ecc_off = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes +
+ (chunk * (lt->ecc_bytes + 2));
+
+ data_len = chunk < lt->full_chunk_cnt ? lt->data_bytes :
+ lt->last_data_bytes;
+ spare_len = chunk < lt->full_chunk_cnt ? lt->spare_bytes :
+ lt->last_spare_bytes;
+ ecc_len = chunk < lt->full_chunk_cnt ? lt->ecc_bytes :
+ lt->last_ecc_bytes;
+
+ /*
+ * Only re-read the ECC bytes, unless we are using the 2k/8b
+ * layout which is buggy in the sense that the ECC engine will
+ * try to correct data bytes anyway, creating bitflips. In this
+ * case, re-read the entire page.
+ */
+ if (lt->writesize == 2048 && lt->strength == 8) {
+ nand_change_read_column_op(chip, data_off_in_page,
+ buf + data_off, data_len,
+ false);
+ nand_change_read_column_op(chip, spare_off_in_page,
+ chip->oob_poi + spare_off, spare_len,
+ false);
+ }
+
+ nand_change_read_column_op(chip, ecc_off_in_page,
+ chip->oob_poi + ecc_off, ecc_len,
+ false);
+
+		/* Check the entire chunk (data + spare + ecc) for emptiness */
+ marvell_nfc_check_empty_chunk(chip, buf + data_off, data_len,
+ chip->oob_poi + spare_off, spare_len,
+ chip->oob_poi + ecc_off, ecc_len,
+ &max_bitflips);
+ }
+
+ return max_bitflips;
+}
+
+static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct nand_chip *chip, int page)
+{
+ u8 *buf = nand_get_data_buf(chip);
+
+ return chip->ecc.read_page_raw(chip, buf, true, page);
+}
+
+static int marvell_nfc_hw_ecc_bch_read_oob(struct nand_chip *chip, int page)
+{
+ u8 *buf = nand_get_data_buf(chip);
+
+ return chip->ecc.read_page(chip, buf, true, page);
+}
+
+/* BCH write helpers */
+static int marvell_nfc_hw_ecc_bch_write_page_raw(struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int full_chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int ecc_len = lt->ecc_bytes;
+ int spare_offset = 0;
+ int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes;
+ int chunk;
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ ecc_len = lt->last_ecc_bytes;
+ }
+
+ /* Point to the column of the next chunk */
+ nand_change_write_column_op(chip, chunk * full_chunk_size,
+ NULL, 0, false);
+
+ /* Write the data */
+ nand_write_data_op(chip, buf + (chunk * lt->data_bytes),
+ data_len, false);
+
+ if (!oob_required)
+ continue;
+
+ /* Write the spare bytes */
+ if (spare_len)
+ nand_write_data_op(chip, chip->oob_poi + spare_offset,
+ spare_len, false);
+
+ /* Write the ECC bytes */
+ if (ecc_len)
+ nand_write_data_op(chip, chip->oob_poi + ecc_offset,
+ ecc_len, false);
+
+ spare_offset += spare_len;
+ ecc_offset += ALIGN(ecc_len, 32);
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int
+marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
+ const u8 *data, unsigned int data_len,
+ const u8 *spare, unsigned int spare_len,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ u32 xtype;
+ int ret;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
+ .ndcb[3] = data_len + spare_len,
+ };
+
+	/*
+	 * The first operation dispatches the SEQIN command, issues the address
+	 * cycles and asks for the first chunk of data.
+	 * All operations in the middle (if any) will issue a naked write and
+	 * also ask for data.
+	 * The last operation (if any) asks for the last chunk of data through
+	 * a last naked write.
+	 */
+ if (chunk == 0) {
+ if (lt->nchunks == 1)
+ xtype = XTYPE_MONOLITHIC_RW;
+ else
+ xtype = XTYPE_WRITE_DISPATCH;
+
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_CMD1(NAND_CMD_SEQIN);
+ nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
+ nfc_op.ndcb[2] |= NDCB2_ADDR5_PAGE(page);
+ } else if (chunk < lt->nchunks - 1) {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
+ } else {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ }
+
+ /* Always dispatch the PAGEPROG command on the last chunk */
+ if (chunk == lt->nchunks - 1)
+ nfc_op.ndcb[0] |= NDCB0_CMD2(NAND_CMD_PAGEPROG) | NDCB0_DBC;
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
+ "WRDREQ while loading FIFO (data)");
+ if (ret)
+ return ret;
+
+ /* Transfer the contents */
+ iowrite32_rep(nfc->regs + NDDB, data, FIFO_REP(data_len));
+ iowrite32_rep(nfc->regs + NDDB, spare, FIFO_REP(spare_len));
+
+ return 0;
+}
+
+static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ const u8 *data = buf;
+ const u8 *spare = chip->oob_poi;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int chunk, ret;
+ u8 status;
+
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
+	/* Spare data will be written anyway, so fill it with 0xFF to avoid garbage */
+ if (!oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ marvell_nfc_enable_hw_ecc(chip);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ }
+
+ marvell_nfc_hw_ecc_bch_write_chunk(chip, chunk, data, data_len,
+ spare, spare_len, page);
+ data += data_len;
+ spare += spare_len;
+
+		/*
+		 * Waiting only for CMDD or PAGED is not enough: the ECC bytes
+		 * would only be partially written. No flag is set once the
+		 * operation is really finished, but the ND_RUN bit is cleared,
+		 * so wait for it before stepping into the next command.
+		 */
+ marvell_nfc_wait_ndrun(chip);
+ }
+
+ ret = marvell_nfc_wait_op(chip, PSEC_TO_MSEC(sdr->tPROG_max));
+
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (ret)
+ return ret;
+
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct nand_chip *chip,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *buf = nand_get_data_buf(chip);
+
+ memset(buf, 0xFF, mtd->writesize);
+
+ return chip->ecc.write_page_raw(chip, buf, true, page);
+}
+
+static int marvell_nfc_hw_ecc_bch_write_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *buf = nand_get_data_buf(chip);
+
+ memset(buf, 0xFF, mtd->writesize);
+
+ return chip->ecc.write_page(chip, buf, true, page);
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static void marvell_nfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct marvell_nfc_op *nfc_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ bool first_cmd = true;
+ unsigned int op_id;
+ int i;
+
+ /* Reset the input structure as most of its fields will be OR'ed */
+ memset(nfc_op, 0, sizeof(struct marvell_nfc_op));
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+ int len;
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (first_cmd)
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD1(instr->ctx.cmd.opcode);
+ else
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD2(instr->ctx.cmd.opcode) |
+ NDCB0_DBC;
+
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ first_cmd = false;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ nfc_op->ndcb[0] |= NDCB0_ADDR_CYC(naddrs);
+
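+			/* Pack the first four address cycles into NDCB1, one byte each */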
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ nfc_op->ndcb[1] |= addrs[i] << (8 * i);
+
+ if (naddrs >= 5)
+ nfc_op->ndcb[2] |= NDCB2_ADDR5_CYC(addrs[4]);
+ if (naddrs >= 6)
+ nfc_op->ndcb[3] |= NDCB3_ADDR6_CYC(addrs[5]);
+ if (naddrs == 7)
+ nfc_op->ndcb[3] |= NDCB3_ADDR7_CYC(addrs[6]);
+
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ);
+ if (nfc->caps->is_nfcv2) {
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
+ NDCB0_LEN_OVRD;
+ len = nand_subop_get_data_len(subop, op_id);
+ nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
+ }
+ nfc_op->data_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE);
+ if (nfc->caps->is_nfcv2) {
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
+ NDCB0_LEN_OVRD;
+ len = nand_subop_get_data_len(subop, op_id);
+ nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
+ }
+ nfc_op->data_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ nfc_op->rdy_delay_ns = instr->delay_ns;
+ break;
+ }
+ }
+}
+
+static int marvell_nfc_xfer_data_pio(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct marvell_nfc_op *nfc_op)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct nand_op_instr *instr = nfc_op->data_instr;
+ unsigned int op_id = nfc_op->data_instr_idx;
+ unsigned int len = nand_subop_get_data_len(subop, op_id);
+ unsigned int offset = nand_subop_get_data_start_off(subop, op_id);
+ bool reading = (instr->type == NAND_OP_DATA_IN_INSTR);
+ int ret;
+
+ if (instr->ctx.data.force_8bit)
+ marvell_nfc_force_byte_access(chip, true);
+
+ if (reading) {
+ u8 *in = instr->ctx.data.buf.in + offset;
+
+ ret = marvell_nfc_xfer_data_in_pio(nfc, in, len);
+ } else {
+ const u8 *out = instr->ctx.data.buf.out + offset;
+
+ ret = marvell_nfc_xfer_data_out_pio(nfc, out, len);
+ }
+
+ if (instr->ctx.data.force_8bit)
+ marvell_nfc_force_byte_access(chip, false);
+
+ return ret;
+}
+
+static int marvell_nfc_monolithic_access_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ bool reading;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
+ "RDDREQ/WRDREQ while draining raw data");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (reading) {
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ if (!reading) {
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+
+ /*
+ * NDCR ND_RUN bit should be cleared automatically at the end of each
+ * operation but experience shows that the behavior is buggy when it
+ * comes to writes (with LEN_OVRD). Clear it by hand in this case.
+ */
+ if (!reading) {
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_naked_access_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+
+	/*
+	 * Naked accesses are different in that they need to be flagged as
+	 * naked by the controller. Reset the controller register fields that
+	 * describe the operation type and refill them according to the
+	 * ongoing operation.
+	 */
+ nfc_op.ndcb[0] &= ~(NDCB0_CMD_TYPE(TYPE_MASK) |
+ NDCB0_CMD_XTYPE(XTYPE_MASK));
+ switch (subop->instrs[0].type) {
+ case NAND_OP_CMD_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_CMD);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_ADDR);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE) |
+ NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ break;
+ default:
+ /* This should never happen */
+ break;
+ }
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+
+ if (!nfc_op.data_instr) {
+ ret = marvell_nfc_wait_cmdd(chip);
+ cond_delay(nfc_op.cle_ale_delay_ns);
+ return ret;
+ }
+
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
+ "RDDREQ/WRDREQ while draining raw data");
+ if (ret)
+ return ret;
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ /*
+ * NDCR ND_RUN bit should be cleared automatically at the end of each
+ * operation but experience shows that the behavior is buggy when it
+ * comes to writes (with LEN_OVRD). Clear it by hand in this case.
+ */
+ if (subop->instrs[0].type == NAND_OP_DATA_OUT_INSTR) {
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_naked_waitrdy_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return ret;
+}
+
+static int marvell_nfc_read_id_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ_ID);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while reading ID");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_read_status_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_STATUS);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while reading status");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_reset_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_RESET);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_erase_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_ERASE);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return 0;
+}
+
+static const struct nand_op_parser marvell_nfcv2_op_parser = NAND_OP_PARSER(
+ /* Monolithic reads/writes */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_monolithic_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC_NFCV2),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_monolithic_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ /* Naked commands */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_waitrdy_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static const struct nand_op_parser marvell_nfcv1_op_parser = NAND_OP_PARSER(
+ /* Naked commands not supported, use a function for each pattern */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_erase_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_read_status_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_reset_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_waitrdy_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static int marvell_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
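+	/* Do not touch the hardware when the operation is only being checked */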
+ if (!check_only)
+ marvell_nfc_select_target(chip, op->cs);
+
+ if (nfc->caps->is_nfcv2)
+ return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
+ op, check_only);
+ else
+ return nand_op_parser_exec_op(chip, &marvell_nfcv1_op_parser,
+ op, check_only);
+}
+
+/*
+ * Layouts were broken in the old pxa3xx_nand driver; these are supposed to
+ * be usable.
+ */
+static int marvell_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = (lt->full_chunk_cnt * lt->ecc_bytes) +
+ lt->last_ecc_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int marvell_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+
+ if (section)
+ return -ERANGE;
+
+ /*
+ * Bootrom looks in bytes 0 & 5 for bad blocks for the
+ * 4KB page / 4bit BCH combination.
+ */
+ if (mtd->writesize == SZ_4K && lt->data_bytes == SZ_2K)
+ oobregion->offset = 6;
+ else
+ oobregion->offset = 2;
+
+ oobregion->length = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = {
+ .ecc = marvell_nand_ooblayout_ecc,
+ .free = marvell_nand_ooblayout_free,
+};
+
+static int marvell_nand_hw_ecc_controller_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *l;
+ int i;
+
+ if (!nfc->caps->is_nfcv2 &&
+ (mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) {
+ dev_err(nfc->dev,
+ "NFCv1: writesize (%d) cannot be bigger than a chunk (%d)\n",
+ mtd->writesize, MAX_CHUNK_SIZE - mtd->oobsize);
+ return -ENOTSUPP;
+ }
+
+ to_marvell_nand(chip)->layout = NULL;
+ for (i = 0; i < ARRAY_SIZE(marvell_nfc_layouts); i++) {
+ l = &marvell_nfc_layouts[i];
+ if (mtd->writesize == l->writesize &&
+ ecc->size == l->chunk && ecc->strength == l->strength) {
+ to_marvell_nand(chip)->layout = l;
+ break;
+ }
+ }
+
+ if (!to_marvell_nand(chip)->layout ||
+ (!nfc->caps->is_nfcv2 && ecc->strength > 1)) {
+ dev_err(nfc->dev,
+ "ECC strength %d at page size %d is not supported\n",
+ ecc->strength, mtd->writesize);
+ return -ENOTSUPP;
+ }
+
+	/* Special care for the layout 2k/8-bit/512B */
+	if (l->writesize == 2048 && l->strength == 8) {
+		if (mtd->oobsize < 128) {
+			dev_err(nfc->dev, "Requested layout needs at least 128 OOB bytes\n");
+			return -ENOTSUPP;
+		}
+
+		chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+	}
+
+ mtd_set_ooblayout(mtd, &marvell_nand_ooblayout_ops);
+ ecc->steps = l->nchunks;
+ ecc->size = l->data_bytes;
+
+ if (ecc->strength == 1) {
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ ecc->read_page_raw = marvell_nfc_hw_ecc_hmg_read_page_raw;
+ ecc->read_page = marvell_nfc_hw_ecc_hmg_read_page;
+ ecc->read_oob_raw = marvell_nfc_hw_ecc_hmg_read_oob_raw;
+ ecc->read_oob = ecc->read_oob_raw;
+ ecc->write_page_raw = marvell_nfc_hw_ecc_hmg_write_page_raw;
+ ecc->write_page = marvell_nfc_hw_ecc_hmg_write_page;
+ ecc->write_oob_raw = marvell_nfc_hw_ecc_hmg_write_oob_raw;
+ ecc->write_oob = ecc->write_oob_raw;
+ } else {
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
+ ecc->strength = 16;
+ ecc->read_page_raw = marvell_nfc_hw_ecc_bch_read_page_raw;
+ ecc->read_page = marvell_nfc_hw_ecc_bch_read_page;
+ ecc->read_oob_raw = marvell_nfc_hw_ecc_bch_read_oob_raw;
+ ecc->read_oob = marvell_nfc_hw_ecc_bch_read_oob;
+ ecc->write_page_raw = marvell_nfc_hw_ecc_bch_write_page_raw;
+ ecc->write_page = marvell_nfc_hw_ecc_bch_write_page;
+ ecc->write_oob_raw = marvell_nfc_hw_ecc_bch_write_oob_raw;
+ ecc->write_oob = marvell_nfc_hw_ecc_bch_write_oob;
+ }
+
+ return 0;
+}
+
+static int marvell_nand_ecc_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int ret;
+
+ if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
+ (!ecc->size || !ecc->strength)) {
+ if (requirements->step_size && requirements->strength) {
+ ecc->size = requirements->step_size;
+ ecc->strength = requirements->strength;
+ } else {
+ dev_info(nfc->dev,
+ "No minimum ECC strength, using 1b/512B\n");
+ ecc->size = 512;
+ ecc->strength = 1;
+ }
+ }
+
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = marvell_nand_hw_ecc_controller_init(mtd, ecc);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
+ mtd->writesize != SZ_2K) {
+ dev_err(nfc->dev, "NFCv1 cannot write %d bytes pages\n",
+ mtd->writesize);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_mirror_pattern
+};
+
+static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ unsigned int period_ns = 1000000000 / clk_get_rate(nfc->core_clk) * 2;
+ const struct nand_sdr_timings *sdr;
+ struct marvell_nfc_timings nfc_tmg;
+ int read_delay;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ if (nfc->caps->max_mode_number && nfc->caps->max_mode_number < conf->timings.mode)
+ return -EOPNOTSUPP;
+
+	/*
+	 * SDR timings are given in pico-seconds while NFC timings must be
+	 * expressed in NAND controller clock cycles, which is half the
+	 * frequency of the accessible core clock retrieved by clk_get_rate().
+	 * This is not written anywhere in the datasheet but was observed
+	 * with an oscilloscope.
+	 *
+	 * The NFC datasheet gives equations from which those calculations
+	 * are derived; they tend to be slightly more restrictive than the
+	 * given core timings and may improve the overall speed.
+	 */
+ nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1;
+ nfc_tmg.tRH = nfc_tmg.tRP;
+ nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1;
+ nfc_tmg.tWH = nfc_tmg.tWP;
+ nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns);
+ nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1;
+ nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns);
+ /*
+ * Read delay is the time of propagation from SoC pins to NFC internal
+ * logic. With non-EDO timings, this is MIN_RD_DEL_CNT clock cycles. In
+ * EDO mode, an additional delay of tRH must be taken into account so
+ * the data is sampled on the falling edge instead of the rising edge.
+ */
+ read_delay = sdr->tRC_min >= 30000 ?
+ MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH;
+
+ nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns);
+	/*
+	 * tWHR and tRHW are supposed to be read-to-write delays (and vice
+	 * versa) but in some cases, i.e. when doing a change column, they must
+	 * be greater than that to make sure the tCCS delay is respected.
+	 */
+ nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min),
+ period_ns) - 2;
+ nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min),
+ period_ns);
+
+ /*
+ * NFCv2: Use WAIT_MODE (wait for RB line), do not rely only on delays.
+ * NFCv1: No WAIT_MODE, tR must be maximal.
+ */
+ if (nfc->caps->is_nfcv2) {
+ nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns);
+ } else {
+ nfc_tmg.tR = TO_CYCLES64(sdr->tWB_max + sdr->tR_max,
+ period_ns);
+ if (nfc_tmg.tR + 3 > nfc_tmg.tCH)
+ nfc_tmg.tR = nfc_tmg.tCH - 3;
+ else
+ nfc_tmg.tR = 0;
+ }
+
+ if (chipnr < 0)
+ return 0;
+
+ marvell_nand->ndtr0 =
+ NDTR0_TRP(nfc_tmg.tRP) |
+ NDTR0_TRH(nfc_tmg.tRH) |
+ NDTR0_ETRP(nfc_tmg.tRP) |
+ NDTR0_TWP(nfc_tmg.tWP) |
+ NDTR0_TWH(nfc_tmg.tWH) |
+ NDTR0_TCS(nfc_tmg.tCS) |
+ NDTR0_TCH(nfc_tmg.tCH);
+
+ marvell_nand->ndtr1 =
+ NDTR1_TAR(nfc_tmg.tAR) |
+ NDTR1_TWHR(nfc_tmg.tWHR) |
+ NDTR1_TR(nfc_tmg.tR);
+
+ if (nfc->caps->is_nfcv2) {
+ marvell_nand->ndtr0 |=
+ NDTR0_RD_CNT_DEL(read_delay) |
+ NDTR0_SELCNTR |
+ NDTR0_TADL(nfc_tmg.tADL);
+
+ marvell_nand->ndtr1 |=
+ NDTR1_TRHW(nfc_tmg.tRHW) |
+ NDTR1_WAIT_MODE;
+ }
+
+ /*
+ * Reset nfc->selected_chip so the next command will cause the timing
+ * registers to be updated in marvell_nfc_select_target().
+ */
+ nfc->selected_chip = NULL;
+
+ return 0;
+}
+
+static int marvell_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(nfc->dev);
+ int ret;
+
+ if (pdata && pdata->flash_bbt)
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ /*
+ * We'll use a bad block table stored in-flash and don't
+ * allow writing the bad block marker to the flash.
+ */
+ chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /* Save the chip-specific fields of NDCR */
+ marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
+ if (chip->options & NAND_BUSWIDTH_16)
+ marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ /*
+ * On small page NANDs, only one cycle is needed to pass the
+ * column address.
+ */
+ if (mtd->writesize <= 512) {
+ marvell_nand->addr_cyc = 1;
+ } else {
+ marvell_nand->addr_cyc = 2;
+ marvell_nand->ndcr |= NDCR_RA_START;
+ }
+
+ /*
+ * Now add the number of cycles needed to pass the row
+ * address.
+ *
+	 * Addressing a chip using CS 2 or 3 should also need the third row
+	 * cycle but due to inconsistencies in the documentation and lack of
+	 * hardware to test this situation, this case is not supported.
+ */
+ if (chip->options & NAND_ROW_ADDR_3)
+ marvell_nand->addr_cyc += 3;
+ else
+ marvell_nand->addr_cyc += 2;
+
+ if (pdata) {
+ chip->ecc.size = pdata->ecc_step_size;
+ chip->ecc.strength = pdata->ecc_strength;
+ }
+
+ ret = marvell_nand_ecc_init(mtd, &chip->ecc);
+ if (ret) {
+ dev_err(nfc->dev, "ECC init failed: %d\n", ret);
+ return ret;
+ }
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
+		/*
+		 * Subpage write is not available with hardware ECC; also
+		 * prohibit subpage read, as subpage access would otherwise
+		 * still be allowed from userspace and subpage writes, if used,
+		 * would lead to numerous uncorrectable ECC errors.
+		 */
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ }
+
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * We keep the MTD name unchanged to avoid breaking platforms
+ * where the MTD cmdline parser is used and the bootloader
+ * has not been updated to use the new naming scheme.
+ */
+ mtd->name = "pxa3xx_nand-0";
+ } else if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+ * updated to pass a new mtdparts parameter on the cmdline, you
+ * should define the following property in your NAND node, ie:
+ *
+ * label = "main-storage";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
+ "%s:nand.%d", dev_name(nfc->dev),
+ marvell_nand->sels[0].cs);
+ if (!mtd->name) {
+ dev_err(nfc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops marvell_nand_controller_ops = {
+ .attach_chip = marvell_nand_attach_chip,
+ .exec_op = marvell_nfc_exec_op,
+ .setup_interface = marvell_nfc_setup_interface,
+};
+
+static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
+ struct device_node *np)
+{
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(dev);
+ struct marvell_nand_chip *marvell_nand;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int nsels, ret, i;
+ u32 cs, rb;
+
+	/*
+	 * The legacy "num-cs" property indicates the number of CS on the only
+	 * chip connected to the controller (legacy bindings do not support
+	 * more than one chip). The CS and RB pins are always #0.
+	 *
+	 * When not using legacy bindings, the "reg" and "nand-rb" properties
+	 * must be filled. For each chip, expressed as a subnode, "reg" points
+	 * to the CS lines and "nand-rb" to the RB line.
+	 */
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ nsels = 1;
+ } else {
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (nsels <= 0) {
+ dev_err(dev, "missing/invalid reg property\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Alloc the nand chip structure */
+ marvell_nand = devm_kzalloc(dev,
+ struct_size(marvell_nand, sels, nsels),
+ GFP_KERNEL);
+ if (!marvell_nand) {
+ dev_err(dev, "could not allocate chip structure\n");
+ return -ENOMEM;
+ }
+
+ marvell_nand->nsels = nsels;
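+	/* -1 means no die has been selected yet */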
+ marvell_nand->selected_die = -1;
+
+ for (i = 0; i < nsels; i++) {
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * Legacy bindings use the CS lines in natural
+ * order (0, 1, ...)
+ */
+ cs = i;
+ } else {
+ /* Retrieve CS id */
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (cs >= nfc->caps->max_cs_nb) {
+ dev_err(dev, "invalid reg value: %u (max CS = %d)\n",
+ cs, nfc->caps->max_cs_nb);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %d already assigned\n", cs);
+ return -EINVAL;
+ }
+
+		/*
+		 * The cs variable represents the chip select id, which must be
+		 * converted into bit fields for NDCB0 and NDCB2 to select the
+		 * right chip. Unfortunately, due to a lack of information on
+		 * the subject and inconsistent documentation, the user should
+		 * not use CS1 and CS3 at all as asserting them is not supported
+		 * in a reliable way (due to multiplexing inside the ADDR5
+		 * field).
+		 */
+ marvell_nand->sels[i].cs = cs;
+ switch (cs) {
+ case 0:
+ case 2:
+ marvell_nand->sels[i].ndcb0_csel = 0;
+ break;
+ case 1:
+ case 3:
+ marvell_nand->sels[i].ndcb0_csel = NDCB0_CSEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Retrieve RB id */
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /* Legacy bindings always use RB #0 */
+ rb = 0;
+ } else {
+ ret = of_property_read_u32_index(np, "nand-rb", i,
+ &rb);
+ if (ret) {
+ dev_err(dev,
+ "could not retrieve RB property: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (rb >= nfc->caps->max_rb_nb) {
+ dev_err(dev, "invalid reg value: %u (max RB = %d)\n",
+ rb, nfc->caps->max_rb_nb);
+ return -EINVAL;
+ }
+
+ marvell_nand->sels[i].rb = rb;
+ }
+
+ chip = &marvell_nand->chip;
+ chip->controller = &nfc->controller;
+ nand_set_flash_node(chip, np);
+
+ if (of_property_read_bool(np, "marvell,nand-keep-config"))
+ chip->options |= NAND_KEEP_TIMINGS;
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = dev;
+
+ /*
+ * Save a reference value for timing registers before
+ * ->setup_interface() is called.
+ */
+ marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0);
+ marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);
+
+ chip->options |= NAND_BUSWIDTH_AUTO;
+
+ ret = nand_scan(chip, marvell_nand->nsels);
+ if (ret) {
+ dev_err(dev, "could not scan the nand chip\n");
+ return ret;
+ }
+
+ if (pdata)
+ /* Legacy bindings support only one chip */
+ ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+ else
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&marvell_nand->node, &nfc->chips);
+
+ return 0;
+}
+
+static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
+{
+ struct marvell_nand_chip *entry, *temp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
+ chip = &entry->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&entry->node);
+ }
+}
+
+static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int max_cs = nfc->caps->max_cs_nb;
+ int nchips;
+ int ret;
+
+ if (!np)
+ nchips = 1;
+ else
+ nchips = of_get_child_count(np);
+
+ if (nchips > max_cs) {
+ dev_err(dev, "too many NAND chips: %d (max = %d CS)\n", nchips,
+ max_cs);
+ return -EINVAL;
+ }
+
+ /*
+	 * Legacy bindings do not use child nodes to describe NAND chip
+	 * properties and layout. Instead, NAND properties are mixed with
+	 * the controller ones, and partitions are defined as direct
+	 * subnodes of the NAND controller node.
+ */
+	if (nfc->caps->legacy_of_bindings)
+		return marvell_nand_chip_init(dev, nfc, np);
+
+ for_each_child_of_node(np, nand_np) {
+ ret = marvell_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ goto cleanup_chips;
+ }
+ }
+
+ return 0;
+
+cleanup_chips:
+ marvell_nand_chips_cleanup(nfc);
+
+ return ret;
+}
+
+static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
+{
+ struct platform_device *pdev = container_of(nfc->dev,
+ struct platform_device,
+ dev);
+ struct dma_slave_config config = {};
+ struct resource *r;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_PXA_DMA)) {
+ dev_warn(nfc->dev,
+ "DMA not enabled in configuration\n");
+ return -ENOTSUPP;
+ }
+
+ ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ nfc->dma_chan = dma_request_chan(nfc->dev, "data");
+ if (IS_ERR(nfc->dma_chan)) {
+ ret = PTR_ERR(nfc->dma_chan);
+ nfc->dma_chan = NULL;
+ return dev_err_probe(nfc->dev, ret, "DMA channel request failed\n");
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ ret = -ENXIO;
+ goto release_channel;
+ }
+
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.src_addr = r->start + NDDB;
+ config.dst_addr = r->start + NDDB;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+ ret = dmaengine_slave_config(nfc->dma_chan, &config);
+ if (ret < 0) {
+ dev_err(nfc->dev, "Failed to configure DMA channel\n");
+ goto release_channel;
+ }
+
+ /*
+	 * DMA transfers must operate on lengths that are multiples of 32,
+	 * and that length may be bigger than the destination buffer. Use
+	 * this bounce buffer for the DMA transfers instead, then copy the
+	 * desired amount of data into the caller-provided buffer.
+ */
+ nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!nfc->dma_buf) {
+ ret = -ENOMEM;
+ goto release_channel;
+ }
+
+ nfc->use_dma = true;
+
+ return 0;
+
+release_channel:
+ dma_release_channel(nfc->dma_chan);
+ nfc->dma_chan = NULL;
+
+ return ret;
+}
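+
+/*
+ * Illustrative sketch (editorial note, not part of the driver): how the
+ * bounce buffer allocated above is typically consumed on the read path -
+ * DMA a 32-byte-rounded length into nfc->dma_buf, then copy only what the
+ * caller asked for. The helper name is hypothetical:
+ *
+ *	static void marvell_nfc_dma_read_copy(struct marvell_nfc *nfc,
+ *					      void *buf, unsigned int len)
+ *	{
+ *		unsigned int dma_len = ALIGN(len, 32);
+ *
+ *		// ...run the DMA transfer of dma_len bytes into
+ *		// nfc->dma_buf, bounded by MAX_CHUNK_SIZE...
+ *		memcpy(buf, nfc->dma_buf, len);
+ *	}
+ */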
+
+static void marvell_nfc_reset(struct marvell_nfc *nfc)
+{
+ /*
+	 * ECC operations and interrupts are only enabled when specifically
+	 * needed. ECC shall not be activated in the early stages (it would
+	 * make probe fail). The arbiter flag, even if marked "reserved",
+	 * must be set (empirical finding). The SPARE_EN bit must always be
+	 * set, or the ECC bytes will not sit at the expected offset in the
+	 * read page and ECC protection would fail.
+ */
+ writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
+ NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
+ writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+}
+
+static int marvell_nfc_init(struct marvell_nfc *nfc)
+{
+ struct device_node *np = nfc->dev->of_node;
+
+ /*
+	 * Some SoCs like A7k/A8k need to manually enable the NAND
+	 * controller, gated clocks and reset bits, so as not to depend on
+	 * the bootloader. This is done through the System Functions
+	 * registers.
+ */
+ if (nfc->caps->need_system_controller) {
+ struct regmap *sysctrl_base =
+ syscon_regmap_lookup_by_phandle(np,
+ "marvell,system-controller");
+
+ if (IS_ERR(sysctrl_base))
+ return PTR_ERR(sysctrl_base);
+
+ regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX,
+ GENCONF_SOC_DEVICE_MUX_NFC_EN |
+ GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST |
+ GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST |
+ GENCONF_SOC_DEVICE_MUX_NFC_INT_EN |
+ GENCONF_SOC_DEVICE_MUX_NFC_DEVBUS_ARB_EN);
+
+ regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL,
+ GENCONF_CLK_GATING_CTRL_ND_GATE,
+ GENCONF_CLK_GATING_CTRL_ND_GATE);
+ }
+
+ /* Configure the DMA if appropriate */
+ if (!nfc->caps->is_nfcv2)
+ marvell_nfc_init_dma(nfc);
+
+ marvell_nfc_reset(nfc);
+
+ return 0;
+}
+
+static int marvell_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct marvell_nfc *nfc;
+ int ret;
+ int irq;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(struct marvell_nfc),
+ GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = dev;
+ nand_controller_init(&nfc->controller);
+ nfc->controller.ops = &marvell_nand_controller_ops;
+ INIT_LIST_HEAD(&nfc->chips);
+
+ nfc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ nfc->core_clk = devm_clk_get(&pdev->dev, "core");
+
+	/* Manage the legacy case (when the first clock was not named) */
+ if (nfc->core_clk == ERR_PTR(-ENOENT))
+ nfc->core_clk = devm_clk_get(&pdev->dev, NULL);
+
+ if (IS_ERR(nfc->core_clk))
+ return PTR_ERR(nfc->core_clk);
+
+ ret = clk_prepare_enable(nfc->core_clk);
+ if (ret)
+ return ret;
+
+ nfc->reg_clk = devm_clk_get(&pdev->dev, "reg");
+ if (IS_ERR(nfc->reg_clk)) {
+ if (PTR_ERR(nfc->reg_clk) != -ENOENT) {
+ ret = PTR_ERR(nfc->reg_clk);
+ goto unprepare_core_clk;
+ }
+
+ nfc->reg_clk = NULL;
+ }
+
+ ret = clk_prepare_enable(nfc->reg_clk);
+ if (ret)
+ goto unprepare_core_clk;
+
+ marvell_nfc_disable_int(nfc, NDCR_ALL_INT);
+ marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+ ret = devm_request_irq(dev, irq, marvell_nfc_isr,
+ 0, "marvell-nfc", nfc);
+ if (ret)
+ goto unprepare_reg_clk;
+
+ /* Get NAND controller capabilities */
+ if (pdev->id_entry)
+ nfc->caps = (void *)pdev->id_entry->driver_data;
+ else
+ nfc->caps = of_device_get_match_data(&pdev->dev);
+
+ if (!nfc->caps) {
+ dev_err(dev, "Could not retrieve NFC caps\n");
+ ret = -EINVAL;
+ goto unprepare_reg_clk;
+ }
+
+ /* Init the controller and then probe the chips */
+ ret = marvell_nfc_init(nfc);
+ if (ret)
+ goto unprepare_reg_clk;
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = marvell_nand_chips_init(dev, nfc);
+ if (ret)
+ goto release_dma;
+
+ return 0;
+
+release_dma:
+ if (nfc->use_dma)
+ dma_release_channel(nfc->dma_chan);
+unprepare_reg_clk:
+ clk_disable_unprepare(nfc->reg_clk);
+unprepare_core_clk:
+ clk_disable_unprepare(nfc->core_clk);
+
+ return ret;
+}
+
+static void marvell_nfc_remove(struct platform_device *pdev)
+{
+ struct marvell_nfc *nfc = platform_get_drvdata(pdev);
+
+ marvell_nand_chips_cleanup(nfc);
+
+ if (nfc->use_dma) {
+ dmaengine_terminate_all(nfc->dma_chan);
+ dma_release_channel(nfc->dma_chan);
+ }
+
+ clk_disable_unprepare(nfc->reg_clk);
+ clk_disable_unprepare(nfc->core_clk);
+}
+
+static int __maybe_unused marvell_nfc_suspend(struct device *dev)
+{
+ struct marvell_nfc *nfc = dev_get_drvdata(dev);
+ struct marvell_nand_chip *chip;
+
+ list_for_each_entry(chip, &nfc->chips, node)
+ marvell_nfc_wait_ndrun(&chip->chip);
+
+ clk_disable_unprepare(nfc->reg_clk);
+ clk_disable_unprepare(nfc->core_clk);
+
+ return 0;
+}
+
+static int __maybe_unused marvell_nfc_resume(struct device *dev)
+{
+ struct marvell_nfc *nfc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(nfc->core_clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(nfc->reg_clk);
+ if (ret < 0) {
+ clk_disable_unprepare(nfc->core_clk);
+ return ret;
+ }
+
+ /*
+ * Reset nfc->selected_chip so the next command will cause the timing
+ * registers to be restored in marvell_nfc_select_target().
+ */
+ nfc->selected_chip = NULL;
+
+ /* Reset registers that have lost their contents */
+ marvell_nfc_reset(nfc);
+
+ return 0;
+}
+
+static const struct dev_pm_ops marvell_nfc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(marvell_nfc_suspend, marvell_nfc_resume)
+};
+
+static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .need_system_controller = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_ac5_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .is_nfcv2 = true,
+ .max_mode_number = 3,
+};
+
+static const struct marvell_nfc_caps marvell_armada370_nfc_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_pxa3xx_nfc_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .use_dma = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada_8k_nfc_legacy_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .need_system_controller = true,
+ .legacy_of_bindings = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada370_nfc_legacy_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .legacy_of_bindings = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_pxa3xx_nfc_legacy_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .legacy_of_bindings = true,
+ .use_dma = true,
+};
+
+static const struct platform_device_id marvell_nfc_platform_ids[] = {
+ {
+ .name = "pxa3xx-nand",
+ .driver_data = (kernel_ulong_t)&marvell_pxa3xx_nfc_legacy_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, marvell_nfc_platform_ids);
+
+static const struct of_device_id marvell_nfc_of_ids[] = {
+ {
+ .compatible = "marvell,armada-8k-nand-controller",
+ .data = &marvell_armada_8k_nfc_caps,
+ },
+ {
+ .compatible = "marvell,ac5-nand-controller",
+ .data = &marvell_ac5_caps,
+ },
+ {
+ .compatible = "marvell,armada370-nand-controller",
+ .data = &marvell_armada370_nfc_caps,
+ },
+ {
+ .compatible = "marvell,pxa3xx-nand-controller",
+ .data = &marvell_pxa3xx_nfc_caps,
+ },
+ /* Support for old/deprecated bindings: */
+ {
+ .compatible = "marvell,armada-8k-nand",
+ .data = &marvell_armada_8k_nfc_legacy_caps,
+ },
+ {
+ .compatible = "marvell,armada370-nand",
+ .data = &marvell_armada370_nfc_legacy_caps,
+ },
+ {
+ .compatible = "marvell,pxa3xx-nand",
+ .data = &marvell_pxa3xx_nfc_legacy_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, marvell_nfc_of_ids);
+
+static struct platform_driver marvell_nfc_driver = {
+ .driver = {
+ .name = "marvell-nfc",
+ .of_match_table = marvell_nfc_of_ids,
+ .pm = &marvell_nfc_pm_ops,
+ },
+ .id_table = marvell_nfc_platform_ids,
+ .probe = marvell_nfc_probe,
+ .remove_new = marvell_nfc_remove,
+};
+module_platform_driver(marvell_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Marvell NAND controller driver");
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
new file mode 100644
index 0000000000..a506e658d4
--- /dev/null
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -0,0 +1,1602 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Amlogic Meson Nand Flash Controller Driver
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Liang Yang <liang.yang@amlogic.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/sched/task_stack.h>
+
+#define NFC_REG_CMD 0x00
+#define NFC_CMD_IDLE (0xc << 14)
+#define NFC_CMD_CLE (0x5 << 14)
+#define NFC_CMD_ALE (0x6 << 14)
+#define NFC_CMD_ADL ((0 << 16) | (3 << 20))
+#define NFC_CMD_ADH ((1 << 16) | (3 << 20))
+#define NFC_CMD_AIL ((2 << 16) | (3 << 20))
+#define NFC_CMD_AIH ((3 << 16) | (3 << 20))
+#define NFC_CMD_SEED ((8 << 16) | (3 << 20))
+#define NFC_CMD_M2N ((0 << 17) | (2 << 20))
+#define NFC_CMD_N2M ((1 << 17) | (2 << 20))
+#define NFC_CMD_RB BIT(20)
+#define NFC_CMD_SCRAMBLER_ENABLE BIT(19)
+#define NFC_CMD_SCRAMBLER_DISABLE 0
+#define NFC_CMD_SHORTMODE_DISABLE 0
+#define NFC_CMD_RB_INT BIT(14)
+#define NFC_CMD_RB_INT_NO_PIN ((0xb << 10) | BIT(18) | BIT(16))
+
+#define NFC_CMD_GET_SIZE(x) (((x) >> 22) & GENMASK(4, 0))
+
+#define NFC_REG_CFG 0x04
+#define NFC_REG_DADR 0x08
+#define NFC_REG_IADR 0x0c
+#define NFC_REG_BUF 0x10
+#define NFC_REG_INFO 0x14
+#define NFC_REG_DC 0x18
+#define NFC_REG_ADR 0x1c
+#define NFC_REG_DL 0x20
+#define NFC_REG_DH 0x24
+#define NFC_REG_CADR 0x28
+#define NFC_REG_SADR 0x2c
+#define NFC_REG_PINS 0x30
+#define NFC_REG_VER 0x38
+
+#define NFC_RB_IRQ_EN BIT(21)
+
+#define CLK_DIV_SHIFT 0
+#define CLK_DIV_WIDTH 6
+
+#define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \
+ ( \
+ (cmd_dir) | \
+		(ran)				|	\
+ ((bch) << 14) | \
+ ((short_mode) << 13) | \
+ (((page_size) & 0x7f) << 6) | \
+ ((pages) & 0x3f) \
+ )
+
+#define GENCMDDADDRL(adl, addr) ((adl) | ((addr) & 0xffff))
+#define GENCMDDADDRH(adh, addr) ((adh) | (((addr) >> 16) & 0xffff))
+#define GENCMDIADDRL(ail, addr) ((ail) | ((addr) & 0xffff))
+#define GENCMDIADDRH(aih, addr) ((aih) | (((addr) >> 16) & 0xffff))
+
+#define DMA_DIR(dir) ((dir) ? NFC_CMD_N2M : NFC_CMD_M2N)
+#define DMA_ADDR_ALIGN 8
+
+#define ECC_CHECK_RETURN_FF (-1)
+
+#define NAND_CE0 (0xe << 10)
+#define NAND_CE1 (0xd << 10)
+
+#define DMA_BUSY_TIMEOUT 0x100000
+#define CMD_FIFO_EMPTY_TIMEOUT 1000
+
+#define MAX_CE_NUM 2
+
+/* eMMC clock register, misc control */
+#define CLK_SELECT_NAND BIT(31)
+
+#define NFC_CLK_CYCLE 6
+
+/* nand flash controller delay 3 ns */
+#define NFC_DEFAULT_DELAY 3000
+
+#define ROW_ADDER(page, index) (((page) >> (8 * (index))) & 0xff)
+#define MAX_CYCLE_ADDRS 5
+#define DIRREAD 1
+#define DIRWRITE 0
+
+#define ECC_PARITY_BCH8_512B 14
+#define ECC_COMPLETE BIT(31)
+#define ECC_ERR_CNT(x) (((x) >> 24) & GENMASK(5, 0))
+#define ECC_ZERO_CNT(x) (((x) >> 16) & GENMASK(5, 0))
+#define ECC_UNCORRECTABLE 0x3f
+
+#define PER_INFO_BYTE 8
+
+#define NFC_CMD_RAW_LEN GENMASK(13, 0)
+
+#define NFC_COLUMN_ADDR_0 0
+#define NFC_COLUMN_ADDR_1 0
+
+struct meson_nfc_nand_chip {
+ struct list_head node;
+ struct nand_chip nand;
+ unsigned long clk_rate;
+ unsigned long level1_divider;
+ u32 bus_timing;
+ u32 twb;
+ u32 tadl;
+ u32 tbers_max;
+
+ u32 bch_mode;
+ u8 *data_buf;
+ __le64 *info_buf;
+ u32 nsels;
+ u8 sels[];
+};
+
+struct meson_nand_ecc {
+ u32 bch;
+ u32 strength;
+ u32 size;
+};
+
+struct meson_nfc_data {
+ const struct nand_ecc_caps *ecc_caps;
+};
+
+struct meson_nfc_param {
+ u32 chip_select;
+ u32 rb_select;
+};
+
+struct nand_rw_cmd {
+ u32 cmd0;
+ u32 addrs[MAX_CYCLE_ADDRS];
+ u32 cmd1;
+};
+
+struct nand_timing {
+ u32 twb;
+ u32 tadl;
+ u32 tbers_max;
+};
+
+struct meson_nfc {
+ struct nand_controller controller;
+ struct clk *core_clk;
+ struct clk *device_clk;
+ struct clk *nand_clk;
+ struct clk_divider nand_divider;
+
+ unsigned long clk_rate;
+ u32 bus_timing;
+
+ struct device *dev;
+ void __iomem *reg_base;
+ void __iomem *reg_clk;
+ struct completion completion;
+ struct list_head chips;
+ const struct meson_nfc_data *data;
+ struct meson_nfc_param param;
+ struct nand_timing timing;
+ union {
+ int cmd[32];
+ struct nand_rw_cmd rw;
+ } cmdfifo;
+
+ dma_addr_t daddr;
+ dma_addr_t iaddr;
+ u32 info_bytes;
+
+ unsigned long assigned_cs;
+ bool no_rb_pin;
+};
+
+enum {
+ NFC_ECC_BCH8_512 = 1,
+ NFC_ECC_BCH8_1K,
+ NFC_ECC_BCH24_1K,
+ NFC_ECC_BCH30_1K,
+ NFC_ECC_BCH40_1K,
+ NFC_ECC_BCH50_1K,
+ NFC_ECC_BCH60_1K,
+};
+
+#define MESON_ECC_DATA(b, s, sz) { .bch = (b), .strength = (s), .size = (sz) }
+
+static struct meson_nand_ecc meson_ecc[] = {
+ MESON_ECC_DATA(NFC_ECC_BCH8_512, 8, 512),
+ MESON_ECC_DATA(NFC_ECC_BCH8_1K, 8, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH24_1K, 24, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH30_1K, 30, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH40_1K, 40, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH50_1K, 50, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH60_1K, 60, 1024),
+};
+
+static int meson_nand_calc_ecc_bytes(int step_size, int strength)
+{
+ int ecc_bytes;
+
+ if (step_size == 512 && strength == 8)
+ return ECC_PARITY_BCH8_512B;
+
+ ecc_bytes = DIV_ROUND_UP(strength * fls(step_size * 8), 8);
+ ecc_bytes = ALIGN(ecc_bytes, 2);
+
+ return ecc_bytes;
+}
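+
+/*
+ * Worked example (editorial note, not driver code): for a 1024B step,
+ * fls(1024 * 8) = 14, so BCH60 needs DIV_ROUND_UP(60 * 14, 8) = 105 bytes,
+ * rounded up to 106 by ALIGN(..., 2); BCH8 on the same step needs
+ * DIV_ROUND_UP(8 * 14, 8) = 14 bytes. The BCH8/512B case is special-cased
+ * because the controller expects exactly ECC_PARITY_BCH8_512B (14) bytes.
+ */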
+
+NAND_ECC_CAPS_SINGLE(meson_gxl_ecc_caps,
+ meson_nand_calc_ecc_bytes, 1024, 8, 24, 30, 40, 50, 60);
+
+static const int axg_stepinfo_strengths[] = { 8 };
+
+static const struct nand_ecc_step_info axg_stepinfo[] = {
+ {
+ .stepsize = 1024,
+ .strengths = axg_stepinfo_strengths,
+ .nstrengths = ARRAY_SIZE(axg_stepinfo_strengths)
+ },
+ {
+ .stepsize = 512,
+ .strengths = axg_stepinfo_strengths,
+ .nstrengths = ARRAY_SIZE(axg_stepinfo_strengths)
+ },
+};
+
+static const struct nand_ecc_caps meson_axg_ecc_caps = {
+ .stepinfos = axg_stepinfo,
+ .nstepinfos = ARRAY_SIZE(axg_stepinfo),
+ .calc_ecc_bytes = meson_nand_calc_ecc_bytes,
+};
+
+static struct meson_nfc_nand_chip *to_meson_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct meson_nfc_nand_chip, nand);
+}
+
+static void meson_nfc_select_chip(struct nand_chip *nand, int chip)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int ret, value;
+
+ if (chip < 0 || WARN_ON_ONCE(chip >= meson_chip->nsels))
+ return;
+
+ nfc->param.chip_select = meson_chip->sels[chip] ? NAND_CE1 : NAND_CE0;
+ nfc->param.rb_select = nfc->param.chip_select;
+ nfc->timing.twb = meson_chip->twb;
+ nfc->timing.tadl = meson_chip->tadl;
+ nfc->timing.tbers_max = meson_chip->tbers_max;
+
+ if (nfc->clk_rate != meson_chip->clk_rate) {
+ ret = clk_set_rate(nfc->nand_clk, meson_chip->clk_rate);
+ if (ret) {
+ dev_err(nfc->dev, "failed to set clock rate\n");
+ return;
+ }
+ nfc->clk_rate = meson_chip->clk_rate;
+ }
+ if (nfc->bus_timing != meson_chip->bus_timing) {
+ value = (NFC_CLK_CYCLE - 1) | (meson_chip->bus_timing << 5);
+ writel(value, nfc->reg_base + NFC_REG_CFG);
+ writel((1 << 31), nfc->reg_base + NFC_REG_CMD);
+ nfc->bus_timing = meson_chip->bus_timing;
+ }
+}
+
+static void meson_nfc_cmd_idle(struct meson_nfc *nfc, u32 time)
+{
+ writel(nfc->param.chip_select | NFC_CMD_IDLE | (time & 0x3ff),
+ nfc->reg_base + NFC_REG_CMD);
+}
+
+static void meson_nfc_cmd_seed(struct meson_nfc *nfc, u32 seed)
+{
+ writel(NFC_CMD_SEED | (0xc2 + (seed & 0x7fff)),
+ nfc->reg_base + NFC_REG_CMD);
+}
+
+static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
+ int scrambler)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ u32 bch = meson_chip->bch_mode, cmd;
+ int len = mtd->writesize, pagesize, pages;
+
+ pagesize = nand->ecc.size;
+
+ if (raw) {
+ len = mtd->writesize + mtd->oobsize;
+ cmd = len | scrambler | DMA_DIR(dir);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ return;
+ }
+
+ pages = len / nand->ecc.size;
+
+ cmd = CMDRWGEN(DMA_DIR(dir), scrambler, bch,
+ NFC_CMD_SHORTMODE_DISABLE, pagesize, pages);
+
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+}
+
+static void meson_nfc_drain_cmd(struct meson_nfc *nfc)
+{
+ /*
+ * Insert two commands to make sure all valid commands are finished.
+ *
+	 * The NAND flash controller is designed as a two-stage pipeline -
+	 * a) fetch and b) execute.
+	 * There might be cases where the driver sees the command queue as
+	 * empty while the NAND flash controller still has two commands
+	 * buffered: one fetched into the NFC request queue (ready to run)
+	 * and another actively executing. Pushing two "IDLE" commands
+	 * therefore guarantees that the pipeline is emptied.
+ */
+ meson_nfc_cmd_idle(nfc, 0);
+ meson_nfc_cmd_idle(nfc, 0);
+}
+
+static int meson_nfc_wait_cmd_finish(struct meson_nfc *nfc,
+ unsigned int timeout_ms)
+{
+ u32 cmd_size = 0;
+ int ret;
+
+	/* Wait until the command FIFO is empty */
+ ret = readl_relaxed_poll_timeout(nfc->reg_base + NFC_REG_CMD, cmd_size,
+ !NFC_CMD_GET_SIZE(cmd_size),
+ 10, timeout_ms * 1000);
+ if (ret)
+		dev_err(nfc->dev, "timed out waiting for empty CMD FIFO\n");
+
+ return ret;
+}
+
+static int meson_nfc_wait_dma_finish(struct meson_nfc *nfc)
+{
+ meson_nfc_drain_cmd(nfc);
+
+ return meson_nfc_wait_cmd_finish(nfc, DMA_BUSY_TIMEOUT);
+}
+
+static u8 *meson_nfc_oob_ptr(struct nand_chip *nand, int i)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int len;
+
+ len = nand->ecc.size * (i + 1) + (nand->ecc.bytes + 2) * i;
+
+ return meson_chip->data_buf + len;
+}
+
+static u8 *meson_nfc_data_ptr(struct nand_chip *nand, int i)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int len, temp;
+
+ temp = nand->ecc.size + nand->ecc.bytes;
+ len = (temp + 2) * i;
+
+ return meson_chip->data_buf + len;
+}
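+
+/*
+ * Worked example (editorial note, not driver code): with ecc.size = 1024 and
+ * ecc.bytes = 14, each step occupies 1024 + 14 + 2 = 1040 bytes of data_buf,
+ * so step i's data starts at i * 1040 and its 16 OOB bytes (2 user + 14 ECC)
+ * follow the data: meson_nfc_oob_ptr(nand, 1) points at 1040 + 1024 = 2064.
+ */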
+
+static void meson_nfc_get_data_oob(struct nand_chip *nand,
+ u8 *buf, u8 *oobbuf)
+{
+ int i, oob_len = 0;
+ u8 *dsrc, *osrc;
+
+ oob_len = nand->ecc.bytes + 2;
+ for (i = 0; i < nand->ecc.steps; i++) {
+ if (buf) {
+ dsrc = meson_nfc_data_ptr(nand, i);
+ memcpy(buf, dsrc, nand->ecc.size);
+ buf += nand->ecc.size;
+ }
+ osrc = meson_nfc_oob_ptr(nand, i);
+ memcpy(oobbuf, osrc, oob_len);
+ oobbuf += oob_len;
+ }
+}
+
+static void meson_nfc_set_data_oob(struct nand_chip *nand,
+ const u8 *buf, u8 *oobbuf)
+{
+ int i, oob_len = 0;
+ u8 *dsrc, *osrc;
+
+ oob_len = nand->ecc.bytes + 2;
+ for (i = 0; i < nand->ecc.steps; i++) {
+ if (buf) {
+ dsrc = meson_nfc_data_ptr(nand, i);
+ memcpy(dsrc, buf, nand->ecc.size);
+ buf += nand->ecc.size;
+ }
+ osrc = meson_nfc_oob_ptr(nand, i);
+ memcpy(osrc, oobbuf, oob_len);
+ oobbuf += oob_len;
+ }
+}
+
+static int meson_nfc_wait_no_rb_pin(struct nand_chip *nand, int timeout_ms,
+ bool need_cmd_read0)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ u32 cmd, cfg;
+
+ meson_nfc_cmd_idle(nfc, nfc->timing.twb);
+ meson_nfc_drain_cmd(nfc);
+ meson_nfc_wait_cmd_finish(nfc, CMD_FIFO_EMPTY_TIMEOUT);
+
+ cfg = readl(nfc->reg_base + NFC_REG_CFG);
+ cfg |= NFC_RB_IRQ_EN;
+ writel(cfg, nfc->reg_base + NFC_REG_CFG);
+
+ reinit_completion(&nfc->completion);
+ nand_status_op(nand, NULL);
+
+	/* Use the maximum erase time as the timeout for waiting on R/B */
+ cmd = NFC_CMD_RB | NFC_CMD_RB_INT_NO_PIN | nfc->timing.tbers_max;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ if (!wait_for_completion_timeout(&nfc->completion,
+ msecs_to_jiffies(timeout_ms)))
+ return -ETIMEDOUT;
+
+ if (need_cmd_read0)
+ nand_exit_status_op(nand);
+
+ return 0;
+}
+
+static int meson_nfc_wait_rb_pin(struct meson_nfc *nfc, int timeout_ms)
+{
+ u32 cmd, cfg;
+ int ret = 0;
+
+ meson_nfc_cmd_idle(nfc, nfc->timing.twb);
+ meson_nfc_drain_cmd(nfc);
+ meson_nfc_wait_cmd_finish(nfc, CMD_FIFO_EMPTY_TIMEOUT);
+
+ cfg = readl(nfc->reg_base + NFC_REG_CFG);
+ cfg |= NFC_RB_IRQ_EN;
+ writel(cfg, nfc->reg_base + NFC_REG_CFG);
+
+ reinit_completion(&nfc->completion);
+
+	/* Use the maximum erase time as the timeout for waiting on R/B */
+ cmd = NFC_CMD_RB | NFC_CMD_RB_INT
+ | nfc->param.chip_select | nfc->timing.tbers_max;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ ret = wait_for_completion_timeout(&nfc->completion,
+ msecs_to_jiffies(timeout_ms));
+	if (ret == 0)
+		ret = -ETIMEDOUT;
+
+ return ret;
+}
+
+static int meson_nfc_queue_rb(struct nand_chip *nand, int timeout_ms,
+ bool need_cmd_read0)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+
+ if (nfc->no_rb_pin) {
+		/* This mode is used when there is no wired R/B pin.
+		 * It works like nand_soft_waitrdy(), but instead of
+		 * polling the NAND_CMD_STATUS bit in a software loop,
+		 * it waits for an interrupt: the controller monitors
+		 * the IO bus and raises an interrupt when it detects
+		 * NAND_CMD_STATUS on it. After the interrupt,
+		 * NAND_CMD_READ0 is sent to terminate the ready-wait
+		 * procedure if needed (in all cases except page
+		 * programming - this is the reason for the
+		 * 'need_cmd_read0' flag).
+		 */
+ return meson_nfc_wait_no_rb_pin(nand, timeout_ms,
+ need_cmd_read0);
+ } else {
+ return meson_nfc_wait_rb_pin(nfc, timeout_ms);
+ }
+}
+
+static void meson_nfc_set_user_byte(struct nand_chip *nand, u8 *oob_buf)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ int i, count;
+
+ for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
+ info = &meson_chip->info_buf[i];
+ *info |= oob_buf[count];
+ *info |= oob_buf[count + 1] << 8;
+ }
+}
+
+static void meson_nfc_get_user_byte(struct nand_chip *nand, u8 *oob_buf)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ int i, count;
+
+ for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
+ info = &meson_chip->info_buf[i];
+ oob_buf[count] = *info;
+ oob_buf[count + 1] = *info >> 8;
+ }
+}
+
+static int meson_nfc_ecc_correct(struct nand_chip *nand, u32 *bitflips,
+ u64 *correct_bitmap)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ int ret = 0, i;
+
+ for (i = 0; i < nand->ecc.steps; i++) {
+ info = &meson_chip->info_buf[i];
+ if (ECC_ERR_CNT(*info) != ECC_UNCORRECTABLE) {
+ mtd->ecc_stats.corrected += ECC_ERR_CNT(*info);
+ *bitflips = max_t(u32, *bitflips, ECC_ERR_CNT(*info));
+ *correct_bitmap |= BIT_ULL(i);
+ continue;
+ }
+ if ((nand->options & NAND_NEED_SCRAMBLING) &&
+ ECC_ZERO_CNT(*info) < nand->ecc.strength) {
+ mtd->ecc_stats.corrected += ECC_ZERO_CNT(*info);
+ *bitflips = max_t(u32, *bitflips,
+ ECC_ZERO_CNT(*info));
+ ret = ECC_CHECK_RETURN_FF;
+ } else {
+ ret = -EBADMSG;
+ }
+ }
+ return ret;
+}
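+
+/*
+ * Illustrative sketch (editorial note, not part of the driver): the per-step
+ * info word decoded above packs bit 31 = ECC_COMPLETE, bits 29-24 = the
+ * corrected bitflip count (0x3f meaning uncorrectable) and bits 21-16 = the
+ * zero-bit count used to detect erased scrambled pages. A standalone decoder
+ * under those assumptions:
+ *
+ *	static bool meson_info_word_ok(u64 info, u32 *bitflips)
+ *	{
+ *		if (!(info & ECC_COMPLETE))
+ *			return false;	// DMA has not completed this step
+ *		if (ECC_ERR_CNT(info) == ECC_UNCORRECTABLE)
+ *			return false;	// step is uncorrectable
+ *		*bitflips = ECC_ERR_CNT(info);
+ *		return true;
+ *	}
+ */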
+
+static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf,
+ int datalen, void *infobuf, int infolen,
+ enum dma_data_direction dir)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ u32 cmd;
+ int ret = 0;
+
+ nfc->daddr = dma_map_single(nfc->dev, databuf, datalen, dir);
+ ret = dma_mapping_error(nfc->dev, nfc->daddr);
+ if (ret) {
+ dev_err(nfc->dev, "DMA mapping error\n");
+ return ret;
+ }
+ cmd = GENCMDDADDRL(NFC_CMD_ADL, nfc->daddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ cmd = GENCMDDADDRH(NFC_CMD_ADH, nfc->daddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ if (infobuf) {
+ nfc->iaddr = dma_map_single(nfc->dev, infobuf, infolen, dir);
+ ret = dma_mapping_error(nfc->dev, nfc->iaddr);
+ if (ret) {
+ dev_err(nfc->dev, "DMA mapping error\n");
+ dma_unmap_single(nfc->dev,
+ nfc->daddr, datalen, dir);
+ return ret;
+ }
+ nfc->info_bytes = infolen;
+ cmd = GENCMDIADDRL(NFC_CMD_AIL, nfc->iaddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ cmd = GENCMDIADDRH(NFC_CMD_AIH, nfc->iaddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ }
+
+ return ret;
+}
+
+static void meson_nfc_dma_buffer_release(struct nand_chip *nand,
+ int datalen, int infolen,
+ enum dma_data_direction dir)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+
+ dma_unmap_single(nfc->dev, nfc->daddr, datalen, dir);
+ if (infolen) {
+ dma_unmap_single(nfc->dev, nfc->iaddr, infolen, dir);
+ nfc->info_bytes = 0;
+ }
+}
+
+static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int ret = 0;
+ u32 cmd;
+ u8 *info;
+
+ info = kzalloc(PER_INFO_BYTE, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = meson_nfc_dma_buffer_setup(nand, buf, len, info,
+ PER_INFO_BYTE, DMA_FROM_DEVICE);
+ if (ret)
+ goto out;
+
+ cmd = NFC_CMD_N2M | len;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ meson_nfc_drain_cmd(nfc);
+ meson_nfc_wait_cmd_finish(nfc, 1000);
+ meson_nfc_dma_buffer_release(nand, len, PER_INFO_BYTE, DMA_FROM_DEVICE);
+
+out:
+ kfree(info);
+
+ return ret;
+}
+
+static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int ret = 0;
+ u32 cmd;
+
+ ret = meson_nfc_dma_buffer_setup(nand, buf, len, NULL,
+ 0, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ cmd = NFC_CMD_M2N | len;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ meson_nfc_drain_cmd(nfc);
+ meson_nfc_wait_cmd_finish(nfc, 1000);
+ meson_nfc_dma_buffer_release(nand, len, 0, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand,
+ int page, bool in)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(nand));
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ u32 *addrs = nfc->cmdfifo.rw.addrs;
+ u32 cs = nfc->param.chip_select;
+ u32 cmd0, cmd_num, row_start;
+ int i;
+
+ cmd_num = sizeof(struct nand_rw_cmd) / sizeof(int);
+
+ cmd0 = in ? NAND_CMD_READ0 : NAND_CMD_SEQIN;
+ nfc->cmdfifo.rw.cmd0 = cs | NFC_CMD_CLE | cmd0;
+
+ addrs[0] = cs | NFC_CMD_ALE | NFC_COLUMN_ADDR_0;
+ if (mtd->writesize <= 512) {
+ cmd_num--;
+ row_start = 1;
+ } else {
+ addrs[1] = cs | NFC_CMD_ALE | NFC_COLUMN_ADDR_1;
+ row_start = 2;
+ }
+
+ addrs[row_start] = cs | NFC_CMD_ALE | ROW_ADDER(page, 0);
+ addrs[row_start + 1] = cs | NFC_CMD_ALE | ROW_ADDER(page, 1);
+
+ if (nand->options & NAND_ROW_ADDR_3)
+ addrs[row_start + 2] =
+ cs | NFC_CMD_ALE | ROW_ADDER(page, 2);
+ else
+ cmd_num--;
+
+ /* subtract cmd1 */
+ cmd_num--;
+
+ for (i = 0; i < cmd_num; i++)
+ writel_relaxed(nfc->cmdfifo.cmd[i],
+ nfc->reg_base + NFC_REG_CMD);
+
+ if (in) {
+ nfc->cmdfifo.rw.cmd1 = cs | NFC_CMD_CLE | NAND_CMD_READSTART;
+ writel(nfc->cmdfifo.rw.cmd1, nfc->reg_base + NFC_REG_CMD);
+ meson_nfc_queue_rb(nand, PSEC_TO_MSEC(sdr->tR_max), true);
+ } else {
+ meson_nfc_cmd_idle(nfc, nfc->timing.tadl);
+ }
+
+ return 0;
+}
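+
+/*
+ * Worked example (editorial note, not driver code): reading page 0x123 of a
+ * 2KiB-page chip with NAND_ROW_ADDR_3 feeds the FIFO cmd0 (READ0), two
+ * column cycles (0x00, 0x00) and three row cycles (0x23, 0x01, 0x00), then
+ * writes cmd1 (READSTART) and queues an R/B wait bounded by tR_max - seven
+ * command-register writes in total.
+ */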
+
+static int meson_nfc_write_page_sub(struct nand_chip *nand,
+ int page, int raw)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(nand));
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int data_len, info_len;
+ u32 cmd;
+ int ret;
+
+ meson_nfc_select_chip(nand, nand->cur_cs);
+
+ data_len = mtd->writesize + mtd->oobsize;
+ info_len = nand->ecc.steps * PER_INFO_BYTE;
+
+ ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRWRITE);
+ if (ret)
+ return ret;
+
+ ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf,
+ data_len, meson_chip->info_buf,
+ info_len, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ if (nand->options & NAND_NEED_SCRAMBLING) {
+ meson_nfc_cmd_seed(nfc, page);
+ meson_nfc_cmd_access(nand, raw, DIRWRITE,
+ NFC_CMD_SCRAMBLER_ENABLE);
+ } else {
+ meson_nfc_cmd_access(nand, raw, DIRWRITE,
+ NFC_CMD_SCRAMBLER_DISABLE);
+ }
+
+ cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_PAGEPROG;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ meson_nfc_queue_rb(nand, PSEC_TO_MSEC(sdr->tPROG_max), false);
+
+ meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int meson_nfc_write_page_raw(struct nand_chip *nand, const u8 *buf,
+ int oob_required, int page)
+{
+ u8 *oob_buf = nand->oob_poi;
+
+ meson_nfc_set_data_oob(nand, buf, oob_buf);
+
+ return meson_nfc_write_page_sub(nand, page, 1);
+}
+
+static int meson_nfc_write_page_hwecc(struct nand_chip *nand,
+ const u8 *buf, int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ u8 *oob_buf = nand->oob_poi;
+
+ memcpy(meson_chip->data_buf, buf, mtd->writesize);
+ memset(meson_chip->info_buf, 0, nand->ecc.steps * PER_INFO_BYTE);
+ meson_nfc_set_user_byte(nand, oob_buf);
+
+ return meson_nfc_write_page_sub(nand, page, 0);
+}
+
+static void meson_nfc_check_ecc_pages_valid(struct meson_nfc *nfc,
+ struct nand_chip *nand, int raw)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ u32 neccpages;
+ int ret;
+
+ neccpages = raw ? 1 : nand->ecc.steps;
+ info = &meson_chip->info_buf[neccpages - 1];
+ do {
+ usleep_range(10, 15);
+		/* The info buffer is updated by the NFC DMA engine */
+ smp_rmb();
+ dma_sync_single_for_cpu(nfc->dev, nfc->iaddr, nfc->info_bytes,
+ DMA_FROM_DEVICE);
+ ret = *info & ECC_COMPLETE;
+ } while (!ret);
+}
+
+static int meson_nfc_read_page_sub(struct nand_chip *nand,
+ int page, int raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int data_len, info_len;
+ int ret;
+
+ meson_nfc_select_chip(nand, nand->cur_cs);
+
+ data_len = mtd->writesize + mtd->oobsize;
+ info_len = nand->ecc.steps * PER_INFO_BYTE;
+
+ ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRREAD);
+ if (ret)
+ return ret;
+
+ ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf,
+ data_len, meson_chip->info_buf,
+ info_len, DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ if (nand->options & NAND_NEED_SCRAMBLING) {
+ meson_nfc_cmd_seed(nfc, page);
+ meson_nfc_cmd_access(nand, raw, DIRREAD,
+ NFC_CMD_SCRAMBLER_ENABLE);
+ } else {
+ meson_nfc_cmd_access(nand, raw, DIRREAD,
+ NFC_CMD_SCRAMBLER_DISABLE);
+ }
+
+ ret = meson_nfc_wait_dma_finish(nfc);
+ meson_nfc_check_ecc_pages_valid(nfc, nand, raw);
+
+ meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_FROM_DEVICE);
+
+ return ret;
+}
+
+static int meson_nfc_read_page_raw(struct nand_chip *nand, u8 *buf,
+ int oob_required, int page)
+{
+ u8 *oob_buf = nand->oob_poi;
+ int ret;
+
+ ret = meson_nfc_read_page_sub(nand, page, 1);
+ if (ret)
+ return ret;
+
+ meson_nfc_get_data_oob(nand, buf, oob_buf);
+
+ return 0;
+}
+
+static int meson_nfc_read_page_hwecc(struct nand_chip *nand, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ u64 correct_bitmap = 0;
+ u32 bitflips = 0;
+ u8 *oob_buf = nand->oob_poi;
+ int ret, i;
+
+ ret = meson_nfc_read_page_sub(nand, page, 0);
+ if (ret)
+ return ret;
+
+ meson_nfc_get_user_byte(nand, oob_buf);
+ ret = meson_nfc_ecc_correct(nand, &bitflips, &correct_bitmap);
+ if (ret == ECC_CHECK_RETURN_FF) {
+ if (buf)
+ memset(buf, 0xff, mtd->writesize);
+ memset(oob_buf, 0xff, mtd->oobsize);
+ } else if (ret < 0) {
+ if ((nand->options & NAND_NEED_SCRAMBLING) || !buf) {
+ mtd->ecc_stats.failed++;
+ return bitflips;
+ }
+ ret = meson_nfc_read_page_raw(nand, buf, 0, page);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nand->ecc.steps ; i++) {
+ u8 *data = buf + i * ecc->size;
+ u8 *oob = nand->oob_poi + i * (ecc->bytes + 2);
+
+ if (correct_bitmap & BIT_ULL(i))
+ continue;
+ ret = nand_check_erased_ecc_chunk(data, ecc->size,
+ oob, ecc->bytes + 2,
+ NULL, 0,
+ ecc->strength);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ bitflips = max_t(u32, bitflips, ret);
+ }
+ }
+ } else if (buf && buf != meson_chip->data_buf) {
+ memcpy(buf, meson_chip->data_buf, mtd->writesize);
+ }
+
+ return bitflips;
+}
+
+static int meson_nfc_read_oob_raw(struct nand_chip *nand, int page)
+{
+ return meson_nfc_read_page_raw(nand, NULL, 1, page);
+}
+
+static int meson_nfc_read_oob(struct nand_chip *nand, int page)
+{
+ return meson_nfc_read_page_hwecc(nand, NULL, 1, page);
+}
+
+static bool meson_nfc_is_buffer_dma_safe(const void *buffer)
+{
+ if ((uintptr_t)buffer % DMA_ADDR_ALIGN)
+ return false;
+
+	return virt_addr_valid(buffer) && !object_is_on_stack(buffer);
+}
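+
+/*
+ * Editorial note: a buffer counts as DMA-safe here when it is 8-byte
+ * aligned (DMA_ADDR_ALIGN), lies in the linear mapping (virt_addr_valid())
+ * and is not on the stack; anything else (vmalloc or stack memory handed
+ * down by upper layers) is bounced through the kzalloc()/kmemdup() copies
+ * made by the helpers below.
+ */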
+
+static void *
+meson_nand_op_get_dma_safe_input_buf(const struct nand_op_instr *instr)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_IN_INSTR))
+ return NULL;
+
+ if (meson_nfc_is_buffer_dma_safe(instr->ctx.data.buf.in))
+ return instr->ctx.data.buf.in;
+
+ return kzalloc(instr->ctx.data.len, GFP_KERNEL);
+}
+
+static void
+meson_nand_op_put_dma_safe_input_buf(const struct nand_op_instr *instr,
+ void *buf)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_IN_INSTR) ||
+ WARN_ON(!buf))
+ return;
+
+ if (buf == instr->ctx.data.buf.in)
+ return;
+
+ memcpy(instr->ctx.data.buf.in, buf, instr->ctx.data.len);
+ kfree(buf);
+}
+
+static void *
+meson_nand_op_get_dma_safe_output_buf(const struct nand_op_instr *instr)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_OUT_INSTR))
+ return NULL;
+
+ if (meson_nfc_is_buffer_dma_safe(instr->ctx.data.buf.out))
+ return (void *)instr->ctx.data.buf.out;
+
+ return kmemdup(instr->ctx.data.buf.out,
+ instr->ctx.data.len, GFP_KERNEL);
+}
+
+static void
+meson_nand_op_put_dma_safe_output_buf(const struct nand_op_instr *instr,
+ const void *buf)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_OUT_INSTR) ||
+ WARN_ON(!buf))
+ return;
+
+ if (buf != instr->ctx.data.buf.out)
+ kfree(buf);
+}
+
+static int meson_nfc_check_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ int op_id;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ const struct nand_op_instr *instr;
+
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ if (instr->ctx.data.len > NFC_CMD_RAW_LEN)
+ return -ENOTSUPP;
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int meson_nfc_exec_op(struct nand_chip *nand,
+ const struct nand_operation *op, bool check_only)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ const struct nand_op_instr *instr = NULL;
+ void *buf;
+ u32 op_id, delay_idle, cmd;
+ int err;
+ int i;
+
+ err = meson_nfc_check_op(nand, op);
+ if (err)
+ return err;
+
+ if (check_only)
+ return 0;
+
+ meson_nfc_select_chip(nand, op->cs);
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+ delay_idle = DIV_ROUND_UP(PSEC_TO_NSEC(instr->delay_ns),
+ meson_chip->level1_divider *
+ NFC_CLK_CYCLE);
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ cmd = nfc->param.chip_select | NFC_CMD_CLE;
+ cmd |= instr->ctx.cmd.opcode & 0xff;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ meson_nfc_cmd_idle(nfc, delay_idle);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ cmd = nfc->param.chip_select | NFC_CMD_ALE;
+ cmd |= instr->ctx.addr.addrs[i] & 0xff;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ }
+ meson_nfc_cmd_idle(nfc, delay_idle);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ buf = meson_nand_op_get_dma_safe_input_buf(instr);
+ if (!buf)
+ return -ENOMEM;
+ meson_nfc_read_buf(nand, buf, instr->ctx.data.len);
+ meson_nand_op_put_dma_safe_input_buf(instr, buf);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ buf = meson_nand_op_get_dma_safe_output_buf(instr);
+ if (!buf)
+ return -ENOMEM;
+ meson_nfc_write_buf(nand, buf, instr->ctx.data.len);
+ meson_nand_op_put_dma_safe_output_buf(instr, buf);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ meson_nfc_queue_rb(nand, instr->ctx.waitrdy.timeout_ms,
+ true);
+ if (instr->delay_ns)
+ meson_nfc_cmd_idle(nfc, delay_idle);
+ break;
+ }
+ }
+ meson_nfc_wait_cmd_finish(nfc, 1000);
+ return 0;
+}
+
+static int meson_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (section >= nand->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = 2 + (section * (2 + nand->ecc.bytes));
+ oobregion->length = nand->ecc.bytes;
+
+ return 0;
+}
+
+static int meson_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (section >= nand->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = section * (2 + nand->ecc.bytes);
+ oobregion->length = 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops meson_ooblayout_ops = {
+ .ecc = meson_ooblayout_ecc,
+ .free = meson_ooblayout_free,
+};
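+
+/*
+ * Worked example (editorial note, not driver code): with 4 ECC steps and
+ * ecc.bytes = 14, the OOB area is a sequence of 16-byte sections laid out
+ * as [2 free user bytes][14 ECC bytes], so section 1 has its free region at
+ * offset 16 (length 2) and its ECC region at offset 18 (length 14).
+ */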
+
+static int meson_nfc_clk_init(struct meson_nfc *nfc)
+{
+ struct clk_parent_data nfc_divider_parent_data[1] = {0};
+ struct clk_init_data init = {0};
+ int ret;
+
+ /* request core clock */
+ nfc->core_clk = devm_clk_get(nfc->dev, "core");
+ if (IS_ERR(nfc->core_clk)) {
+ dev_err(nfc->dev, "failed to get core clock\n");
+ return PTR_ERR(nfc->core_clk);
+ }
+
+ nfc->device_clk = devm_clk_get(nfc->dev, "device");
+ if (IS_ERR(nfc->device_clk)) {
+ dev_err(nfc->dev, "failed to get device clock\n");
+ return PTR_ERR(nfc->device_clk);
+ }
+
+ init.name = devm_kasprintf(nfc->dev,
+ GFP_KERNEL, "%s#div",
+ dev_name(nfc->dev));
+ if (!init.name)
+ return -ENOMEM;
+
+ init.ops = &clk_divider_ops;
+ nfc_divider_parent_data[0].fw_name = "device";
+ init.parent_data = nfc_divider_parent_data;
+ init.num_parents = 1;
+ nfc->nand_divider.reg = nfc->reg_clk;
+ nfc->nand_divider.shift = CLK_DIV_SHIFT;
+ nfc->nand_divider.width = CLK_DIV_WIDTH;
+ nfc->nand_divider.hw.init = &init;
+ nfc->nand_divider.flags = CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ROUND_CLOSEST |
+ CLK_DIVIDER_ALLOW_ZERO;
+
+ nfc->nand_clk = devm_clk_register(nfc->dev, &nfc->nand_divider.hw);
+ if (IS_ERR(nfc->nand_clk))
+ return PTR_ERR(nfc->nand_clk);
+
+	/* Init SD_EMMC_CLOCK to sane defaults with the minimum clock rate */
+ writel(CLK_SELECT_NAND | readl(nfc->reg_clk),
+ nfc->reg_clk);
+
+ ret = clk_prepare_enable(nfc->core_clk);
+ if (ret) {
+ dev_err(nfc->dev, "failed to enable core clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(nfc->device_clk);
+ if (ret) {
+ dev_err(nfc->dev, "failed to enable device clock\n");
+ goto err_device_clk;
+ }
+
+ ret = clk_prepare_enable(nfc->nand_clk);
+ if (ret) {
+		dev_err(nfc->dev, "failed to enable NFC divider clock\n");
+ goto err_nand_clk;
+ }
+
+ ret = clk_set_rate(nfc->nand_clk, 24000000);
+ if (ret)
+ goto err_disable_clk;
+
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(nfc->nand_clk);
+err_nand_clk:
+ clk_disable_unprepare(nfc->device_clk);
+err_device_clk:
+ clk_disable_unprepare(nfc->core_clk);
+ return ret;
+}
+
+static void meson_nfc_disable_clk(struct meson_nfc *nfc)
+{
+ clk_disable_unprepare(nfc->nand_clk);
+ clk_disable_unprepare(nfc->device_clk);
+ clk_disable_unprepare(nfc->core_clk);
+}
+
+static void meson_nfc_free_buffer(struct nand_chip *nand)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+
+ kfree(meson_chip->info_buf);
+ kfree(meson_chip->data_buf);
+}
+
+static int meson_chip_buffer_init(struct nand_chip *nand)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ u32 page_bytes, info_bytes, nsectors;
+
+ nsectors = mtd->writesize / nand->ecc.size;
+
+ page_bytes = mtd->writesize + mtd->oobsize;
+ info_bytes = nsectors * PER_INFO_BYTE;
+
+ meson_chip->data_buf = kmalloc(page_bytes, GFP_KERNEL);
+ if (!meson_chip->data_buf)
+ return -ENOMEM;
+
+ meson_chip->info_buf = kmalloc(info_bytes, GFP_KERNEL);
+ if (!meson_chip->info_buf) {
+ kfree(meson_chip->data_buf);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int meson_nfc_setup_interface(struct nand_chip *nand, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ const struct nand_sdr_timings *timings;
+ u32 div, bt_min, bt_max, tbers_clocks;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ div = DIV_ROUND_UP((timings->tRC_min / 1000), NFC_CLK_CYCLE);
+ bt_min = (timings->tREA_max + NFC_DEFAULT_DELAY) / div;
+ bt_max = (NFC_DEFAULT_DELAY + timings->tRHOH_min +
+ timings->tRC_min / 2) / div;
+
+ meson_chip->twb = DIV_ROUND_UP(PSEC_TO_NSEC(timings->tWB_max),
+ div * NFC_CLK_CYCLE);
+ meson_chip->tadl = DIV_ROUND_UP(PSEC_TO_NSEC(timings->tADL_min),
+ div * NFC_CLK_CYCLE);
+ tbers_clocks = DIV_ROUND_UP_ULL(PSEC_TO_NSEC(timings->tBERS_max),
+ div * NFC_CLK_CYCLE);
+ meson_chip->tbers_max = ilog2(tbers_clocks);
+ if (!is_power_of_2(tbers_clocks))
+ meson_chip->tbers_max++;
+
+ bt_min = DIV_ROUND_UP(bt_min, 1000);
+ bt_max = DIV_ROUND_UP(bt_max, 1000);
+
+ if (bt_max < bt_min)
+ return -EINVAL;
+
+ meson_chip->level1_divider = div;
+ meson_chip->clk_rate = 1000000000 / meson_chip->level1_divider;
+ meson_chip->bus_timing = (bt_min + bt_max) / 2 + 1;
+
+ return 0;
+}
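+
+/*
+ * Worked example (editorial note, not driver code): with tRC_min = 100ns,
+ * div = DIV_ROUND_UP(100, NFC_CLK_CYCLE) = 17, so the per-chip clk_rate
+ * becomes 1000000000 / 17 ~= 58.8 MHz; tbers_max stores ceil(log2()) of
+ * tBERS_max expressed in units of div * NFC_CLK_CYCLE nanoseconds, the
+ * encoded timeout later fed to the NFC_CMD_RB command.
+ */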
+
+static int meson_nand_bch_mode(struct nand_chip *nand)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int i;
+
+ if (nand->ecc.strength > 60 || nand->ecc.strength < 8)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(meson_ecc); i++) {
+ if (meson_ecc[i].strength == nand->ecc.strength &&
+ meson_ecc[i].size == nand->ecc.size) {
+ meson_chip->bch_mode = meson_ecc[i].bch;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void meson_nand_detach_chip(struct nand_chip *nand)
+{
+ meson_nfc_free_buffer(nand);
+}
+
+static int meson_nand_attach_chip(struct nand_chip *nand)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ int raw_writesize;
+ int ret;
+
+ if (!mtd->name) {
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
+ "%s:nand%d",
+ dev_name(nfc->dev),
+ meson_chip->sels[0]);
+ if (!mtd->name)
+ return -ENOMEM;
+ }
+
+ raw_writesize = mtd->writesize + mtd->oobsize;
+ if (raw_writesize > NFC_CMD_RAW_LEN) {
+		dev_err(nfc->dev, "write size too large for raw mode: %d > %ld\n",
+ raw_writesize, NFC_CMD_RAW_LEN);
+ return -EINVAL;
+ }
+
+ if (nand->bbt_options & NAND_BBT_USE_FLASH)
+ nand->bbt_options |= NAND_BBT_NO_OOB;
+
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+
+ ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps,
+ mtd->oobsize - 2);
+ if (ret) {
+		dev_err(nfc->dev, "failed to init ECC\n");
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, &meson_ooblayout_ops);
+
+ ret = meson_nand_bch_mode(nand);
+ if (ret)
+ return -EINVAL;
+
+ nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ nand->ecc.write_page_raw = meson_nfc_write_page_raw;
+ nand->ecc.write_page = meson_nfc_write_page_hwecc;
+ nand->ecc.write_oob_raw = nand_write_oob_std;
+ nand->ecc.write_oob = nand_write_oob_std;
+
+ nand->ecc.read_page_raw = meson_nfc_read_page_raw;
+ nand->ecc.read_page = meson_nfc_read_page_hwecc;
+ nand->ecc.read_oob_raw = meson_nfc_read_oob_raw;
+ nand->ecc.read_oob = meson_nfc_read_oob;
+
+ if (nand->options & NAND_BUSWIDTH_16) {
+		dev_err(nfc->dev, "16-bit bus width not supported\n");
+ return -EINVAL;
+ }
+ ret = meson_chip_buffer_init(nand);
+ if (ret)
+ return -ENOMEM;
+
+ return ret;
+}
+
+static const struct nand_controller_ops meson_nand_controller_ops = {
+ .attach_chip = meson_nand_attach_chip,
+ .detach_chip = meson_nand_detach_chip,
+ .setup_interface = meson_nfc_setup_interface,
+ .exec_op = meson_nfc_exec_op,
+};
+
+static int
+meson_nfc_nand_chip_init(struct device *dev,
+ struct meson_nfc *nfc, struct device_node *np)
+{
+ struct meson_nfc_nand_chip *meson_chip;
+ struct nand_chip *nand;
+ struct mtd_info *mtd;
+ int ret, i;
+ u32 tmp, nsels;
+ u32 nand_rb_val = 0;
+
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (!nsels || nsels > MAX_CE_NUM) {
+		dev_err(dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ meson_chip = devm_kzalloc(dev, struct_size(meson_chip, sels, nsels),
+ GFP_KERNEL);
+ if (!meson_chip)
+ return -ENOMEM;
+
+ meson_chip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &tmp);
+ if (ret) {
+			dev_err(dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %d already assigned\n", tmp);
+ return -EINVAL;
+ }
+ }
+
+ nand = &meson_chip->nand;
+ nand->controller = &nfc->controller;
+ nand->controller->ops = &meson_nand_controller_ops;
+ nand_set_flash_node(nand, np);
+ nand_set_controller_data(nand, nfc);
+
+ nand->options |= NAND_USES_DMA;
+ mtd = nand_to_mtd(nand);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+
+ ret = of_property_read_u32(np, "nand-rb", &nand_rb_val);
+ if (ret == -EINVAL)
+ nfc->no_rb_pin = true;
+ else if (ret)
+ return ret;
+
+ if (nand_rb_val)
+ return -EINVAL;
+
+ ret = nand_scan(nand, nsels);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register MTD device: %d\n", ret);
+ nand_cleanup(nand);
+ return ret;
+ }
+
+ list_add_tail(&meson_chip->node, &nfc->chips);
+
+ return 0;
+}
+
+static void meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
+{
+ struct meson_nfc_nand_chip *meson_chip;
+ struct mtd_info *mtd;
+
+ while (!list_empty(&nfc->chips)) {
+ meson_chip = list_first_entry(&nfc->chips,
+ struct meson_nfc_nand_chip, node);
+ mtd = nand_to_mtd(&meson_chip->nand);
+ WARN_ON(mtd_device_unregister(mtd));
+
+ nand_cleanup(&meson_chip->nand);
+ list_del(&meson_chip->node);
+ }
+}
+
+static int meson_nfc_nand_chips_init(struct device *dev,
+ struct meson_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int ret;
+
+ for_each_child_of_node(np, nand_np) {
+ ret = meson_nfc_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ meson_nfc_nand_chip_cleanup(nfc);
+ of_node_put(nand_np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static irqreturn_t meson_nfc_irq(int irq, void *id)
+{
+ struct meson_nfc *nfc = id;
+ u32 cfg;
+
+ cfg = readl(nfc->reg_base + NFC_REG_CFG);
+ if (!(cfg & NFC_RB_IRQ_EN))
+ return IRQ_NONE;
+
+ cfg &= ~(NFC_RB_IRQ_EN);
+ writel(cfg, nfc->reg_base + NFC_REG_CFG);
+
+ complete(&nfc->completion);
+ return IRQ_HANDLED;
+}
+
+static const struct meson_nfc_data meson_gxl_data = {
+ .ecc_caps = &meson_gxl_ecc_caps,
+};
+
+static const struct meson_nfc_data meson_axg_data = {
+ .ecc_caps = &meson_axg_ecc_caps,
+};
+
+static const struct of_device_id meson_nfc_id_table[] = {
+ {
+ .compatible = "amlogic,meson-gxl-nfc",
+ .data = &meson_gxl_data,
+ }, {
+ .compatible = "amlogic,meson-axg-nfc",
+ .data = &meson_axg_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, meson_nfc_id_table);
+
+static int meson_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct meson_nfc *nfc;
+ int ret, irq;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->data = of_device_get_match_data(&pdev->dev);
+ if (!nfc->data)
+ return -ENODEV;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+ init_completion(&nfc->completion);
+
+ nfc->dev = dev;
+
+ nfc->reg_base = devm_platform_ioremap_resource_byname(pdev, "nfc");
+ if (IS_ERR(nfc->reg_base))
+ return PTR_ERR(nfc->reg_base);
+
+ nfc->reg_clk = devm_platform_ioremap_resource_byname(pdev, "emmc");
+ if (IS_ERR(nfc->reg_clk))
+ return PTR_ERR(nfc->reg_clk);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+		return irq;
+
+ ret = meson_nfc_clk_init(nfc);
+ if (ret) {
+ dev_err(dev, "failed to initialize NAND clock\n");
+ return ret;
+ }
+
+ writel(0, nfc->reg_base + NFC_REG_CFG);
+ ret = devm_request_irq(dev, irq, meson_nfc_irq, 0, dev_name(dev), nfc);
+ if (ret) {
+ dev_err(dev, "failed to request NFC IRQ\n");
+ ret = -EINVAL;
+ }
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set DMA mask\n");
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = meson_nfc_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init NAND chips\n");
+ goto err_clk;
+ }
+
+ return 0;
+err_clk:
+ meson_nfc_disable_clk(nfc);
+ return ret;
+}
+
+static void meson_nfc_remove(struct platform_device *pdev)
+{
+ struct meson_nfc *nfc = platform_get_drvdata(pdev);
+
+ meson_nfc_nand_chip_cleanup(nfc);
+
+ meson_nfc_disable_clk(nfc);
+}
+
+static struct platform_driver meson_nfc_driver = {
+ .probe = meson_nfc_probe,
+ .remove_new = meson_nfc_remove,
+ .driver = {
+ .name = "meson-nand",
+ .of_match_table = meson_nfc_id_table,
+ },
+};
+module_platform_driver(meson_nfc_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Liang Yang <liang.yang@amlogic.com>");
+MODULE_DESCRIPTION("Amlogic's Meson NAND Flash Controller driver");
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
new file mode 100644
index 0000000000..215610f808
--- /dev/null
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -0,0 +1,849 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2004-2008 Freescale Semiconductor, Inc.
+ * Copyright 2009 Semihalf.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * Based on original driver from Freescale Semiconductor
+ * written by John Rigby <jrigby@freescale.com> on basis of mxc_nand.c.
+ * Reworked and extended by Piotr Ziecik <kosmo@semihalf.com>.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#include <asm/mpc5121.h>
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n) ((n) * 0x200)
+
+/* Addresses for NFC SPARE BUFFER areas */
+#define NFC_SPARE_BUFFERS 8
+#define NFC_SPARE_LEN 0x40
+#define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN))
+
+/* MPC5121 NFC registers */
+#define NFC_BUF_ADDR 0x1E04
+#define NFC_FLASH_ADDR 0x1E06
+#define NFC_FLASH_CMD 0x1E08
+#define NFC_CONFIG 0x1E0A
+#define NFC_ECC_STATUS1 0x1E0C
+#define NFC_ECC_STATUS2 0x1E0E
+#define NFC_SPAS 0x1E10
+#define NFC_WRPROT 0x1E12
+#define NFC_NF_WRPRST 0x1E18
+#define NFC_CONFIG1 0x1E1A
+#define NFC_CONFIG2 0x1E1C
+#define NFC_UNLOCKSTART_BLK0 0x1E20
+#define NFC_UNLOCKEND_BLK0 0x1E22
+#define NFC_UNLOCKSTART_BLK1 0x1E24
+#define NFC_UNLOCKEND_BLK1 0x1E26
+#define NFC_UNLOCKSTART_BLK2 0x1E28
+#define NFC_UNLOCKEND_BLK2 0x1E2A
+#define NFC_UNLOCKSTART_BLK3 0x1E2C
+#define NFC_UNLOCKEND_BLK3 0x1E2E
+
+/* Bit Definitions: NFC_BUF_ADDR */
+#define NFC_RBA_MASK (7 << 0)
+#define NFC_ACTIVE_CS_SHIFT 5
+#define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT)
+
+/* Bit Definitions: NFC_CONFIG */
+#define NFC_BLS_UNLOCKED (1 << 1)
+
+/* Bit Definitions: NFC_CONFIG1 */
+#define NFC_ECC_4BIT (1 << 0)
+#define NFC_FULL_PAGE_DMA (1 << 1)
+#define NFC_SPARE_ONLY (1 << 2)
+#define NFC_ECC_ENABLE (1 << 3)
+#define NFC_INT_MASK (1 << 4)
+#define NFC_BIG_ENDIAN (1 << 5)
+#define NFC_RESET (1 << 6)
+#define NFC_CE (1 << 7)
+#define NFC_ONE_CYCLE (1 << 8)
+#define NFC_PPB_32 (0 << 9)
+#define NFC_PPB_64 (1 << 9)
+#define NFC_PPB_128 (2 << 9)
+#define NFC_PPB_256 (3 << 9)
+#define NFC_PPB_MASK (3 << 9)
+#define NFC_FULL_PAGE_INT (1 << 11)
+
+/* Bit Definitions: NFC_CONFIG2 */
+#define NFC_COMMAND (1 << 0)
+#define NFC_ADDRESS (1 << 1)
+#define NFC_INPUT (1 << 2)
+#define NFC_OUTPUT (1 << 3)
+#define NFC_ID (1 << 4)
+#define NFC_STATUS (1 << 5)
+#define NFC_CMD_FAIL (1 << 15)
+#define NFC_INT (1 << 15)
+
+/* Bit Definitions: NFC_WRPROT */
+#define NFC_WPC_LOCK_TIGHT (1 << 0)
+#define NFC_WPC_LOCK (1 << 1)
+#define NFC_WPC_UNLOCK (1 << 2)
+
+#define DRV_NAME "mpc5121_nfc"
+
+/* Timeouts */
+#define NFC_RESET_TIMEOUT 1000 /* 1 ms */
+#define NFC_TIMEOUT (HZ / 10) /* 1/10 s */
+
+struct mpc5121_nfc_prv {
+ struct nand_controller controller;
+ struct nand_chip chip;
+ int irq;
+ void __iomem *regs;
+ struct clk *clk;
+ wait_queue_head_t irq_waitq;
+ uint column;
+ int spareonly;
+ void __iomem *csreg;
+ struct device *dev;
+};
+
+static void mpc5121_nfc_done(struct mtd_info *mtd);
+
+/* Read NFC register */
+static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+
+ return in_be16(prv->regs + reg);
+}
+
+/* Write NFC register */
+static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+
+ out_be16(prv->regs + reg, val);
+}
+
+/* Set bits in NFC register */
+static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
+}
+
+/* Clear bits in NFC register */
+static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
+}
+
+/* Invoke address cycle */
+static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
+{
+ nfc_write(mtd, NFC_FLASH_ADDR, addr);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Invoke command cycle */
+static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
+{
+ nfc_write(mtd, NFC_FLASH_CMD, cmd);
+ nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Send data from NFC buffers to NAND flash */
+static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive data from NAND flash */
+static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive ID from NAND flash */
+static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ID);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive status from NAND flash */
+static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* NFC interrupt handler */
+static irqreturn_t mpc5121_nfc_irq(int irq, void *data)
+{
+ struct mtd_info *mtd = data;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+
+ nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ wake_up(&prv->irq_waitq);
+
+ return IRQ_HANDLED;
+}
+
+/* Wait for operation complete */
+static void mpc5121_nfc_done(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+ int rv;
+
+ if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ rv = wait_event_timeout(prv->irq_waitq,
+ (nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT);
+
+ if (!rv)
+ dev_warn(prv->dev,
+ "Timeout while waiting for interrupt.\n");
+ }
+
+ nfc_clear(mtd, NFC_CONFIG2, NFC_INT);
+}
+
+/* Do address cycle(s) */
+static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ u32 pagemask = chip->pagemask;
+
+ if (column != -1) {
+ mpc5121_nfc_send_addr(mtd, column);
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_addr(mtd, column >> 8);
+ }
+
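+	/*
+	 * Illustrative example (not part of the original driver): for a
+	 * hypothetical chip with 65536 pages (pagemask 0xFFFF), page
+	 * 0x1234 is sent as two row-address cycles: 0x34, then 0x12.
+	 */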
+ if (page != -1) {
+ do {
+ mpc5121_nfc_send_addr(mtd, page & 0xFF);
+ page >>= 8;
+ pagemask >>= 8;
+ } while (pagemask);
+ }
+}
+
+/* Control chip select signals */
+static void mpc5121_nfc_select_chip(struct nand_chip *nand, int chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+
+ if (chip < 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
+ return;
+ }
+
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
+ NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_CONFIG1, NFC_CE);
+}
+
+/* Init external chip select logic on ADS5121 board */
+static int ads5121_chipselect_init(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+ struct device_node *dn;
+
+ dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld");
+ if (dn) {
+ prv->csreg = of_iomap(dn, 0);
+ of_node_put(dn);
+ if (!prv->csreg)
+ return -ENOMEM;
+
+ /* CPLD Register 9 controls NAND /CE Lines */
+ prv->csreg += 9;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Control chip select signals on the ADS5121 board */
+static void ads5121_select_chip(struct nand_chip *nand, int chip)
+{
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
+ u8 v;
+
+ v = in_8(prv->csreg);
+ v |= 0x0F;
+
+ if (chip >= 0) {
+ mpc5121_nfc_select_chip(nand, 0);
+ v &= ~(1 << chip);
+ } else
+ mpc5121_nfc_select_chip(nand, -1);
+
+ out_8(prv->csreg, v);
+}
+
+/* Read NAND Ready/Busy signal */
+static int mpc5121_nfc_dev_ready(struct nand_chip *nand)
+{
+ /*
+	 * The NFC handles the ready/busy signal internally, so this
+	 * function always reports the device as ready.
+ */
+ return 1;
+}
+
+/* Write command to NAND flash */
+static void mpc5121_nfc_command(struct nand_chip *chip, unsigned command,
+ int column, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+
+ prv->column = (column >= 0) ? column : 0;
+ prv->spareonly = 0;
+
+ switch (command) {
+ case NAND_CMD_PAGEPROG:
+ mpc5121_nfc_send_prog_page(mtd);
+ break;
+ /*
+ * NFC does not support sub-page reads and writes,
+ * so emulate them using full page transfers.
+ */
+ case NAND_CMD_READ0:
+ column = 0;
+ break;
+
+ case NAND_CMD_READ1:
+ prv->column += 256;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_READOOB:
+ prv->spareonly = 1;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_SEQIN:
+ mpc5121_nfc_command(chip, NAND_CMD_READ0, column, page);
+ column = 0;
+ break;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_READID:
+ case NAND_CMD_STATUS:
+ break;
+
+ default:
+ return;
+ }
+
+ mpc5121_nfc_send_cmd(mtd, command);
+ mpc5121_nfc_addr_cycle(mtd, column, page);
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
+ mpc5121_nfc_send_read_page(mtd);
+ break;
+
+ case NAND_CMD_READID:
+ mpc5121_nfc_send_read_id(mtd);
+ break;
+
+ case NAND_CMD_STATUS:
+ mpc5121_nfc_send_read_status(mtd);
+ if (chip->options & NAND_BUSWIDTH_16)
+ prv->column = 1;
+ else
+ prv->column = 0;
+ break;
+ }
+}
+
+/* Copy data from/to NFC spare buffers. */
+static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
+ u8 *buffer, uint size, int wr)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
+ uint o, s, sbsize, blksize;
+
+ /*
+	 * The NAND spare area is available through the NFC spare buffers.
+	 * The NFC divides the spare area into (page_size / 512) chunks.
+	 * Each chunk is placed into a separate spare memory area, using
+	 * the first (spare_size / num_of_chunks) bytes of that buffer.
+	 *
+	 * For NAND devices whose spare area is not evenly divisible by
+	 * the number of chunks, the number of used bytes in each spare
+	 * buffer is rounded down to the nearest even number, and all
+	 * remaining bytes are added to the last used spare buffer.
+ *
+ * For more information read section 26.6.10 of MPC5121e
+ * Microcontroller Reference Manual, Rev. 3.
+ */
+
+ /* Calculate number of valid bytes in each spare buffer */
+ sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
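+	/*
+	 * Worked example (illustrative): a 2048+64 page has four 512-byte
+	 * chunks, so sbsize = (64 / 4) & ~1 = 16; a 4096+218 page has
+	 * eight chunks, so sbsize = (218 / 8) & ~1 = 26 and the remaining
+	 * 10 bytes go to the last used spare buffer.
+	 */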
+
+ while (size) {
+ /* Calculate spare buffer number */
+ s = offset / sbsize;
+ if (s > NFC_SPARE_BUFFERS - 1)
+ s = NFC_SPARE_BUFFERS - 1;
+
+ /*
+ * Calculate offset to requested data block in selected spare
+ * buffer and its size.
+ */
+ o = offset - (s * sbsize);
+ blksize = min(sbsize - o, size);
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
+ buffer, blksize);
+ else
+ memcpy_fromio(buffer,
+ prv->regs + NFC_SPARE_AREA(s) + o, blksize);
+
+ buffer += blksize;
+ offset += blksize;
+ size -= blksize;
+ }
+}
+
+/* Copy data from/to NFC main and spare buffers */
+static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
+ int wr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+ uint c = prv->column;
+ uint l;
+
+ /* Handle spare area access */
+ if (prv->spareonly || c >= mtd->writesize) {
+ /* Calculate offset from beginning of spare area */
+ if (c >= mtd->writesize)
+ c -= mtd->writesize;
+
+ prv->column += len;
+ mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
+ return;
+ }
+
+ /*
+ * Handle main area access - limit copy length to prevent
+ * crossing main/spare boundary.
+ */
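+	/*
+	 * Illustrative example: with a 2048-byte page, column 2040 and
+	 * len 16, l = 8 bytes are copied from the main area and the
+	 * recursive call below transfers the remaining 8 bytes from the
+	 * spare area.
+	 */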
+ l = min((uint)len, mtd->writesize - c);
+ prv->column += l;
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
+ else
+ memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
+
+ /* Handle crossing main/spare boundary */
+ if (l != len) {
+ buf += l;
+ len -= l;
+ mpc5121_nfc_buf_copy(mtd, buf, len, wr);
+ }
+}
+
+/* Read data from NFC buffers */
+static void mpc5121_nfc_read_buf(struct nand_chip *chip, u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(nand_to_mtd(chip), buf, len, 0);
+}
+
+/* Write data to NFC buffers */
+static void mpc5121_nfc_write_buf(struct nand_chip *chip, const u_char *buf,
+ int len)
+{
+ mpc5121_nfc_buf_copy(nand_to_mtd(chip), (u_char *)buf, len, 1);
+}
+
+/* Read byte from NFC buffers */
+static u8 mpc5121_nfc_read_byte(struct nand_chip *chip)
+{
+ u8 tmp;
+
+ mpc5121_nfc_read_buf(chip, &tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/*
+ * Read NFC configuration from Reset Config Word
+ *
+ * The NFC is configured during reset on the basis of information
+ * stored in the Reset Config Word. There is no other way to set the
+ * NAND page size, spare size and bus width.
+ */
+static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+ struct mpc512x_reset_module *rm;
+ struct device_node *rmnode;
+ uint rcw_pagesize = 0;
+ uint rcw_sparesize = 0;
+ uint rcw_width;
+ uint rcwh;
+ uint romloc, ps;
+ int ret = 0;
+
+ rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
+ if (!rmnode) {
+		dev_err(prv->dev,
+			"Missing 'fsl,mpc5121-reset' node in device tree!\n");
+ return -ENODEV;
+ }
+
+ rm = of_iomap(rmnode, 0);
+ if (!rm) {
+ dev_err(prv->dev, "Error mapping reset module node!\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ rcwh = in_be32(&rm->rcwhr);
+
+ /* Bit 6: NFC bus width */
+ rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
+
+ /* Bit 7: NFC Page/Spare size */
+ ps = (rcwh >> 7) & 0x1;
+
+ /* Bits [22:21]: ROM Location */
+ romloc = (rcwh >> 21) & 0x3;
+
+ /* Decode RCW bits */
+ switch ((ps << 2) | romloc) {
+ case 0x00:
+ case 0x01:
+ rcw_pagesize = 512;
+ rcw_sparesize = 16;
+ break;
+ case 0x02:
+ case 0x03:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 128;
+ break;
+ case 0x04:
+ case 0x05:
+ rcw_pagesize = 2048;
+ rcw_sparesize = 64;
+ break;
+ case 0x06:
+ case 0x07:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 218;
+ break;
+ }
+
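+	/*
+	 * Example (illustrative): an RCWH with bit 7 set (ps = 1) and
+	 * ROMLOC = 0 decodes as case 0x04, i.e. 2048-byte pages with a
+	 * 64-byte spare area.
+	 */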
+ mtd->writesize = rcw_pagesize;
+ mtd->oobsize = rcw_sparesize;
+ if (rcw_width == 2)
+ chip->options |= NAND_BUSWIDTH_16;
+
+	dev_notice(prv->dev,
+		   "Configured for %u-bit NAND, page size %u with %u spare.\n",
+		   rcw_width * 8, rcw_pagesize, rcw_sparesize);
+ iounmap(rm);
+out:
+ of_node_put(rmnode);
+ return ret;
+}
+
+/* Free driver resources */
+static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+
+ if (prv->csreg)
+ iounmap(prv->csreg);
+}
+
+static int mpc5121_nfc_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops mpc5121_nfc_ops = {
+ .attach_chip = mpc5121_nfc_attach_chip,
+};
+
+static int mpc5121_nfc_probe(struct platform_device *op)
+{
+ struct device_node *dn = op->dev.of_node;
+ struct clk *clk;
+ struct device *dev = &op->dev;
+ struct mpc5121_nfc_prv *prv;
+ struct resource res;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ unsigned long regs_paddr, regs_size;
+ const __be32 *chips_no;
+ int resettime = 0;
+ int retval = 0;
+ int rev, len;
+
+ /*
+ * Check SoC revision. This driver supports only NFC
+ * in MPC5121 revision 2 and MPC5123 revision 3.
+ */
+ rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
+ if ((rev != 2) && (rev != 3)) {
+ dev_err(dev, "SoC revision %u is not supported!\n", rev);
+ return -ENXIO;
+ }
+
+ prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
+ if (!prv)
+ return -ENOMEM;
+
+ chip = &prv->chip;
+ mtd = nand_to_mtd(chip);
+
+ nand_controller_init(&prv->controller);
+ prv->controller.ops = &mpc5121_nfc_ops;
+ chip->controller = &prv->controller;
+
+ mtd->dev.parent = dev;
+ nand_set_controller_data(chip, prv);
+ nand_set_flash_node(chip, dn);
+ prv->dev = dev;
+
+ /* Read NFC configuration from Reset Config Word */
+ retval = mpc5121_nfc_read_hw_config(mtd);
+ if (retval) {
+ dev_err(dev, "Unable to read NFC config!\n");
+ return retval;
+ }
+
+ prv->irq = irq_of_parse_and_map(dn, 0);
+ if (!prv->irq) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ return -EINVAL;
+ }
+
+ retval = of_address_to_resource(dn, 0, &res);
+ if (retval) {
+ dev_err(dev, "Error parsing memory region!\n");
+ return retval;
+ }
+
+ chips_no = of_get_property(dn, "chips", &len);
+ if (!chips_no || len != sizeof(*chips_no)) {
+ dev_err(dev, "Invalid/missing 'chips' property!\n");
+ return -EINVAL;
+ }
+
+ regs_paddr = res.start;
+ regs_size = resource_size(&res);
+
+ if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
+ dev_err(dev, "Error requesting memory region!\n");
+ return -EBUSY;
+ }
+
+ prv->regs = devm_ioremap(dev, regs_paddr, regs_size);
+ if (!prv->regs) {
+ dev_err(dev, "Error mapping memory region!\n");
+ return -ENOMEM;
+ }
+
+ mtd->name = "MPC5121 NAND";
+ chip->legacy.dev_ready = mpc5121_nfc_dev_ready;
+ chip->legacy.cmdfunc = mpc5121_nfc_command;
+ chip->legacy.read_byte = mpc5121_nfc_read_byte;
+ chip->legacy.read_buf = mpc5121_nfc_read_buf;
+ chip->legacy.write_buf = mpc5121_nfc_write_buf;
+ chip->legacy.select_chip = mpc5121_nfc_select_chip;
+ chip->legacy.set_features = nand_get_set_features_notsupp;
+ chip->legacy.get_features = nand_get_set_features_notsupp;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+
+ /* Support external chip-select logic on ADS5121 board */
+ if (of_machine_is_compatible("fsl,mpc5121ads")) {
+ retval = ads5121_chipselect_init(mtd);
+ if (retval) {
+ dev_err(dev, "Chipselect init error!\n");
+ return retval;
+ }
+
+ chip->legacy.select_chip = ads5121_select_chip;
+ }
+
+ /* Enable NFC clock */
+ clk = devm_clk_get_enabled(dev, "ipg");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "Unable to acquire and enable NFC clock!\n");
+ retval = PTR_ERR(clk);
+ goto error;
+ }
+ prv->clk = clk;
+
+ /* Reset NAND Flash controller */
+ nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
+ while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
+ if (resettime++ >= NFC_RESET_TIMEOUT) {
+ dev_err(dev, "Timeout while resetting NFC!\n");
+ retval = -EINVAL;
+ goto error;
+ }
+
+ udelay(1);
+ }
+
+ /* Enable write to NFC memory */
+ nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
+
+ /* Enable write to all NAND pages */
+ nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
+ nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
+ nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
+
+ /*
+ * Setup NFC:
+ * - Big Endian transfers,
+ * - Interrupt after full page read/write.
+ */
+ nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
+ NFC_FULL_PAGE_INT);
+
+ /* Set spare area size */
+ nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
+
+ init_waitqueue_head(&prv->irq_waitq);
+ retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME,
+ mtd);
+ if (retval) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ goto error;
+ }
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ /* Detect NAND chips */
+ retval = nand_scan(chip, be32_to_cpup(chips_no));
+ if (retval) {
+		dev_err(dev, "NAND Flash not found!\n");
+ goto error;
+ }
+
+ /* Set erase block size */
+ switch (mtd->erasesize / mtd->writesize) {
+ case 32:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
+ break;
+
+ case 64:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
+ break;
+
+ case 128:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
+ break;
+
+ case 256:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported NAND flash!\n");
+ retval = -ENXIO;
+ goto error;
+ }
+
+ dev_set_drvdata(dev, mtd);
+
+ /* Register device in MTD */
+ retval = mtd_device_register(mtd, NULL, 0);
+ if (retval) {
+ dev_err(dev, "Error adding MTD device!\n");
+ goto error;
+ }
+
+ return 0;
+error:
+ mpc5121_nfc_free(dev, mtd);
+ return retval;
+}
+
+static void mpc5121_nfc_remove(struct platform_device *op)
+{
+ struct device *dev = &op->dev;
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ int ret;
+
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(mtd_to_nand(mtd));
+ mpc5121_nfc_free(dev, mtd);
+}
+
+static const struct of_device_id mpc5121_nfc_match[] = {
+ { .compatible = "fsl,mpc5121-nfc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpc5121_nfc_match);
+
+static struct platform_driver mpc5121_nfc_driver = {
+ .probe = mpc5121_nfc_probe,
+ .remove_new = mpc5121_nfc_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = mpc5121_nfc_match,
+ },
+};
+
+module_platform_driver(mpc5121_nfc_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
new file mode 100644
index 0000000000..29c8bddde6
--- /dev/null
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -0,0 +1,1653 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * MTK NAND Flash controller driver.
+ * Copyright (C) 2016 MediaTek Inc.
+ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/mtd/nand-ecc-mtk.h>
+
+/* NAND controller register definition */
+#define NFI_CNFG (0x00)
+#define CNFG_AHB BIT(0)
+#define CNFG_READ_EN BIT(1)
+#define CNFG_DMA_BURST_EN BIT(2)
+#define CNFG_BYTE_RW BIT(6)
+#define CNFG_HW_ECC_EN BIT(8)
+#define CNFG_AUTO_FMT_EN BIT(9)
+#define CNFG_OP_CUST (6 << 12)
+#define NFI_PAGEFMT (0x04)
+#define PAGEFMT_FDM_ECC_SHIFT (12)
+#define PAGEFMT_FDM_SHIFT (8)
+#define PAGEFMT_SEC_SEL_512 BIT(2)
+#define PAGEFMT_512_2K (0)
+#define PAGEFMT_2K_4K (1)
+#define PAGEFMT_4K_8K (2)
+#define PAGEFMT_8K_16K (3)
+/* NFI control */
+#define NFI_CON (0x08)
+#define CON_FIFO_FLUSH BIT(0)
+#define CON_NFI_RST BIT(1)
+#define CON_BRD BIT(8) /* burst read */
+#define CON_BWR BIT(9) /* burst write */
+#define CON_SEC_SHIFT (12)
+/* Timing control register */
+#define NFI_ACCCON (0x0C)
+#define NFI_INTR_EN (0x10)
+#define INTR_AHB_DONE_EN BIT(6)
+#define NFI_INTR_STA (0x14)
+#define NFI_CMD (0x20)
+#define NFI_ADDRNOB (0x30)
+#define NFI_COLADDR (0x34)
+#define NFI_ROWADDR (0x38)
+#define NFI_STRDATA (0x40)
+#define STAR_EN (1)
+#define STAR_DE (0)
+#define NFI_CNRNB (0x44)
+#define NFI_DATAW (0x50)
+#define NFI_DATAR (0x54)
+#define NFI_PIO_DIRDY (0x58)
+#define PIO_DI_RDY (0x01)
+#define NFI_STA (0x60)
+#define STA_CMD BIT(0)
+#define STA_ADDR BIT(1)
+#define STA_BUSY BIT(8)
+#define STA_EMP_PAGE BIT(12)
+#define NFI_FSM_CUSTDATA (0xe << 16)
+#define NFI_FSM_MASK (0xf << 16)
+#define NFI_ADDRCNTR (0x70)
+#define CNTR_MASK GENMASK(16, 12)
+#define ADDRCNTR_SEC_SHIFT (12)
+#define ADDRCNTR_SEC(val) \
+ (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
+#define NFI_STRADDR (0x80)
+#define NFI_BYTELEN (0x84)
+#define NFI_CSEL (0x90)
+#define NFI_FDML(x) (0xA0 + (x) * sizeof(u32) * 2)
+#define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2)
+#define NFI_FDM_MAX_SIZE (8)
+#define NFI_FDM_MIN_SIZE (1)
+#define NFI_DEBUG_CON1 (0x220)
+#define STROBE_MASK GENMASK(4, 3)
+#define STROBE_SHIFT (3)
+#define MAX_STROBE_DLY (3)
+#define NFI_MASTER_STA (0x224)
+#define MASTER_STA_MASK (0x0FFF)
+#define NFI_EMPTY_THRESH (0x23C)
+
+#define MTK_NAME "mtk-nand"
+#define KB(x) ((x) * 1024UL)
+#define MB(x) (KB(x) * 1024UL)
+
+#define MTK_TIMEOUT (500000)
+#define MTK_RESET_TIMEOUT (1000000)
+#define MTK_NAND_MAX_NSELS (2)
+#define MTK_NFC_MIN_SPARE (16)
+#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
+ ((tpoecs) << 28 | (tprecs) << 22 | (tc2r) << 16 | \
+ (tw2r) << 12 | (twh) << 8 | (twst) << 4 | (trlt))
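+/*
+ * Example (illustrative): ACCTIMING(1, 2, 0, 1, 1, 3, 3) packs to
+ * 0x10801133: tpoecs in bits [31:28], tprecs in [27:22], tc2r in
+ * [21:16], tw2r in [15:12], twh in [11:8], twst in [7:4] and trlt
+ * in [3:0].
+ */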
+
+struct mtk_nfc_caps {
+ const u8 *spare_size;
+ u8 num_spare_size;
+ u8 pageformat_spare_shift;
+ u8 nfi_clk_div;
+ u8 max_sector;
+ u32 max_sector_size;
+};
+
+struct mtk_nfc_bad_mark_ctl {
+ void (*bm_swap)(struct mtd_info *, u8 *buf, int raw);
+ u32 sec;
+ u32 pos;
+};
+
+/*
+ * FDM: region used to store free OOB data
+ */
+struct mtk_nfc_fdm {
+ u32 reg_size;
+ u32 ecc_size;
+};
+
+struct mtk_nfc_nand_chip {
+ struct list_head node;
+ struct nand_chip nand;
+
+ struct mtk_nfc_bad_mark_ctl bad_mark;
+ struct mtk_nfc_fdm fdm;
+ u32 spare_per_sector;
+
+ int nsels;
+ u8 sels[];
+ /* nothing after this field */
+};
+
+struct mtk_nfc_clk {
+ struct clk *nfi_clk;
+ struct clk *pad_clk;
+};
+
+struct mtk_nfc {
+ struct nand_controller controller;
+ struct mtk_ecc_config ecc_cfg;
+ struct mtk_nfc_clk clk;
+ struct mtk_ecc *ecc;
+
+ struct device *dev;
+ const struct mtk_nfc_caps *caps;
+ void __iomem *regs;
+
+ struct completion done;
+ struct list_head chips;
+
+ u8 *buffer;
+
+ unsigned long assigned_cs;
+};
+
+/*
+ * Supported spare sizes of each IP. The order must match the spare
+ * size bitfield definition of the NFI_PAGEFMT register.
+ */
+static const u8 spare_size_mt2701[] = {
+ 16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 63, 64
+};
+
+static const u8 spare_size_mt2712[] = {
+ 16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64, 67,
+ 74
+};
+
+static const u8 spare_size_mt7622[] = {
+ 16, 26, 27, 28
+};
+
+static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct mtk_nfc_nand_chip, nand);
+}
+
+static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i)
+{
+ return (u8 *)p + i * chip->ecc.size;
+}
+
+static inline u8 *oob_ptr(struct nand_chip *chip, int i)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ u8 *poi;
+
+	/*
+	 * Map the sector's FDM data to the free OOB: the beginning of the
+	 * OOB area stores the FDM data of the bad-mark sector.
+	 */
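+	/*
+	 * Illustrative mapping, assuming fdm.reg_size = 8 and
+	 * bad_mark.sec = 3: sectors 0-2 map to oob_poi + 8/16/24,
+	 * sector 3 maps to oob_poi + 0, and sectors above 3 map to
+	 * oob_poi + i * 8.
+	 */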
+
+ if (i < mtk_nand->bad_mark.sec)
+ poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
+ else if (i == mtk_nand->bad_mark.sec)
+ poi = chip->oob_poi;
+ else
+ poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;
+
+ return poi;
+}
+
+static inline int mtk_data_len(struct nand_chip *chip)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+
+ return chip->ecc.size + mtk_nand->spare_per_sector;
+}
+
+static inline u8 *mtk_data_ptr(struct nand_chip *chip, int i)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->buffer + i * mtk_data_len(chip);
+}
+
+static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
+}
+
+static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
+{
+ writel(val, nfc->regs + reg);
+}
+
+static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
+{
+ writew(val, nfc->regs + reg);
+}
+
+static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
+{
+ writeb(val, nfc->regs + reg);
+}
+
+static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
+{
+ return readl_relaxed(nfc->regs + reg);
+}
+
+static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
+{
+ return readw_relaxed(nfc->regs + reg);
+}
+
+static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
+{
+ return readb_relaxed(nfc->regs + reg);
+}
+
+static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
+{
+ struct device *dev = nfc->dev;
+ u32 val;
+ int ret;
+
+ /* reset all registers and force the NFI master to terminate */
+ nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
+
+ /* wait for the master to finish the last transaction */
+ ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
+ !(val & MASTER_STA_MASK), 50,
+ MTK_RESET_TIMEOUT);
+ if (ret)
+ dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
+ NFI_MASTER_STA, val);
+
+ /* ensure any status register affected by the NFI master is reset */
+ nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
+ nfi_writew(nfc, STAR_DE, NFI_STRDATA);
+}
+
+static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
+{
+ struct device *dev = nfc->dev;
+ u32 val;
+ int ret;
+
+ nfi_writel(nfc, command, NFI_CMD);
+
+ ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
+ !(val & STA_CMD), 10, MTK_TIMEOUT);
+ if (ret) {
+ dev_warn(dev, "nfi core timed out entering command mode\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
+{
+ struct device *dev = nfc->dev;
+ u32 val;
+ int ret;
+
+ nfi_writel(nfc, addr, NFI_COLADDR);
+ nfi_writel(nfc, 0, NFI_ROWADDR);
+ nfi_writew(nfc, 1, NFI_ADDRNOB);
+
+ ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
+ !(val & STA_ADDR), 10, MTK_TIMEOUT);
+ if (ret) {
+ dev_warn(dev, "nfi core timed out entering address mode\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 fmt, spare, i;
+
+ if (!mtd->writesize)
+ return 0;
+
+ spare = mtk_nand->spare_per_sector;
+
+ switch (mtd->writesize) {
+ case 512:
+ fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
+ break;
+ case KB(2):
+ if (chip->ecc.size == 512)
+ fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
+ else
+ fmt = PAGEFMT_512_2K;
+ break;
+ case KB(4):
+ if (chip->ecc.size == 512)
+ fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
+ else
+ fmt = PAGEFMT_2K_4K;
+ break;
+ case KB(8):
+ if (chip->ecc.size == 512)
+ fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
+ else
+ fmt = PAGEFMT_4K_8K;
+ break;
+ case KB(16):
+ fmt = PAGEFMT_8K_16K;
+ break;
+ default:
+ dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
+ return -EINVAL;
+ }
+
+ /*
+	 * For a 1024-byte ECC sector size the hardware doubles the spare
+	 * size value, so halve it here.
+ */
+ if (chip->ecc.size == 1024)
+ spare >>= 1;
+
+ for (i = 0; i < nfc->caps->num_spare_size; i++) {
+ if (nfc->caps->spare_size[i] == spare)
+ break;
+ }
+
+ if (i == nfc->caps->num_spare_size) {
+ dev_err(nfc->dev, "invalid spare size %d\n", spare);
+ return -EINVAL;
+ }
+
+ fmt |= i << nfc->caps->pageformat_spare_shift;
+
+ fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
+ fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
+ nfi_writel(nfc, fmt, NFI_PAGEFMT);
+
+ nfc->ecc_cfg.strength = chip->ecc.strength;
+ nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size;
+
+ return 0;
+}
+
+static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
+{
+ int rc;
+ u8 val;
+
+ rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
+ val & PIO_DI_RDY, 10, MTK_TIMEOUT);
+ if (rc < 0)
+ dev_err(nfc->dev, "data not ready\n");
+}
+
+static inline u8 mtk_nfc_read_byte(struct nand_chip *chip)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 reg;
+
+ /* after each byte read, the NFI_STA reg is reset by the hardware */
+ reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
+ if (reg != NFI_FSM_CUSTDATA) {
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg |= CNFG_BYTE_RW | CNFG_READ_EN;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ /*
+		 * Set the sector count to the maximum so the HW can keep
+		 * reading across unaligned accesses.
+ */
+ reg = (nfc->caps->max_sector << CON_SEC_SHIFT) | CON_BRD;
+ nfi_writel(nfc, reg, NFI_CON);
+
+ /* trigger to fetch data */
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+ }
+
+ mtk_nfc_wait_ioready(nfc);
+
+ return nfi_readb(nfc, NFI_DATAR);
+}
+
+static void mtk_nfc_read_buf(struct nand_chip *chip, u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = mtk_nfc_read_byte(chip);
+}
+
+static void mtk_nfc_write_byte(struct nand_chip *chip, u8 byte)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 reg;
+
+ reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
+
+ if (reg != NFI_FSM_CUSTDATA) {
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ reg = nfc->caps->max_sector << CON_SEC_SHIFT | CON_BWR;
+ nfi_writel(nfc, reg, NFI_CON);
+
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+ }
+
+ mtk_nfc_wait_ioready(nfc);
+ nfi_writeb(nfc, byte, NFI_DATAW);
+}
+
+static void mtk_nfc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ mtk_nfc_write_byte(chip, buf[i]);
+}
+
+static int mtk_nfc_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ unsigned int i;
+ u32 status;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ mtk_nfc_send_command(nfc, instr->ctx.cmd.opcode);
+ return 0;
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ mtk_nfc_send_address(nfc, instr->ctx.addr.addrs[i]);
+ return 0;
+ case NAND_OP_DATA_IN_INSTR:
+ mtk_nfc_read_buf(chip, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ return 0;
+ case NAND_OP_DATA_OUT_INSTR:
+ mtk_nfc_write_buf(chip, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ return 0;
+ case NAND_OP_WAITRDY_INSTR:
+ return readl_poll_timeout(nfc->regs + NFI_STA, status,
+ !(status & STA_BUSY), 20,
+ instr->ctx.waitrdy.timeout_ms * 1000);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static void mtk_nfc_select_target(struct nand_chip *nand, unsigned int cs)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
+
+ mtk_nfc_hw_runtime_config(nand_to_mtd(nand));
+
+ nfi_writel(nfc, mtk_nand->sels[cs], NFI_CSEL);
+}
+
+static int mtk_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ unsigned int i;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ mtk_nfc_hw_reset(nfc);
+ nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
+ mtk_nfc_select_target(chip, op->cs);
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = mtk_nfc_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int mtk_nfc_setup_interface(struct nand_chip *chip, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ const struct nand_sdr_timings *timings;
+ u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst = 0, trlt = 0;
+ u32 temp, tsel = 0;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ rate = clk_get_rate(nfc->clk.nfi_clk);
+ /* There is a frequency divider in some IPs */
+ rate /= nfc->caps->nfi_clk_div;
+
+	/* convert the clock rate to kHz */
+ rate /= 1000;
+
+ tpoecs = max(timings->tALH_min, timings->tCLH_min) / 1000;
+ tpoecs = DIV_ROUND_UP(tpoecs * rate, 1000000);
+ tpoecs &= 0xf;
+
+ tprecs = max(timings->tCLS_min, timings->tALS_min) / 1000;
+ tprecs = DIV_ROUND_UP(tprecs * rate, 1000000);
+ tprecs &= 0x3f;
+
+	/* The SDR interface has no tCR (CE# low to RE# low) timing */
+ tc2r = 0;
+
+ tw2r = timings->tWHR_min / 1000;
+ tw2r = DIV_ROUND_UP(tw2r * rate, 1000000);
+ tw2r = DIV_ROUND_UP(tw2r - 1, 2);
+ tw2r &= 0xf;
+
+ twh = max(timings->tREH_min, timings->tWH_min) / 1000;
+ twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
+ twh &= 0xf;
+
+ /* Calculate real WE#/RE# hold time in nanosecond */
+ temp = (twh + 1) * 1000000 / rate;
+ /* nanosecond to picosecond */
+ temp *= 1000;
+
+ /*
+	 * The WE# low-level time should be expanded to satisfy both the
+	 * WE# pulse time and the WE# cycle time.
+ */
+ if (temp < timings->tWC_min)
+ twst = timings->tWC_min - temp;
+ twst = max(timings->tWP_min, twst) / 1000;
+ twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
+ twst &= 0xf;
+
+ /*
+	 * The RE# low-level time should be expanded to satisfy both the
+	 * RE# pulse time and the RE# cycle time.
+ */
+ if (temp < timings->tRC_min)
+ trlt = timings->tRC_min - temp;
+ trlt = max(trlt, timings->tRP_min) / 1000;
+ trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
+ trlt &= 0xf;
+
+ /* Calculate RE# pulse time in nanosecond. */
+ temp = (trlt + 1) * 1000000 / rate;
+ /* nanosecond to picosecond */
+ temp *= 1000;
+ /*
+	 * If the RE# access time is longer than the RE# pulse time,
+	 * delay the data sampling point.
+ */
+ if (temp < timings->tREA_max) {
+ tsel = timings->tREA_max / 1000;
+ tsel = DIV_ROUND_UP(tsel * rate, 1000000);
+ tsel -= (trlt + 1);
+ if (tsel > MAX_STROBE_DLY) {
+ trlt += tsel - MAX_STROBE_DLY;
+ tsel = MAX_STROBE_DLY;
+ }
+ }
+ temp = nfi_readl(nfc, NFI_DEBUG_CON1);
+ temp &= ~STROBE_MASK;
+ temp |= tsel << STROBE_SHIFT;
+ nfi_writel(nfc, temp, NFI_DEBUG_CON1);
+
+ /*
+ * ACCON: access timing control register
+ * -------------------------------------
+ * 31:28: tpoecs, minimum required time for CS post pulling down after
+ * accessing the device
+ * 27:22: tprecs, minimum required time for CS pre pulling down before
+ * accessing the device
+ * 21:16: tc2r, minimum required time from NCEB low to NREB low
+ * 15:12: tw2r, minimum required time from NWEB high to NREB low.
+ * 11:08: twh, write enable hold time
+ * 07:04: twst, write wait states
+ * 03:00: trlt, read wait states
+ */
+ trlt = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
+ nfi_writel(nfc, trlt, NFI_ACCCON);
+
+ return 0;
+}
+
+static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ int size = chip->ecc.size + mtk_nand->fdm.reg_size;
+
+ nfc->ecc_cfg.mode = ECC_DMA_MODE;
+ nfc->ecc_cfg.op = ECC_ENCODE;
+
+ return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size);
+}
+
+static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c)
+{
+ /* nop */
+}
+
+static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
+ u32 bad_pos = nand->bad_mark.pos;
+
+ if (raw)
+ bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
+ else
+ bad_pos += nand->bad_mark.sec * chip->ecc.size;
+
+ swap(chip->oob_poi[0], buf[bad_pos]);
+}
+
+static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset,
+ u32 len, const u8 *buf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 start, end;
+ int i, ret;
+
+ start = offset / chip->ecc.size;
+ end = DIV_ROUND_UP(offset + len, chip->ecc.size);
+
+ memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
+ chip->ecc.size);
+
+ if (start > i || i >= end)
+ continue;
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+ memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
+
+		/* compute the ECC parity and store it back in the sector's OOB */
+ ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 i;
+
+ memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ if (buf)
+ memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
+ chip->ecc.size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+ memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
+ }
+}
+
+static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
+ u32 sectors)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 vall, valm;
+ u8 *oobptr;
+ int i, j;
+
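+	/*
+	 * Example (illustrative): with vall = 0x44332211 and
+	 * valm = 0x88776655, the loop below unpacks the OOB bytes as
+	 * 0x11 0x22 0x33 0x44 0x55 0x66 0x77 0x88 (little-endian order
+	 * from the two 32-bit FDM registers).
+	 */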
+ for (i = 0; i < sectors; i++) {
+ oobptr = oob_ptr(chip, start + i);
+ vall = nfi_readl(nfc, NFI_FDML(i));
+ valm = nfi_readl(nfc, NFI_FDMM(i));
+
+ for (j = 0; j < fdm->reg_size; j++)
+ oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
+ }
+}
+
+static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 vall, valm;
+ u8 *oobptr;
+ int i, j;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ oobptr = oob_ptr(chip, i);
+ vall = 0;
+ valm = 0;
+ for (j = 0; j < 8; j++) {
+ if (j < 4)
+ vall |= (j < fdm->reg_size ? oobptr[j] : 0xff)
+ << (j * 8);
+ else
+ valm |= (j < fdm->reg_size ? oobptr[j] : 0xff)
+ << ((j - 4) * 8);
+ }
+ nfi_writel(nfc, vall, NFI_FDML(i));
+ nfi_writel(nfc, valm, NFI_FDMM(i));
+ }
+}
+
+static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int page, int len)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct device *dev = nfc->dev;
+ dma_addr_t addr;
+ u32 reg;
+ int ret;
+
+ addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(nfc->dev, addr);
+ if (ret) {
+ dev_err(nfc->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
+ nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
+ nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
+
+ init_completion(&nfc->done);
+
+ reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
+ nfi_writel(nfc, reg, NFI_CON);
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+
+ ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(dev, "program ahb done timeout\n");
+ nfi_writew(nfc, 0, NFI_INTR_EN);
+ ret = -ETIMEDOUT;
+ goto timeout;
+ }
+
+ ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
+ ADDRCNTR_SEC(reg) >= chip->ecc.steps,
+ 10, MTK_TIMEOUT);
+ if (ret)
+ dev_err(dev, "hwecc write timeout\n");
+
+timeout:
+
+ dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
+ nfi_writel(nfc, 0, NFI_CON);
+
+ return ret;
+}
+
+static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int page, int raw)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ size_t len;
+ const u8 *bufpoi;
+ u32 reg;
+ int ret;
+
+ mtk_nfc_select_target(chip, chip->cur_cs);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ if (!raw) {
+ /* OOB => FDM: from register, ECC: from HW */
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
+ nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);
+
+ nfc->ecc_cfg.op = ECC_ENCODE;
+ nfc->ecc_cfg.mode = ECC_NFI_MODE;
+ ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
+ if (ret) {
+ /* clear NFI config */
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ return ret;
+ }
+
+ memcpy(nfc->buffer, buf, mtd->writesize);
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
+ bufpoi = nfc->buffer;
+
+ /* write OOB into the FDM registers (OOB area in MTK NAND) */
+ mtk_nfc_write_fdm(chip);
+ } else {
+ bufpoi = buf;
+ }
+
+ len = mtd->writesize + (raw ? mtd->oobsize : 0);
+ ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);
+
+ if (!raw)
+ mtk_ecc_disable(nfc->ecc);
+
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int mtk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ int oob_on, int page)
+{
+ return mtk_nfc_write_page(nand_to_mtd(chip), chip, buf, page, 0);
+}
+
+static int mtk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_on, int pg)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ mtk_nfc_format_page(mtd, buf);
+ return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
+}
+
+static int mtk_nfc_write_subpage_hwecc(struct nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf,
+ int oob_on, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ int ret;
+
+ ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
+ if (ret < 0)
+ return ret;
+
+	/* use the data in the private buffer (now with FDM and ECC parity) */
+ return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
+}
+
+static int mtk_nfc_write_oob_std(struct nand_chip *chip, int page)
+{
+ return mtk_nfc_write_page_raw(chip, NULL, 1, page);
+}
+
+static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
+ u32 sectors)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_ecc_stats stats;
+ u32 reg_size = mtk_nand->fdm.reg_size;
+ int rc, i;
+
+ rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
+ if (rc) {
+ memset(buf, 0xff, sectors * chip->ecc.size);
+ for (i = 0; i < sectors; i++)
+ memset(oob_ptr(chip, start + i), 0xff, reg_size);
+ return 0;
+ }
+
+ mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
+ mtd->ecc_stats.corrected += stats.corrected;
+ mtd->ecc_stats.failed += stats.failed;
+
+ return stats.bitflips;
+}
+
+static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ u32 data_offs, u32 readlen,
+ u8 *bufpoi, int page, int raw)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ u32 spare = mtk_nand->spare_per_sector;
+ u32 column, sectors, start, end, reg;
+ dma_addr_t addr;
+ int bitflips = 0;
+ size_t len;
+ u8 *buf;
+ int rc;
+
+ mtk_nfc_select_target(chip, chip->cur_cs);
+ start = data_offs / chip->ecc.size;
+ end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
+
+ sectors = end - start;
+ column = start * (chip->ecc.size + spare);
+
+ len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
+ buf = bufpoi + start * chip->ecc.size;
+
+ nand_read_page_op(chip, page, column, NULL, 0);
+
+ addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(nfc->dev, addr);
+ if (rc) {
+ dev_err(nfc->dev, "dma mapping error\n");
+
+ return -EINVAL;
+ }
+
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
+ if (!raw) {
+ reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ nfc->ecc_cfg.mode = ECC_NFI_MODE;
+ nfc->ecc_cfg.sectors = sectors;
+ nfc->ecc_cfg.op = ECC_DECODE;
+ rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
+ if (rc) {
+			dev_err(nfc->dev, "failed to enable ecc\n");
+ /* clear NFI_CNFG */
+ reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
+ CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+ nfi_writew(nfc, reg, NFI_CNFG);
+ dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+
+ return rc;
+ }
+ } else {
+ nfi_writew(nfc, reg, NFI_CNFG);
+ }
+
+ nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
+ nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
+ nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
+
+ init_completion(&nfc->done);
+ reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
+ nfi_writel(nfc, reg, NFI_CON);
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+
+ rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
+ if (!rc)
+ dev_warn(nfc->dev, "read ahb/dma done timeout\n");
+
+ rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
+ ADDRCNTR_SEC(reg) >= sectors, 10,
+ MTK_TIMEOUT);
+ if (rc < 0) {
+ dev_err(nfc->dev, "subpage done timeout\n");
+ bitflips = -EIO;
+ } else if (!raw) {
+ rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
+ bitflips = rc < 0 ? -ETIMEDOUT :
+ mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
+ mtk_nfc_read_fdm(chip, start, sectors);
+ }
+
+ dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+
+ if (raw)
+ goto done;
+
+ mtk_ecc_disable(nfc->ecc);
+
+ if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
+done:
+ nfi_writel(nfc, 0, NFI_CON);
+
+ return bitflips;
+}
+
+static int mtk_nfc_read_subpage_hwecc(struct nand_chip *chip, u32 off,
+ u32 len, u8 *p, int pg)
+{
+ return mtk_nfc_read_subpage(nand_to_mtd(chip), chip, off, len, p, pg,
+ 0);
+}
+
+static int mtk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *p, int oob_on,
+ int pg)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
+}
+
+static int mtk_nfc_read_page_raw(struct nand_chip *chip, u8 *buf, int oob_on,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ int i, ret;
+
+ memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+ ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
+ page, 1);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+ if (buf)
+ memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
+ chip->ecc.size);
+ }
+
+ return ret;
+}
+
+static int mtk_nfc_read_oob_std(struct nand_chip *chip, int page)
+{
+ return mtk_nfc_read_page_raw(chip, NULL, 1, page);
+}
+
+static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
+{
+ /*
+ * CNRNB: nand ready/busy register
+ * -------------------------------
+ * 7:4: timeout register for polling the NAND busy/ready signal
+ * 0 : poll the status of the busy/ready signal after [7:4]*16 cycles.
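+	 * The 0xf1 written below therefore selects the maximum timeout
+	 * value and enables polling.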
+ */
+ nfi_writew(nfc, 0xf1, NFI_CNRNB);
+ nfi_writel(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);
+
+ mtk_nfc_hw_reset(nfc);
+
+ nfi_readl(nfc, NFI_INTR_STA);
+ nfi_writel(nfc, 0, NFI_INTR_EN);
+}
+
+static irqreturn_t mtk_nfc_irq(int irq, void *id)
+{
+ struct mtk_nfc *nfc = id;
+ u16 sta, ien;
+
+ sta = nfi_readw(nfc, NFI_INTR_STA);
+ ien = nfi_readw(nfc, NFI_INTR_EN);
+
+ if (!(sta & ien))
+ return IRQ_NONE;
+
+ nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
+ complete(&nfc->done);
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 eccsteps;
+
+ eccsteps = mtd->writesize / chip->ecc.size;
+
+ if (section >= eccsteps)
+ return -ERANGE;
+
+ oob_region->length = fdm->reg_size - fdm->ecc_size;
+ oob_region->offset = section * fdm->reg_size + fdm->ecc_size;
+
+ return 0;
+}
+
+static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ u32 eccsteps;
+
+ if (section)
+ return -ERANGE;
+
+ eccsteps = mtd->writesize / chip->ecc.size;
+ oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
+ oob_region->length = mtd->oobsize - oob_region->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
+ .free = mtk_nfc_ooblayout_free,
+ .ecc = mtk_nfc_ooblayout_ecc,
+};
+
+static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
+ u32 ecc_bytes;
+
+ ecc_bytes = DIV_ROUND_UP(nand->ecc.strength *
+ mtk_ecc_get_parity_bits(nfc->ecc), 8);
+
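+	/*
+	 * Illustrative example, assuming 14 parity bits per correctable
+	 * bit error (MT2701/MT2712-class ECC): strength 4 gives
+	 * ecc_bytes = 7, so with 16 spare bytes per sector reg_size
+	 * starts at 9 and is clamped to NFI_FDM_MAX_SIZE (8) below.
+	 */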
+ fdm->reg_size = chip->spare_per_sector - ecc_bytes;
+ if (fdm->reg_size > NFI_FDM_MAX_SIZE)
+ fdm->reg_size = NFI_FDM_MAX_SIZE;
+
+ /* bad block mark storage */
+ fdm->ecc_size = 1;
+}
+
+static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
+ struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (mtd->writesize == 512) {
+ bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
+ } else {
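+		/*
+		 * Example (illustrative): with 2048-byte pages, 512-byte
+		 * ECC sectors and 16 spare bytes per sector,
+		 * mtk_data_len() is 528, so sec = 2048 / 528 = 3 and
+		 * pos = 2048 % 528 = 464.
+		 */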
+ bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
+ bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
+ bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
+ }
+}
+
+static int mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
+ const u8 *spare = nfc->caps->spare_size;
+ u32 eccsteps, i, closest_spare = 0;
+
+ eccsteps = mtd->writesize / nand->ecc.size;
+ *sps = mtd->oobsize / eccsteps;
+
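+	/*
+	 * Example (illustrative): a 4096+128 layout with 1024-byte ECC
+	 * sectors gives eccsteps = 4 and *sps = 32, which is halved to
+	 * 16 for the table lookup below and doubled back to 32 at the
+	 * end.
+	 */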
+ if (nand->ecc.size == 1024)
+ *sps >>= 1;
+
+ if (*sps < MTK_NFC_MIN_SPARE)
+ return -EINVAL;
+
+ for (i = 0; i < nfc->caps->num_spare_size; i++) {
+ if (*sps >= spare[i] && spare[i] >= spare[closest_spare]) {
+ closest_spare = i;
+ if (*sps == spare[i])
+ break;
+ }
+ }
+
+ *sps = spare[closest_spare];
+
+ if (nand->ecc.size == 1024)
+ *sps <<= 1;
+
+ return 0;
+}
+
+static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&nand->base);
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
+ u32 spare;
+ int free, ret;
+
+ /* support only ecc hw mode */
+ if (nand->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ dev_err(dev, "ecc.engine_type not supported\n");
+ return -EINVAL;
+ }
+
+ /* if optional dt settings not present */
+ if (!nand->ecc.size || !nand->ecc.strength) {
+ /* use datasheet requirements */
+ nand->ecc.strength = requirements->strength;
+ nand->ecc.size = requirements->step_size;
+
+ /*
+ * align eccstrength and eccsize
+ * this controller only supports 512 and 1024 sizes
+ */
+ if (nand->ecc.size < 1024) {
+ if (mtd->writesize > 512 &&
+ nfc->caps->max_sector_size > 512) {
+ nand->ecc.size = 1024;
+ nand->ecc.strength <<= 1;
+ } else {
+ nand->ecc.size = 512;
+ }
+ } else {
+ nand->ecc.size = 1024;
+ }
+
+ ret = mtk_nfc_set_spare_per_sector(&spare, mtd);
+ if (ret)
+ return ret;
+
+ /* calculate oob bytes except ecc parity data */
+ free = (nand->ecc.strength * mtk_ecc_get_parity_bits(nfc->ecc)
+ + 7) >> 3;
+ free = spare - free;
+
+ /*
+		 * Increase the ECC strength if the leftover OOB is larger
+		 * than the maximum FDM size, or reduce it if the OOB is too
+		 * small to hold the ECC parity data.
+ */
+ if (free > NFI_FDM_MAX_SIZE) {
+ spare -= NFI_FDM_MAX_SIZE;
+ nand->ecc.strength = (spare << 3) /
+ mtk_ecc_get_parity_bits(nfc->ecc);
+ } else if (free < 0) {
+ spare -= NFI_FDM_MIN_SIZE;
+ nand->ecc.strength = (spare << 3) /
+ mtk_ecc_get_parity_bits(nfc->ecc);
+ }
+ }
+
+ mtk_ecc_adjust_strength(nfc->ecc, &nand->ecc.strength);
+
+ dev_info(dev, "eccsize %d eccstrength %d\n",
+ nand->ecc.size, nand->ecc.strength);
+
+ return 0;
+}
+
+static int mtk_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ int len;
+ int ret;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+		dev_err(dev, "16-bit bus width not supported\n");
+ return -EINVAL;
+ }
+
+	/* store the BBT magic in the page, since the OOB is not protected */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ ret = mtk_nfc_ecc_init(dev, mtd);
+ if (ret)
+ return ret;
+
+ ret = mtk_nfc_set_spare_per_sector(&mtk_nand->spare_per_sector, mtd);
+ if (ret)
+ return ret;
+
+ mtk_nfc_set_fdm(&mtk_nand->fdm, mtd);
+ mtk_nfc_set_bad_mark_ctl(&mtk_nand->bad_mark, mtd);
+
+ len = mtd->writesize + mtd->oobsize;
+ nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
+ if (!nfc->buffer)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static const struct nand_controller_ops mtk_nfc_controller_ops = {
+ .attach_chip = mtk_nfc_attach_chip,
+ .setup_interface = mtk_nfc_setup_interface,
+ .exec_op = mtk_nfc_exec_op,
+};
+
+static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
+ struct device_node *np)
+{
+ struct mtk_nfc_nand_chip *chip;
+ struct nand_chip *nand;
+ struct mtd_info *mtd;
+ int nsels;
+ u32 tmp;
+ int ret;
+ int i;
+
+ if (!of_get_property(np, "reg", &nsels))
+ return -ENODEV;
+
+ nsels /= sizeof(u32);
+ if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
+ dev_err(dev, "invalid reg property size %d\n", nsels);
+ return -EINVAL;
+ }
+
+ chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
+ GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->nsels = nsels;
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &tmp);
+ if (ret) {
+ dev_err(dev, "reg property failure : %d\n", ret);
+ return ret;
+ }
+
+ if (tmp >= MTK_NAND_MAX_NSELS) {
+ dev_err(dev, "invalid CS: %u\n", tmp);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %u already assigned\n", tmp);
+ return -EINVAL;
+ }
+
+ chip->sels[i] = tmp;
+ }
+
+ nand = &chip->nand;
+ nand->controller = &nfc->controller;
+
+ nand_set_flash_node(nand, np);
+ nand_set_controller_data(nand, nfc);
+
+ nand->options |= NAND_USES_DMA | NAND_SUBPAGE_READ;
+
+ /* set default mode in case dt entry is missing */
+ nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
+ nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
+ nand->ecc.write_page = mtk_nfc_write_page_hwecc;
+ nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
+ nand->ecc.write_oob = mtk_nfc_write_oob_std;
+
+ nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
+ nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
+ nand->ecc.read_page = mtk_nfc_read_page_hwecc;
+ nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
+ nand->ecc.read_oob = mtk_nfc_read_oob_std;
+
+ mtd = nand_to_mtd(nand);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+ mtd->name = MTK_NAME;
+ mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
+
+ mtk_nfc_hw_init(nfc);
+
+ ret = nand_scan(nand, nsels);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "mtd parse partition error\n");
+ nand_cleanup(nand);
+ return ret;
+ }
+
+ list_add_tail(&chip->node, &nfc->chips);
+
+ return 0;
+}
+
+static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int ret;
+
+ for_each_child_of_node(np, nand_np) {
+ ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct mtk_nfc_caps mtk_nfc_caps_mt2701 = {
+ .spare_size = spare_size_mt2701,
+ .num_spare_size = 16,
+ .pageformat_spare_shift = 4,
+ .nfi_clk_div = 1,
+ .max_sector = 16,
+ .max_sector_size = 1024,
+};
+
+static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
+ .spare_size = spare_size_mt2712,
+ .num_spare_size = 19,
+ .pageformat_spare_shift = 16,
+ .nfi_clk_div = 2,
+ .max_sector = 16,
+ .max_sector_size = 1024,
+};
+
+static const struct mtk_nfc_caps mtk_nfc_caps_mt7622 = {
+ .spare_size = spare_size_mt7622,
+ .num_spare_size = 4,
+ .pageformat_spare_shift = 4,
+ .nfi_clk_div = 1,
+ .max_sector = 8,
+ .max_sector_size = 512,
+};
+
+static const struct of_device_id mtk_nfc_id_table[] = {
+ {
+ .compatible = "mediatek,mt2701-nfc",
+ .data = &mtk_nfc_caps_mt2701,
+ }, {
+ .compatible = "mediatek,mt2712-nfc",
+ .data = &mtk_nfc_caps_mt2712,
+ }, {
+ .compatible = "mediatek,mt7622-nfc",
+ .data = &mtk_nfc_caps_mt7622,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);
+
+static int mtk_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct mtk_nfc *nfc;
+ int ret, irq;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+ nfc->controller.ops = &mtk_nfc_controller_ops;
+
+ /* probe defer if not ready */
+ nfc->ecc = of_mtk_ecc_get(np);
+ if (IS_ERR(nfc->ecc))
+ return PTR_ERR(nfc->ecc);
+ else if (!nfc->ecc)
+ return -ENODEV;
+
+ nfc->caps = of_device_get_match_data(dev);
+ nfc->dev = dev;
+
+ nfc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nfc->regs)) {
+ ret = PTR_ERR(nfc->regs);
+ goto release_ecc;
+ }
+
+ nfc->clk.nfi_clk = devm_clk_get_enabled(dev, "nfi_clk");
+ if (IS_ERR(nfc->clk.nfi_clk)) {
+ dev_err(dev, "no clk\n");
+ ret = PTR_ERR(nfc->clk.nfi_clk);
+ goto release_ecc;
+ }
+
+ nfc->clk.pad_clk = devm_clk_get_enabled(dev, "pad_clk");
+ if (IS_ERR(nfc->clk.pad_clk)) {
+ dev_err(dev, "no pad clk\n");
+ ret = PTR_ERR(nfc->clk.pad_clk);
+ goto release_ecc;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = -EINVAL;
+ goto release_ecc;
+ }
+
+ ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
+ if (ret) {
+ dev_err(dev, "failed to request nfi irq\n");
+ goto release_ecc;
+ }
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set dma mask\n");
+ goto release_ecc;
+ }
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = mtk_nfc_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init nand chips\n");
+ goto release_ecc;
+ }
+
+ return 0;
+
+release_ecc:
+ mtk_ecc_release(nfc->ecc);
+
+ return ret;
+}
+
+static void mtk_nfc_remove(struct platform_device *pdev)
+{
+ struct mtk_nfc *nfc = platform_get_drvdata(pdev);
+ struct mtk_nfc_nand_chip *mtk_chip;
+ struct nand_chip *chip;
+ int ret;
+
+ while (!list_empty(&nfc->chips)) {
+ mtk_chip = list_first_entry(&nfc->chips,
+ struct mtk_nfc_nand_chip, node);
+ chip = &mtk_chip->nand;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&mtk_chip->node);
+ }
+
+ mtk_ecc_release(nfc->ecc);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_nfc_suspend(struct device *dev)
+{
+ struct mtk_nfc *nfc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(nfc->clk.nfi_clk);
+ clk_disable_unprepare(nfc->clk.pad_clk);
+
+ return 0;
+}
+
+static int mtk_nfc_resume(struct device *dev)
+{
+ struct mtk_nfc *nfc = dev_get_drvdata(dev);
+ struct mtk_nfc_nand_chip *chip;
+ struct nand_chip *nand;
+ int ret;
+ u32 i;
+
+ udelay(200);
+
+ ret = clk_prepare_enable(nfc->clk.nfi_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable nfi clk\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(nfc->clk.pad_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable pad clk\n");
+ clk_disable_unprepare(nfc->clk.nfi_clk);
+ return ret;
+ }
+
+ /* reset NAND chip if VCC was powered off */
+ list_for_each_entry(chip, &nfc->chips, node) {
+ nand = &chip->nand;
+ for (i = 0; i < chip->nsels; i++)
+ nand_reset(nand, i);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
+#endif
+
+static struct platform_driver mtk_nfc_driver = {
+ .probe = mtk_nfc_probe,
+ .remove_new = mtk_nfc_remove,
+ .driver = {
+ .name = MTK_NAME,
+ .of_match_table = mtk_nfc_id_table,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &mtk_nfc_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(mtk_nfc_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
+MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
new file mode 100644
index 0000000000..003008355b
--- /dev/null
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -0,0 +1,1845 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+
+#define DRIVER_NAME "mxc_nand"
+
+/* Addresses for NFC registers */
+#define NFC_V1_V2_BUF_SIZE (host->regs + 0x00)
+#define NFC_V1_V2_BUF_ADDR (host->regs + 0x04)
+#define NFC_V1_V2_FLASH_ADDR (host->regs + 0x06)
+#define NFC_V1_V2_FLASH_CMD (host->regs + 0x08)
+#define NFC_V1_V2_CONFIG (host->regs + 0x0a)
+#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
+#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
+#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10)
+#define NFC_V1_V2_WRPROT (host->regs + 0x12)
+#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
+#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
+#define NFC_V21_UNLOCKSTART_BLKADDR0 (host->regs + 0x20)
+#define NFC_V21_UNLOCKSTART_BLKADDR1 (host->regs + 0x24)
+#define NFC_V21_UNLOCKSTART_BLKADDR2 (host->regs + 0x28)
+#define NFC_V21_UNLOCKSTART_BLKADDR3 (host->regs + 0x2c)
+#define NFC_V21_UNLOCKEND_BLKADDR0 (host->regs + 0x22)
+#define NFC_V21_UNLOCKEND_BLKADDR1 (host->regs + 0x26)
+#define NFC_V21_UNLOCKEND_BLKADDR2 (host->regs + 0x2a)
+#define NFC_V21_UNLOCKEND_BLKADDR3 (host->regs + 0x2e)
+#define NFC_V1_V2_NF_WRPRST (host->regs + 0x18)
+#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a)
+#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c)
+
+#define NFC_V2_CONFIG1_ECC_MODE_4 (1 << 0)
+#define NFC_V1_V2_CONFIG1_SP_EN (1 << 2)
+#define NFC_V1_V2_CONFIG1_ECC_EN (1 << 3)
+#define NFC_V1_V2_CONFIG1_INT_MSK (1 << 4)
+#define NFC_V1_V2_CONFIG1_BIG (1 << 5)
+#define NFC_V1_V2_CONFIG1_RST (1 << 6)
+#define NFC_V1_V2_CONFIG1_CE (1 << 7)
+#define NFC_V2_CONFIG1_ONE_CYCLE (1 << 8)
+#define NFC_V2_CONFIG1_PPB(x) (((x) & 0x3) << 9)
+#define NFC_V2_CONFIG1_FP_INT (1 << 11)
+
+#define NFC_V1_V2_CONFIG2_INT (1 << 15)
+
+/*
+ * Operation modes for the NFC. Valid for v1, v2 and v3
+ * type controllers.
+ */
+#define NFC_CMD (1 << 0)
+#define NFC_ADDR (1 << 1)
+#define NFC_INPUT (1 << 2)
+#define NFC_OUTPUT (1 << 3)
+#define NFC_ID (1 << 4)
+#define NFC_STATUS (1 << 5)
+
+#define NFC_V3_FLASH_CMD (host->regs_axi + 0x00)
+#define NFC_V3_FLASH_ADDR0 (host->regs_axi + 0x04)
+
+#define NFC_V3_CONFIG1 (host->regs_axi + 0x34)
+#define NFC_V3_CONFIG1_SP_EN (1 << 0)
+#define NFC_V3_CONFIG1_RBA(x) (((x) & 0x7) << 4)
+
+#define NFC_V3_ECC_STATUS_RESULT (host->regs_axi + 0x38)
+
+#define NFC_V3_LAUNCH (host->regs_axi + 0x40)
+
+#define NFC_V3_WRPROT (host->regs_ip + 0x0)
+#define NFC_V3_WRPROT_LOCK_TIGHT (1 << 0)
+#define NFC_V3_WRPROT_LOCK (1 << 1)
+#define NFC_V3_WRPROT_UNLOCK (1 << 2)
+#define NFC_V3_WRPROT_BLS_UNLOCK (2 << 6)
+
+#define NFC_V3_WRPROT_UNLOCK_BLK_ADD0 (host->regs_ip + 0x04)
+
+#define NFC_V3_CONFIG2 (host->regs_ip + 0x24)
+#define NFC_V3_CONFIG2_PS_512 (0 << 0)
+#define NFC_V3_CONFIG2_PS_2048 (1 << 0)
+#define NFC_V3_CONFIG2_PS_4096 (2 << 0)
+#define NFC_V3_CONFIG2_ONE_CYCLE (1 << 2)
+#define NFC_V3_CONFIG2_ECC_EN (1 << 3)
+#define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4)
+#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5)
+#define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6)
+#define NFC_V3_CONFIG2_PPB(x, shift) (((x) & 0x3) << shift)
+#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12)
+#define NFC_V3_CONFIG2_INT_MSK (1 << 15)
+#define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24)
+#define NFC_V3_CONFIG2_SPAS(x) (((x) & 0xff) << 16)
+
+#define NFC_V3_CONFIG3 (host->regs_ip + 0x28)
+#define NFC_V3_CONFIG3_ADD_OP(x) (((x) & 0x3) << 0)
+#define NFC_V3_CONFIG3_FW8 (1 << 3)
+#define NFC_V3_CONFIG3_SBB(x) (((x) & 0x7) << 8)
+#define NFC_V3_CONFIG3_NUM_OF_DEVICES(x) (((x) & 0x7) << 12)
+#define NFC_V3_CONFIG3_RBB_MODE (1 << 15)
+#define NFC_V3_CONFIG3_NO_SDMA (1 << 20)
+
+#define NFC_V3_IPC (host->regs_ip + 0x2C)
+#define NFC_V3_IPC_CREQ (1 << 0)
+#define NFC_V3_IPC_INT (1 << 31)
+
+#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34)
+
+struct mxc_nand_host;
+
+struct mxc_nand_devtype_data {
+ void (*preset)(struct mtd_info *);
+ int (*read_page)(struct nand_chip *chip, void *buf, void *oob, bool ecc,
+ int page);
+ void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_page)(struct mtd_info *, unsigned int);
+ void (*send_read_id)(struct mxc_nand_host *);
+ uint16_t (*get_dev_status)(struct mxc_nand_host *);
+ int (*check_int)(struct mxc_nand_host *);
+ void (*irq_control)(struct mxc_nand_host *, int);
+ u32 (*get_ecc_status)(struct mxc_nand_host *);
+ const struct mtd_ooblayout_ops *ooblayout;
+ void (*select_chip)(struct nand_chip *chip, int cs);
+ int (*setup_interface)(struct nand_chip *chip, int csline,
+ const struct nand_interface_config *conf);
+ void (*enable_hwecc)(struct nand_chip *chip, bool enable);
+
+ /*
+ * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
+ * (CONFIG1:INT_MSK is set). To handle this the driver uses
+ * enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK
+ */
+ int irqpending_quirk;
+ int needs_ip;
+
+ size_t regs_offset;
+ size_t spare0_offset;
+ size_t axi_offset;
+
+ int spare_len;
+ int eccbytes;
+ int eccsize;
+ int ppb_shift;
+};
+
+struct mxc_nand_host {
+ struct nand_chip nand;
+ struct device *dev;
+
+ void __iomem *spare0;
+ void __iomem *main_area0;
+
+ void __iomem *base;
+ void __iomem *regs;
+ void __iomem *regs_axi;
+ void __iomem *regs_ip;
+ int status_request;
+ struct clk *clk;
+ int clk_act;
+ int irq;
+ int eccsize;
+ int used_oobsize;
+ int active_cs;
+
+ struct completion op_completion;
+
+ uint8_t *data_buf;
+ unsigned int buf_start;
+
+ const struct mxc_nand_devtype_data *devtype_data;
+};
+
+static const char * const part_probes[] = {
+ "cmdlinepart", "RedBoot", "ofpart", NULL };
+
+static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
+{
+ int i;
+ u32 *t = trg;
+ const __iomem u32 *s = src;
+
+ for (i = 0; i < (size >> 2); i++)
+ *t++ = __raw_readl(s++);
+}
+
+static void memcpy16_fromio(void *trg, const void __iomem *src, size_t size)
+{
+ int i;
+ u16 *t = trg;
+ const __iomem u16 *s = src;
+
+ /* We assume that src (IO) is always 32bit aligned */
+ if (PTR_ALIGN(trg, 4) == trg && IS_ALIGNED(size, 4)) {
+ memcpy32_fromio(trg, src, size);
+ return;
+ }
+
+ for (i = 0; i < (size >> 1); i++)
+ *t++ = __raw_readw(s++);
+}
+
+static inline void memcpy32_toio(void __iomem *trg, const void *src, int size)
+{
+ /* __iowrite32_copy takes a count of 32-bit words, so divide the byte size by 4 */
+ __iowrite32_copy(trg, src, size / 4);
+}
+
+static void memcpy16_toio(void __iomem *trg, const void *src, int size)
+{
+ int i;
+ __iomem u16 *t = trg;
+ const u16 *s = src;
+
+ /* We assume that trg (IO) is always 32bit aligned */
+ if (PTR_ALIGN(src, 4) == src && IS_ALIGNED(size, 4)) {
+ memcpy32_toio(trg, src, size);
+ return;
+ }
+
+ for (i = 0; i < (size >> 1); i++)
+ __raw_writew(*s++, t++);
+}
+
+/*
+ * The controller splits a page into data chunks of 512 bytes + partial oob.
+ * There are writesize / 512 such chunks, the size of the partial oob parts is
+ * oobsize / #chunks rounded down to a multiple of 2. The last oob chunk then
+ * contains additionally the byte lost by rounding (if any).
+ * This function handles the needed shuffling between host->data_buf (which
+ * holds a page in natural order, i.e. writesize bytes data + oobsize bytes
+ * spare) and the NFC buffer.
+ */
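+/*
+ * Worked example (geometry assumed for illustration): writesize = 2048 and
+ * used_oobsize = 218 give num_chunks = 4 and oob_chunk_size = (218 / 4) & ~1
+ * = 54; the first three oob chunks then carry 54 bytes each and the last
+ * chunk carries 218 - 3 * 54 = 56 bytes, i.e. it picks up the bytes lost by
+ * rounding.
+ */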
+static void copy_spare(struct mtd_info *mtd, bool bfrom, void *buf)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(this);
+ u16 i, oob_chunk_size;
+ u16 num_chunks = mtd->writesize / 512;
+
+ u8 *d = buf;
+ u8 __iomem *s = host->spare0;
+ u16 sparebuf_size = host->devtype_data->spare_len;
+
+ /* size of oob chunk for all but possibly the last one */
+ oob_chunk_size = (host->used_oobsize / num_chunks) & ~1;
+
+ if (bfrom) {
+ for (i = 0; i < num_chunks - 1; i++)
+ memcpy16_fromio(d + i * oob_chunk_size,
+ s + i * sparebuf_size,
+ oob_chunk_size);
+
+ /* the last chunk */
+ memcpy16_fromio(d + i * oob_chunk_size,
+ s + i * sparebuf_size,
+ host->used_oobsize - i * oob_chunk_size);
+ } else {
+ for (i = 0; i < num_chunks - 1; i++)
+ memcpy16_toio(&s[i * sparebuf_size],
+ &d[i * oob_chunk_size],
+ oob_chunk_size);
+
+ /* the last chunk */
+ memcpy16_toio(&s[i * sparebuf_size],
+ &d[i * oob_chunk_size],
+ host->used_oobsize - i * oob_chunk_size);
+ }
+}
+
+/*
+ * MXC NANDFC can only perform full page+spare or spare-only read/write. When
+ * the upper layers perform a read/write buf operation, the saved column address
+ * is used to index into the full page. So usually this function is called with
+ * column == 0 (unless no column cycle is needed indicated by column == -1)
+ */
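+/*
+ * Example (values chosen for illustration): on a 2048-byte page device with
+ * mtd->size >= 256 MiB, column == 0 and page_addr == 0x1234 result in five
+ * address cycles below: 0x00, 0x00 (column), then 0x34, 0x12, 0x00 (row).
+ */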
+static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+ /* Write out column address, if necessary */
+ if (column != -1) {
+ host->devtype_data->send_addr(host, column & 0xff,
+ page_addr == -1);
+ if (mtd->writesize > 512)
+ /* another col addr cycle for 2k page */
+ host->devtype_data->send_addr(host,
+ (column >> 8) & 0xff,
+ false);
+ }
+
+ /* Write out page address, if necessary */
+ if (page_addr != -1) {
+ /* paddr_0 - p_addr_7 */
+ host->devtype_data->send_addr(host, (page_addr & 0xff), false);
+
+ if (mtd->writesize > 512) {
+ if (mtd->size >= 0x10000000) {
+ /* paddr_8 - paddr_15 */
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff,
+ false);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 16) & 0xff,
+ true);
+ } else
+ /* paddr_8 - paddr_15 */
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff, true);
+ } else {
+ if (nand_chip->options & NAND_ROW_ADDR_3) {
+ /* paddr_8 - paddr_15 */
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff,
+ false);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 16) & 0xff,
+ true);
+ } else
+ /* paddr_8 - paddr_15 */
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff, true);
+ }
+ }
+}
+
+static int check_int_v3(struct mxc_nand_host *host)
+{
+ uint32_t tmp;
+
+ tmp = readl(NFC_V3_IPC);
+ if (!(tmp & NFC_V3_IPC_INT))
+ return 0;
+
+ tmp &= ~NFC_V3_IPC_INT;
+ writel(tmp, NFC_V3_IPC);
+
+ return 1;
+}
+
+static int check_int_v1_v2(struct mxc_nand_host *host)
+{
+ uint32_t tmp;
+
+ tmp = readw(NFC_V1_V2_CONFIG2);
+ if (!(tmp & NFC_V1_V2_CONFIG2_INT))
+ return 0;
+
+ if (!host->devtype_data->irqpending_quirk)
+ writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
+
+ return 1;
+}
+
+static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
+{
+ uint16_t tmp;
+
+ tmp = readw(NFC_V1_V2_CONFIG1);
+
+ if (activate)
+ tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK;
+ else
+ tmp |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ writew(tmp, NFC_V1_V2_CONFIG1);
+}
+
+static void irq_control_v3(struct mxc_nand_host *host, int activate)
+{
+ uint32_t tmp;
+
+ tmp = readl(NFC_V3_CONFIG2);
+
+ if (activate)
+ tmp &= ~NFC_V3_CONFIG2_INT_MSK;
+ else
+ tmp |= NFC_V3_CONFIG2_INT_MSK;
+
+ writel(tmp, NFC_V3_CONFIG2);
+}
+
+static void irq_control(struct mxc_nand_host *host, int activate)
+{
+ if (host->devtype_data->irqpending_quirk) {
+ if (activate)
+ enable_irq(host->irq);
+ else
+ disable_irq_nosync(host->irq);
+ } else {
+ host->devtype_data->irq_control(host, activate);
+ }
+}
+
+static u32 get_ecc_status_v1(struct mxc_nand_host *host)
+{
+ return readw(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v2(struct mxc_nand_host *host)
+{
+ return readl(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v3(struct mxc_nand_host *host)
+{
+ return readl(NFC_V3_ECC_STATUS_RESULT);
+}
+
+static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
+{
+ struct mxc_nand_host *host = dev_id;
+
+ if (!host->devtype_data->check_int(host))
+ return IRQ_NONE;
+
+ irq_control(host, 0);
+
+ complete(&host->op_completion);
+
+ return IRQ_HANDLED;
+}
+
+/* This function polls the NANDFC to wait for the basic operation to
+ * complete by checking the INT bit of the config2 register.
+ */
+static int wait_op_done(struct mxc_nand_host *host, int useirq)
+{
+ int ret = 0;
+
+ /*
+ * If operation is already complete, don't bother to setup an irq or a
+ * loop.
+ */
+ if (host->devtype_data->check_int(host))
+ return 0;
+
+ if (useirq) {
+ unsigned long timeout;
+
+ reinit_completion(&host->op_completion);
+
+ irq_control(host, 1);
+
+ timeout = wait_for_completion_timeout(&host->op_completion, HZ);
+ if (!timeout && !host->devtype_data->check_int(host)) {
+ dev_dbg(host->dev, "timeout waiting for irq\n");
+ ret = -ETIMEDOUT;
+ }
+ } else {
+ int max_retries = 8000;
+ int done;
+
+ do {
+ udelay(1);
+
+ done = host->devtype_data->check_int(host);
+ if (done)
+ break;
+
+ } while (--max_retries);
+
+ if (!done) {
+ dev_dbg(host->dev, "timeout polling for completion\n");
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ WARN_ONCE(ret < 0, "timeout! useirq=%d\n", useirq);
+
+ return ret;
+}
+
+static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
+{
+ /* fill command */
+ writel(cmd, NFC_V3_FLASH_CMD);
+
+ /* send out command */
+ writel(NFC_CMD, NFC_V3_LAUNCH);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, useirq);
+}
+
+/* This function issues the specified command to the NAND device and
+ * waits for completion. */
+static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
+{
+ dev_dbg(host->dev, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
+
+ writew(cmd, NFC_V1_V2_FLASH_CMD);
+ writew(NFC_CMD, NFC_V1_V2_CONFIG2);
+
+ if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) {
+ int max_retries = 100;
+ /* Reset completion is indicated by NFC_CONFIG2 being set to 0 */
+ while (max_retries-- > 0) {
+ if (readw(NFC_V1_V2_CONFIG2) == 0)
+ break;
+ udelay(1);
+ }
+ if (max_retries < 0)
+ dev_dbg(host->dev, "%s: RESET failed\n", __func__);
+ } else {
+ /* Wait for operation to complete */
+ wait_op_done(host, useirq);
+ }
+}
+
+static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
+{
+ /* fill address */
+ writel(addr, NFC_V3_FLASH_ADDR0);
+
+ /* send out address */
+ writel(NFC_ADDR, NFC_V3_LAUNCH);
+
+ wait_op_done(host, 0);
+}
+
+/* This function sends an address (or partial address) to the
+ * NAND device. The address is used to select the source/destination for
+ * a NAND command. */
+static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
+{
+ dev_dbg(host->dev, "send_addr(host, 0x%x %d)\n", addr, islast);
+
+ writew(addr, NFC_V1_V2_FLASH_ADDR);
+ writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, islast);
+}
+
+static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ uint32_t tmp;
+
+ tmp = readl(NFC_V3_CONFIG1);
+ tmp &= ~(7 << 4);
+ writel(tmp, NFC_V3_CONFIG1);
+
+ /* transfer data from NFC ram to nand */
+ writel(ops, NFC_V3_LAUNCH);
+
+ wait_op_done(host, false);
+}
+
+static void send_page_v2(struct mtd_info *mtd, unsigned int ops)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+ /* NANDFC buffer 0 is used for page read/write */
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+ writew(ops, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+}
+
+static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ int bufs, i;
+
+ if (mtd->writesize > 512)
+ bufs = 4;
+ else
+ bufs = 1;
+
+ for (i = 0; i < bufs; i++) {
+
+ /* NANDFC buffer 0 is used for page read/write */
+ writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
+
+ writew(ops, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+ }
+}
+
+static void send_read_id_v3(struct mxc_nand_host *host)
+{
+ /* Read ID into main buffer */
+ writel(NFC_ID, NFC_V3_LAUNCH);
+
+ wait_op_done(host, true);
+
+ memcpy32_fromio(host->data_buf, host->main_area0, 16);
+}
+
+/* Request the NANDFC to perform a read of the NAND device ID. */
+static void send_read_id_v1_v2(struct mxc_nand_host *host)
+{
+ /* NANDFC buffer 0 is used for device ID output */
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+ writew(NFC_ID, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+
+ memcpy32_fromio(host->data_buf, host->main_area0, 16);
+}
+
+static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
+{
+ writew(NFC_STATUS, NFC_V3_LAUNCH);
+ wait_op_done(host, true);
+
+ return readl(NFC_V3_CONFIG1) >> 16;
+}
+
+/* This function requests the NANDFC to perform a read of the
+ * NAND device status and returns the current status. */
+static uint16_t get_dev_status_v1_v2(struct mxc_nand_host *host)
+{
+ void __iomem *main_buf = host->main_area0;
+ uint32_t store;
+ uint16_t ret;
+
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+ /*
+ * The device status is stored in main_area0. To
+ * prevent corruption of the buffer save the value
+ * and restore it afterwards.
+ */
+ store = readl(main_buf);
+
+ writew(NFC_STATUS, NFC_V1_V2_CONFIG2);
+ wait_op_done(host, true);
+
+ ret = readw(main_buf);
+
+ writel(store, main_buf);
+
+ return ret;
+}
+
+static void mxc_nand_enable_hwecc_v1_v2(struct nand_chip *chip, bool enable)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ uint16_t config1;
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return;
+
+ config1 = readw(NFC_V1_V2_CONFIG1);
+
+ if (enable)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+ else
+ config1 &= ~NFC_V1_V2_CONFIG1_ECC_EN;
+
+ writew(config1, NFC_V1_V2_CONFIG1);
+}
+
+static void mxc_nand_enable_hwecc_v3(struct nand_chip *chip, bool enable)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ uint32_t config2;
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return;
+
+ config2 = readl(NFC_V3_CONFIG2);
+
+ if (enable)
+ config2 |= NFC_V3_CONFIG2_ECC_EN;
+ else
+ config2 &= ~NFC_V3_CONFIG2_ECC_EN;
+
+ writel(config2, NFC_V3_CONFIG2);
+}
+
+/* This function is used by the upper layer to check if the device is ready */
+static int mxc_nand_dev_ready(struct nand_chip *chip)
+{
+ /*
+ * NFC handles R/B internally. Therefore, this function
+ * always returns status as ready.
+ */
+ return 1;
+}
+
+static int mxc_nand_read_page_v1(struct nand_chip *chip, void *buf, void *oob,
+ bool ecc, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ unsigned int bitflips_corrected = 0;
+ int no_subpages;
+ int i;
+
+ host->devtype_data->enable_hwecc(chip, ecc);
+
+ host->devtype_data->send_cmd(host, NAND_CMD_READ0, false);
+ mxc_do_addr_cycle(mtd, 0, page);
+
+ if (mtd->writesize > 512)
+ host->devtype_data->send_cmd(host, NAND_CMD_READSTART, true);
+
+ no_subpages = mtd->writesize >> 9;
+
+ for (i = 0; i < no_subpages; i++) {
+ uint16_t ecc_stats;
+
+ /* NANDFC buffer 0 is used for page read/write */
+ writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
+
+ writew(NFC_OUTPUT, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+
+ ecc_stats = get_ecc_status_v1(host);
+
+ ecc_stats >>= 2;
+
+ if (buf && ecc) {
+ switch (ecc_stats & 0x3) {
+ case 0:
+ default:
+ break;
+ case 1:
+ mtd->ecc_stats.corrected++;
+ bitflips_corrected = 1;
+ break;
+ case 2:
+ mtd->ecc_stats.failed++;
+ break;
+ }
+ }
+ }
+
+ if (buf)
+ memcpy32_fromio(buf, host->main_area0, mtd->writesize);
+ if (oob)
+ copy_spare(mtd, true, oob);
+
+ return bitflips_corrected;
+}
+
+static int mxc_nand_read_page_v2_v3(struct nand_chip *chip, void *buf,
+ void *oob, bool ecc, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ unsigned int max_bitflips = 0;
+ u32 ecc_stat, err;
+ int no_subpages;
+ u8 ecc_bit_mask, err_limit;
+
+ host->devtype_data->enable_hwecc(chip, ecc);
+
+ host->devtype_data->send_cmd(host, NAND_CMD_READ0, false);
+ mxc_do_addr_cycle(mtd, 0, page);
+
+ if (mtd->writesize > 512)
+ host->devtype_data->send_cmd(host,
+ NAND_CMD_READSTART, true);
+
+ host->devtype_data->send_page(mtd, NFC_OUTPUT);
+
+ if (buf)
+ memcpy32_fromio(buf, host->main_area0, mtd->writesize);
+ if (oob)
+ copy_spare(mtd, true, oob);
+
+ ecc_bit_mask = (host->eccsize == 4) ? 0x7 : 0xf;
+ err_limit = (host->eccsize == 4) ? 0x4 : 0x8;
+
+ no_subpages = mtd->writesize >> 9;
+
+ ecc_stat = host->devtype_data->get_ecc_status(host);
+
+ do {
+ err = ecc_stat & ecc_bit_mask;
+ if (err > err_limit) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += err;
+ max_bitflips = max_t(unsigned int, max_bitflips, err);
+ }
+
+ ecc_stat >>= 4;
+ } while (--no_subpages);
+
+ return max_bitflips;
+}
+
+static int mxc_nand_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ void *oob_buf;
+
+ if (oob_required)
+ oob_buf = chip->oob_poi;
+ else
+ oob_buf = NULL;
+
+ return host->devtype_data->read_page(chip, buf, oob_buf, 1, page);
+}
+
+static int mxc_nand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ void *oob_buf;
+
+ if (oob_required)
+ oob_buf = chip->oob_poi;
+ else
+ oob_buf = NULL;
+
+ return host->devtype_data->read_page(chip, buf, oob_buf, 0, page);
+}
+
+static int mxc_nand_read_oob(struct nand_chip *chip, int page)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+
+ return host->devtype_data->read_page(chip, NULL, chip->oob_poi, 0,
+ page);
+}
+
+static int mxc_nand_write_page(struct nand_chip *chip, const uint8_t *buf,
+ bool ecc, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+
+ host->devtype_data->enable_hwecc(chip, ecc);
+
+ host->devtype_data->send_cmd(host, NAND_CMD_SEQIN, false);
+ mxc_do_addr_cycle(mtd, 0, page);
+
+ memcpy32_toio(host->main_area0, buf, mtd->writesize);
+ copy_spare(mtd, false, chip->oob_poi);
+
+ host->devtype_data->send_page(mtd, NFC_INPUT);
+ host->devtype_data->send_cmd(host, NAND_CMD_PAGEPROG, true);
+ mxc_do_addr_cycle(mtd, 0, page);
+
+ return 0;
+}
+
+static int mxc_nand_write_page_ecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ return mxc_nand_write_page(chip, buf, true, page);
+}
+
+static int mxc_nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ return mxc_nand_write_page(chip, buf, false, page);
+}
+
+static int mxc_nand_write_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+
+ memset(host->data_buf, 0xff, mtd->writesize);
+
+ return mxc_nand_write_page(chip, host->data_buf, false, page);
+}
+
+static u_char mxc_nand_read_byte(struct nand_chip *nand_chip)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ uint8_t ret;
+
+ /* Check for status request */
+ if (host->status_request)
+ return host->devtype_data->get_dev_status(host) & 0xFF;
+
+ if (nand_chip->options & NAND_BUSWIDTH_16) {
+ /* only take the lower byte of each word */
+ ret = *(uint16_t *)(host->data_buf + host->buf_start);
+
+ host->buf_start += 2;
+ } else {
+ ret = *(uint8_t *)(host->data_buf + host->buf_start);
+ host->buf_start++;
+ }
+
+ dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
+ return ret;
+}
+
+/* Write data of length len to buffer buf. The data to be
+ * written to the NAND Flash is first copied to the RAM buffer. After the
+ * Data Input Operation by the NFC, the data is written to the NAND Flash. */
+static void mxc_nand_write_buf(struct nand_chip *nand_chip, const u_char *buf,
+ int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ u16 col = host->buf_start;
+ int n = mtd->oobsize + mtd->writesize - col;
+
+ n = min(n, len);
+
+ memcpy(host->data_buf + col, buf, n);
+
+ host->buf_start += n;
+}
+
+/* Read the data buffer from the NAND Flash. To read the data from the NAND
+ * Flash, the data output cycle is first initiated by the NFC, which copies
+ * the data to the RAM buffer. This data of length len is then copied to
+ * buffer buf.
+ */
+static void mxc_nand_read_buf(struct nand_chip *nand_chip, u_char *buf,
+ int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ u16 col = host->buf_start;
+ int n = mtd->oobsize + mtd->writesize - col;
+
+ n = min(n, len);
+
+ memcpy(buf, host->data_buf + col, n);
+
+ host->buf_start += n;
+}
+
+/* This function is used by the upper layer to select and
+ * deselect the NAND chip */
+static void mxc_nand_select_chip_v1_v3(struct nand_chip *nand_chip, int chip)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+ if (chip == -1) {
+ /* Disable the NFC clock */
+ if (host->clk_act) {
+ clk_disable_unprepare(host->clk);
+ host->clk_act = 0;
+ }
+ return;
+ }
+
+ if (!host->clk_act) {
+ /* Enable the NFC clock */
+ clk_prepare_enable(host->clk);
+ host->clk_act = 1;
+ }
+}
+
+static void mxc_nand_select_chip_v2(struct nand_chip *nand_chip, int chip)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+ if (chip == -1) {
+ /* Disable the NFC clock */
+ if (host->clk_act) {
+ clk_disable_unprepare(host->clk);
+ host->clk_act = 0;
+ }
+ return;
+ }
+
+ if (!host->clk_act) {
+ /* Enable the NFC clock */
+ clk_prepare_enable(host->clk);
+ host->clk_act = 1;
+ }
+
+ host->active_cs = chip;
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+}
+
+#define MXC_V1_ECCBYTES 5
+
+static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+ if (section >= nand_chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 6;
+ oobregion->length = MXC_V1_ECCBYTES;
+
+ return 0;
+}
+
+static int mxc_v1_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+ if (section > nand_chip->ecc.steps)
+ return -ERANGE;
+
+ if (!section) {
+ if (mtd->writesize <= 512) {
+ oobregion->offset = 0;
+ oobregion->length = 5;
+ } else {
+ oobregion->offset = 2;
+ oobregion->length = 4;
+ }
+ } else {
+ oobregion->offset = ((section - 1) * 16) + MXC_V1_ECCBYTES + 6;
+ if (section < nand_chip->ecc.steps)
+ oobregion->length = (section * 16) + 6 -
+ oobregion->offset;
+ else
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mxc_v1_ooblayout_ops = {
+ .ecc = mxc_v1_ooblayout_ecc,
+ .free = mxc_v1_ooblayout_free,
+};
+
+static int mxc_v2_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
+
+ if (section >= nand_chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * stepsize) + 7;
+ oobregion->length = nand_chip->ecc.bytes;
+
+ return 0;
+}
+
+static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
+
+ if (section >= nand_chip->ecc.steps)
+ return -ERANGE;
+
+ if (!section) {
+ if (mtd->writesize <= 512) {
+ oobregion->offset = 0;
+ oobregion->length = 5;
+ } else {
+ oobregion->offset = 2;
+ oobregion->length = 4;
+ }
+ } else {
+ oobregion->offset = section * stepsize;
+ oobregion->length = 7;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mxc_v2_ooblayout_ops = {
+ .ecc = mxc_v2_ooblayout_ecc,
+ .free = mxc_v2_ooblayout_free,
+};
+
+/*
+ * v2 and v3 type controllers can do 4bit or 8bit ecc depending
+ * on how much oob the nand chip has. For 8bit ecc we need at least
+ * 26 bytes of oob data per 512 byte block.
+ */
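+/*
+ * Example (geometries assumed for illustration): a 2048-byte page with
+ * 64 bytes of oob gives 64 * 512 / 2048 = 16 oob bytes per 512 byte block,
+ * so 4bit ecc is used; a 4096-byte page with 224 bytes of oob gives
+ * 224 * 512 / 4096 = 28 >= 26, so 8bit ecc is used.
+ */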
+static int get_eccsize(struct mtd_info *mtd)
+{
+ int oobbytes_per_512 = 0;
+
+ oobbytes_per_512 = mtd->oobsize * 512 / mtd->writesize;
+
+ if (oobbytes_per_512 < 26)
+ return 4;
+ else
+ return 8;
+}
+
+static void preset_v1(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ uint16_t config1 = 0;
+
+ if (nand_chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST &&
+ mtd->writesize)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+ if (!host->devtype_data->irqpending_quirk)
+ config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ host->eccsize = 1;
+
+ writew(config1, NFC_V1_V2_CONFIG1);
+ /* preset operation */
+
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, NFC_V1_V2_CONFIG);
+
+ /* Blocks to be unlocked */
+ writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
+ writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static int mxc_nand_v2_setup_interface(struct nand_chip *chip, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ int tRC_min_ns, tRC_ps, ret;
+ unsigned long rate, rate_round;
+ const struct nand_sdr_timings *timings;
+ u16 config1;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
+ config1 = readw(NFC_V1_V2_CONFIG1);
+
+ tRC_min_ns = timings->tRC_min / 1000;
+ rate = 1000000000 / tRC_min_ns;
+
+ /*
+ * For tRC < 30ns we have to use EDO mode. In this case the controller
+ * does one access per clock cycle. Otherwise the controller does one
+ * access in two clock cycles, thus we have to double the rate to the
+ * controller.
+ */
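+ /*
+ * Example (timings assumed for illustration): tRC_min = 25 ns gives a
+ * 40 MHz clock in EDO mode, while tRC_min = 50 ns gives 20 MHz, which
+ * is doubled to a 40 MHz controller clock.
+ */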
+ if (tRC_min_ns < 30) {
+ rate_round = clk_round_rate(host->clk, rate);
+ config1 |= NFC_V2_CONFIG1_ONE_CYCLE;
+ tRC_ps = 1000000000 / (rate_round / 1000);
+ } else {
+ rate *= 2;
+ rate_round = clk_round_rate(host->clk, rate);
+ config1 &= ~NFC_V2_CONFIG1_ONE_CYCLE;
+ tRC_ps = 1000000000 / (rate_round / 1000 / 2);
+ }
+
+ /*
+ * The timing values compared against are from the i.MX25 Automotive
+ * datasheet, Table 50. NFC Timing Parameters
+ */
+ if (timings->tCLS_min > tRC_ps - 1000 ||
+ timings->tCLH_min > tRC_ps - 2000 ||
+ timings->tCS_min > tRC_ps - 1000 ||
+ timings->tCH_min > tRC_ps - 2000 ||
+ timings->tWP_min > tRC_ps - 1500 ||
+ timings->tALS_min > tRC_ps ||
+ timings->tALH_min > tRC_ps - 3000 ||
+ timings->tDS_min > tRC_ps ||
+ timings->tDH_min > tRC_ps - 5000 ||
+ timings->tWC_min > 2 * tRC_ps ||
+ timings->tWH_min > tRC_ps - 2500 ||
+ timings->tRR_min > 6 * tRC_ps ||
+ timings->tRP_min > 3 * tRC_ps / 2 ||
+ timings->tRC_min > 2 * tRC_ps ||
+ timings->tREH_min > (tRC_ps / 2) - 2500) {
+ dev_dbg(host->dev, "Timing out of bounds\n");
+ return -EINVAL;
+ }
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ ret = clk_set_rate(host->clk, rate);
+ if (ret)
+ return ret;
+
+ writew(config1, NFC_V1_V2_CONFIG1);
+
+ dev_dbg(host->dev, "Setting rate to %ldHz, %s mode\n", rate_round,
+ config1 & NFC_V2_CONFIG1_ONE_CYCLE ? "One cycle (EDO)" :
+ "normal");
+
+ return 0;
+}
+
+static void preset_v2(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ uint16_t config1 = 0;
+
+ config1 |= NFC_V2_CONFIG1_FP_INT;
+
+ if (!host->devtype_data->irqpending_quirk)
+ config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ if (mtd->writesize) {
+ uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
+
+ if (nand_chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+ host->eccsize = get_eccsize(mtd);
+ if (host->eccsize == 4)
+ config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
+
+ config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
+ } else {
+ host->eccsize = 1;
+ }
+
+ writew(config1, NFC_V1_V2_CONFIG1);
+ /* preset operation */
+
+ /* spare area size in 16-bit half-words */
+ writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
+
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, NFC_V1_V2_CONFIG);
+
+ /* Blocks to be unlocked */
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static void preset_v3(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ uint32_t config2, config3;
+ int i, addr_phases;
+
+ writel(NFC_V3_CONFIG1_RBA(0), NFC_V3_CONFIG1);
+ writel(NFC_V3_IPC_CREQ, NFC_V3_IPC);
+
+ /* Unlock the internal RAM Buffer */
+ writel(NFC_V3_WRPROT_BLS_UNLOCK | NFC_V3_WRPROT_UNLOCK,
+ NFC_V3_WRPROT);
+
+ /* Blocks to be unlocked */
+ for (i = 0; i < NAND_MAX_CHIPS; i++)
+ writel(0xffff << 16, NFC_V3_WRPROT_UNLOCK_BLK_ADD0 + (i << 2));
+
+ writel(0, NFC_V3_IPC);
+
+ config2 = NFC_V3_CONFIG2_ONE_CYCLE |
+ NFC_V3_CONFIG2_2CMD_PHASES |
+ NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) |
+ NFC_V3_CONFIG2_ST_CMD(0x70) |
+ NFC_V3_CONFIG2_INT_MSK |
+ NFC_V3_CONFIG2_NUM_ADDR_PHASE0;
+
+ addr_phases = fls(chip->pagemask) >> 3;
+
+ if (mtd->writesize == 2048) {
+ config2 |= NFC_V3_CONFIG2_PS_2048;
+ config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
+ } else if (mtd->writesize == 4096) {
+ config2 |= NFC_V3_CONFIG2_PS_4096;
+ config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
+ } else {
+ config2 |= NFC_V3_CONFIG2_PS_512;
+ config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases - 1);
+ }
+
+ if (mtd->writesize) {
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
+ config2 |= NFC_V3_CONFIG2_ECC_EN;
+
+ config2 |= NFC_V3_CONFIG2_PPB(
+ ffs(mtd->erasesize / mtd->writesize) - 6,
+ host->devtype_data->ppb_shift);
+ host->eccsize = get_eccsize(mtd);
+ if (host->eccsize == 8)
+ config2 |= NFC_V3_CONFIG2_ECC_MODE_8;
+ }
+
+ writel(config2, NFC_V3_CONFIG2);
+
+ config3 = NFC_V3_CONFIG3_NUM_OF_DEVICES(0) |
+ NFC_V3_CONFIG3_NO_SDMA |
+ NFC_V3_CONFIG3_RBB_MODE |
+ NFC_V3_CONFIG3_SBB(6) | /* Reset default */
+ NFC_V3_CONFIG3_ADD_OP(0);
+
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ config3 |= NFC_V3_CONFIG3_FW8;
+
+ writel(config3, NFC_V3_CONFIG3);
+
+ writel(0, NFC_V3_DELAY_LINE);
+}
+
+/* Used by the upper layer to write a command to the NAND Flash for the
+ * different operations to be carried out on the NAND Flash */
+static void mxc_nand_command(struct nand_chip *nand_chip, unsigned command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+ dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+ command, column, page_addr);
+
+ /* Reset command state information */
+ host->status_request = false;
+
+ /* Command pre-processing step */
+ switch (command) {
+ case NAND_CMD_RESET:
+ host->devtype_data->preset(mtd);
+ host->devtype_data->send_cmd(host, command, false);
+ break;
+
+ case NAND_CMD_STATUS:
+ host->buf_start = 0;
+ host->status_request = true;
+
+ host->devtype_data->send_cmd(host, command, true);
+ WARN_ONCE(column != -1 || page_addr != -1,
+ "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+ command, column, page_addr);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ break;
+
+ case NAND_CMD_READID:
+ host->devtype_data->send_cmd(host, command, true);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ host->devtype_data->send_read_id(host);
+ host->buf_start = 0;
+ break;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ host->devtype_data->send_cmd(host, command, false);
+ WARN_ONCE(column != -1,
+ "Unexpected column value (cmd=%u, col=%d)\n",
+ command, column);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+
+ break;
+ case NAND_CMD_PARAM:
+ host->devtype_data->send_cmd(host, command, false);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ host->devtype_data->send_page(mtd, NFC_OUTPUT);
+ memcpy32_fromio(host->data_buf, host->main_area0, 512);
+ host->buf_start = 0;
+ break;
+ default:
+ WARN_ONCE(1, "Unimplemented command (cmd=%u)\n",
+ command);
+ break;
+ }
+}
+
+static int mxc_nand_set_features(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ int i;
+
+ host->buf_start = 0;
+
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->legacy.write_byte(chip, subfeature_param[i]);
+
+ memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
+ host->devtype_data->send_cmd(host, NAND_CMD_SET_FEATURES, false);
+ mxc_do_addr_cycle(mtd, addr, -1);
+ host->devtype_data->send_page(mtd, NFC_INPUT);
+
+ return 0;
+}
+
+static int mxc_nand_get_features(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ int i;
+
+ host->devtype_data->send_cmd(host, NAND_CMD_GET_FEATURES, false);
+ mxc_do_addr_cycle(mtd, addr, -1);
+ host->devtype_data->send_page(mtd, NFC_OUTPUT);
+ memcpy32_fromio(host->data_buf, host->main_area0, 512);
+ host->buf_start = 0;
+
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ *subfeature_param++ = chip->legacy.read_byte(chip);
+
+ return 0;
+}
+
+/*
+ * The generic flash bbt descriptors overlap with our ecc
+ * hardware, so define some i.MX specific ones.
+ */
+static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+/* v1 + irqpending_quirk: i.MX21 */
+static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
+ .preset = preset_v1,
+ .read_page = mxc_nand_read_page_v1,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v1,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v1,
+ .ooblayout = &mxc_v1_ooblayout_ops,
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .enable_hwecc = mxc_nand_enable_hwecc_v1_v2,
+ .irqpending_quirk = 1,
+ .needs_ip = 0,
+ .regs_offset = 0xe00,
+ .spare0_offset = 0x800,
+ .spare_len = 16,
+ .eccbytes = 3,
+ .eccsize = 1,
+};
+
+/* v1 + !irqpending_quirk: i.MX27, i.MX31 */
+static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
+ .preset = preset_v1,
+ .read_page = mxc_nand_read_page_v1,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v1,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v1,
+ .ooblayout = &mxc_v1_ooblayout_ops,
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .enable_hwecc = mxc_nand_enable_hwecc_v1_v2,
+ .irqpending_quirk = 0,
+ .needs_ip = 0,
+ .regs_offset = 0xe00,
+ .spare0_offset = 0x800,
+ .axi_offset = 0,
+ .spare_len = 16,
+ .eccbytes = 3,
+ .eccsize = 1,
+};
+
+/* v21: i.MX25, i.MX35 */
+static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
+ .preset = preset_v2,
+ .read_page = mxc_nand_read_page_v2_v3,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v2,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v2,
+ .ooblayout = &mxc_v2_ooblayout_ops,
+ .select_chip = mxc_nand_select_chip_v2,
+ .setup_interface = mxc_nand_v2_setup_interface,
+ .enable_hwecc = mxc_nand_enable_hwecc_v1_v2,
+ .irqpending_quirk = 0,
+ .needs_ip = 0,
+ .regs_offset = 0x1e00,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0,
+ .spare_len = 64,
+ .eccbytes = 9,
+ .eccsize = 0,
+};
+
+/* v3.2a: i.MX51 */
+static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
+ .preset = preset_v3,
+ .read_page = mxc_nand_read_page_v2_v3,
+ .send_cmd = send_cmd_v3,
+ .send_addr = send_addr_v3,
+ .send_page = send_page_v3,
+ .send_read_id = send_read_id_v3,
+ .get_dev_status = get_dev_status_v3,
+ .check_int = check_int_v3,
+ .irq_control = irq_control_v3,
+ .get_ecc_status = get_ecc_status_v3,
+ .ooblayout = &mxc_v2_ooblayout_ops,
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .enable_hwecc = mxc_nand_enable_hwecc_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 1,
+ .regs_offset = 0,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0x1e00,
+ .spare_len = 64,
+ .eccbytes = 0,
+ .eccsize = 0,
+ .ppb_shift = 7,
+};
+
+/* v3.2b: i.MX53 */
+static const struct mxc_nand_devtype_data imx53_nand_devtype_data = {
+ .preset = preset_v3,
+ .read_page = mxc_nand_read_page_v2_v3,
+ .send_cmd = send_cmd_v3,
+ .send_addr = send_addr_v3,
+ .send_page = send_page_v3,
+ .send_read_id = send_read_id_v3,
+ .get_dev_status = get_dev_status_v3,
+ .check_int = check_int_v3,
+ .irq_control = irq_control_v3,
+ .get_ecc_status = get_ecc_status_v3,
+ .ooblayout = &mxc_v2_ooblayout_ops,
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .enable_hwecc = mxc_nand_enable_hwecc_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 1,
+ .regs_offset = 0,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0x1e00,
+ .spare_len = 64,
+ .eccbytes = 0,
+ .eccsize = 0,
+ .ppb_shift = 8,
+};
+
+static inline int is_imx21_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx21_nand_devtype_data;
+}
+
+static inline int is_imx27_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx27_nand_devtype_data;
+}
+
+static inline int is_imx25_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx25_nand_devtype_data;
+}
+
+static const struct of_device_id mxcnd_dt_ids[] = {
+ { .compatible = "fsl,imx21-nand", .data = &imx21_nand_devtype_data, },
+ { .compatible = "fsl,imx27-nand", .data = &imx27_nand_devtype_data, },
+ { .compatible = "fsl,imx25-nand", .data = &imx25_nand_devtype_data, },
+ { .compatible = "fsl,imx51-nand", .data = &imx51_nand_devtype_data, },
+ { .compatible = "fsl,imx53-nand", .data = &imx53_nand_devtype_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxcnd_dt_ids);
+
+static int mxcnd_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ struct device *dev = mtd->dev.parent;
+
+ chip->ecc.bytes = host->devtype_data->eccbytes;
+ host->eccsize = host->devtype_data->eccsize;
+ chip->ecc.size = 512;
+ mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ chip->ecc.read_page = mxc_nand_read_page;
+ chip->ecc.read_page_raw = mxc_nand_read_page_raw;
+ chip->ecc.read_oob = mxc_nand_read_oob;
+ chip->ecc.write_page = mxc_nand_write_page_ecc;
+ chip->ecc.write_page_raw = mxc_nand_write_page_raw;
+ chip->ecc.write_oob = mxc_nand_write_oob;
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /* Allocate the right size buffer now */
+ devm_kfree(dev, (void *)host->data_buf);
+ host->data_buf = devm_kzalloc(dev, mtd->writesize + mtd->oobsize,
+ GFP_KERNEL);
+ if (!host->data_buf)
+ return -ENOMEM;
+
+ /* Call preset again, with the correct writesize this time */
+ host->devtype_data->preset(mtd);
+
+ if (!chip->ecc.bytes) {
+ if (host->eccsize == 8)
+ chip->ecc.bytes = 18;
+ else if (host->eccsize == 4)
+ chip->ecc.bytes = 9;
+ }
+
+ /*
+ * Experimentation shows that i.MX NFC can only handle up to 218 oob
+ * bytes. Limit used_oobsize to 218 so as to not confuse copy_spare()
+ * into copying invalid data to/from the spare IO buffer, as this
+ * might cause ECC data corruption when doing sub-page write to a
+ * partially written page.
+ */
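+ /* e.g. (assumed geometry) a 4 KiB page chip with 256 oob bytes is clamped to 218 */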
+ host->used_oobsize = min(mtd->oobsize, 218U);
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ if (is_imx21_nfc(host) || is_imx27_nfc(host))
+ chip->ecc.strength = 1;
+ else
+ chip->ecc.strength = (host->eccsize == 4) ? 4 : 8;
+ }
+
+ return 0;
+}
+
+static int mxcnd_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+
+ return host->devtype_data->setup_interface(chip, chipnr, conf);
+}
+
+static const struct nand_controller_ops mxcnd_controller_ops = {
+ .attach_chip = mxcnd_attach_chip,
+ .setup_interface = mxcnd_setup_interface,
+};
+
+static int mxcnd_probe(struct platform_device *pdev)
+{
+ struct nand_chip *this;
+ struct mtd_info *mtd;
+ struct mxc_nand_host *host;
+ int err = 0;
+
+ /* Allocate memory for MTD device structure and private data */
+ host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host),
+ GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ /* allocate a temporary buffer for the nand_scan_ident() */
+ host->data_buf = devm_kzalloc(&pdev->dev, PAGE_SIZE, GFP_KERNEL);
+ if (!host->data_buf)
+ return -ENOMEM;
+
+ host->dev = &pdev->dev;
+ /* structures must be linked */
+ this = &host->nand;
+ mtd = nand_to_mtd(this);
+ mtd->dev.parent = &pdev->dev;
+ mtd->name = DRIVER_NAME;
+
+ /* 5 us command delay time */
+ this->legacy.chip_delay = 5;
+
+ nand_set_controller_data(this, host);
+ nand_set_flash_node(this, pdev->dev.of_node);
+ this->legacy.dev_ready = mxc_nand_dev_ready;
+ this->legacy.cmdfunc = mxc_nand_command;
+ this->legacy.read_byte = mxc_nand_read_byte;
+ this->legacy.write_buf = mxc_nand_write_buf;
+ this->legacy.read_buf = mxc_nand_read_buf;
+ this->legacy.set_features = mxc_nand_set_features;
+ this->legacy.get_features = mxc_nand_get_features;
+
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
+
+ host->devtype_data = device_get_match_data(&pdev->dev);
+
+ if (!host->devtype_data->setup_interface)
+ this->options |= NAND_KEEP_TIMINGS;
+
+ if (host->devtype_data->needs_ip) {
+ host->regs_ip = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->regs_ip))
+ return PTR_ERR(host->regs_ip);
+
+ host->base = devm_platform_ioremap_resource(pdev, 1);
+ } else {
+ host->base = devm_platform_ioremap_resource(pdev, 0);
+ }
+
+ if (IS_ERR(host->base))
+ return PTR_ERR(host->base);
+
+ host->main_area0 = host->base;
+
+ if (host->devtype_data->regs_offset)
+ host->regs = host->base + host->devtype_data->regs_offset;
+ host->spare0 = host->base + host->devtype_data->spare0_offset;
+ if (host->devtype_data->axi_offset)
+ host->regs_axi = host->base + host->devtype_data->axi_offset;
+
+ this->legacy.select_chip = host->devtype_data->select_chip;
+
+ init_completion(&host->op_completion);
+
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0)
+ return host->irq;
+
+ /*
+ * Use host->devtype_data->irq_control() here instead of irq_control()
+ * because we must not disable_irq_nosync without having requested the
+ * irq.
+ */
+ host->devtype_data->irq_control(host, 0);
+
+ err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq,
+ 0, DRIVER_NAME, host);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(host->clk);
+ if (err)
+ return err;
+ host->clk_act = 1;
+
+ /*
+ * Now that we "own" the interrupt make sure the interrupt mask bit is
+ * cleared on i.MX21. Otherwise we can't read the interrupt status bit
+ * on this machine.
+ */
+ if (host->devtype_data->irqpending_quirk) {
+ disable_irq_nosync(host->irq);
+ host->devtype_data->irq_control(host, 1);
+ }
+
+ /* Scan the NAND device */
+ this->legacy.dummy_controller.ops = &mxcnd_controller_ops;
+ err = nand_scan(this, is_imx25_nfc(host) ? 4 : 1);
+ if (err)
+ goto escan;
+
+ /* Register the partitions */
+ err = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
+ if (err)
+ goto cleanup_nand;
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(this);
+escan:
+ if (host->clk_act)
+ clk_disable_unprepare(host->clk);
+
+ return err;
+}
+
+static void mxcnd_remove(struct platform_device *pdev)
+{
+ struct mxc_nand_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->nand;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ if (host->clk_act)
+ clk_disable_unprepare(host->clk);
+}
+
+static struct platform_driver mxcnd_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = mxcnd_dt_ids,
+ },
+ .probe = mxcnd_probe,
+ .remove_new = mxcnd_remove,
+};
+module_platform_driver(mxcnd_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MXC NAND MTD driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c
new file mode 100644
index 0000000000..be8050e84b
--- /dev/null
+++ b/drivers/mtd/nand/raw/mxic_nand.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Macronix International Co., Ltd.
+ *
+ * Author:
+ * Mason Yang <masonccyang@mxic.com.tw>
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/platform_device.h>
+
+#include "internals.h"
+
+#define HC_CFG 0x0
+#define HC_CFG_IF_CFG(x) ((x) << 27)
+#define HC_CFG_DUAL_SLAVE BIT(31)
+#define HC_CFG_INDIVIDUAL BIT(30)
+#define HC_CFG_NIO(x) (((x) / 4) << 27)
+#define HC_CFG_TYPE(s, t) ((t) << (23 + ((s) * 2)))
+#define HC_CFG_TYPE_SPI_NOR 0
+#define HC_CFG_TYPE_SPI_NAND 1
+#define HC_CFG_TYPE_SPI_RAM 2
+#define HC_CFG_TYPE_RAW_NAND 3
+#define HC_CFG_SLV_ACT(x) ((x) << 21)
+#define HC_CFG_CLK_PH_EN BIT(20)
+#define HC_CFG_CLK_POL_INV BIT(19)
+#define HC_CFG_BIG_ENDIAN BIT(18)
+#define HC_CFG_DATA_PASS BIT(17)
+#define HC_CFG_IDLE_SIO_LVL(x) ((x) << 16)
+#define HC_CFG_MAN_START_EN BIT(3)
+#define HC_CFG_MAN_START BIT(2)
+#define HC_CFG_MAN_CS_EN BIT(1)
+#define HC_CFG_MAN_CS_ASSERT BIT(0)
+
+#define INT_STS 0x4
+#define INT_STS_EN 0x8
+#define INT_SIG_EN 0xc
+#define INT_STS_ALL GENMASK(31, 0)
+#define INT_RDY_PIN BIT(26)
+#define INT_RDY_SR BIT(25)
+#define INT_LNR_SUSP BIT(24)
+#define INT_ECC_ERR BIT(17)
+#define INT_CRC_ERR BIT(16)
+#define INT_LWR_DIS BIT(12)
+#define INT_LRD_DIS BIT(11)
+#define INT_SDMA_INT BIT(10)
+#define INT_DMA_FINISH BIT(9)
+#define INT_RX_NOT_FULL BIT(3)
+#define INT_RX_NOT_EMPTY BIT(2)
+#define INT_TX_NOT_FULL BIT(1)
+#define INT_TX_EMPTY BIT(0)
+
+#define HC_EN 0x10
+#define HC_EN_BIT BIT(0)
+
+#define TXD(x) (0x14 + ((x) * 4))
+#define RXD 0x24
+
+#define SS_CTRL(s) (0x30 + ((s) * 4))
+#define LRD_CFG 0x44
+#define LWR_CFG 0x80
+#define RWW_CFG 0x70
+#define OP_READ BIT(23)
+#define OP_DUMMY_CYC(x) ((x) << 17)
+#define OP_ADDR_BYTES(x) ((x) << 14)
+#define OP_CMD_BYTES(x) (((x) - 1) << 13)
+#define OP_OCTA_CRC_EN BIT(12)
+#define OP_DQS_EN BIT(11)
+#define OP_ENHC_EN BIT(10)
+#define OP_PREAMBLE_EN BIT(9)
+#define OP_DATA_DDR BIT(8)
+#define OP_DATA_BUSW(x) ((x) << 6)
+#define OP_ADDR_DDR BIT(5)
+#define OP_ADDR_BUSW(x) ((x) << 3)
+#define OP_CMD_DDR BIT(2)
+#define OP_CMD_BUSW(x) (x)
+#define OP_BUSW_1 0
+#define OP_BUSW_2 1
+#define OP_BUSW_4 2
+#define OP_BUSW_8 3
+
+#define OCTA_CRC 0x38
+#define OCTA_CRC_IN_EN(s) BIT(3 + ((s) * 16))
+#define OCTA_CRC_CHUNK(s, x) ((fls((x) / 32)) << (1 + ((s) * 16)))
+#define OCTA_CRC_OUT_EN(s) BIT(0 + ((s) * 16))
+
+#define ONFI_DIN_CNT(s) (0x3c + (s))
+
+#define LRD_CTRL 0x48
+#define RWW_CTRL 0x74
+#define LWR_CTRL 0x84
+#define LMODE_EN BIT(31)
+#define LMODE_SLV_ACT(x) ((x) << 21)
+#define LMODE_CMD1(x) ((x) << 8)
+#define LMODE_CMD0(x) (x)
+
+#define LRD_ADDR 0x4c
+#define LWR_ADDR 0x88
+#define LRD_RANGE 0x50
+#define LWR_RANGE 0x8c
+
+#define AXI_SLV_ADDR 0x54
+
+#define DMAC_RD_CFG 0x58
+#define DMAC_WR_CFG 0x94
+#define DMAC_CFG_PERIPH_EN BIT(31)
+#define DMAC_CFG_ALLFLUSH_EN BIT(30)
+#define DMAC_CFG_LASTFLUSH_EN BIT(29)
+#define DMAC_CFG_QE(x) (((x) + 1) << 16)
+#define DMAC_CFG_BURST_LEN(x) (((x) + 1) << 12)
+#define DMAC_CFG_BURST_SZ(x) ((x) << 8)
+#define DMAC_CFG_DIR_READ BIT(1)
+#define DMAC_CFG_START BIT(0)
+
+#define DMAC_RD_CNT 0x5c
+#define DMAC_WR_CNT 0x98
+
+#define SDMA_ADDR 0x60
+
+#define DMAM_CFG 0x64
+#define DMAM_CFG_START BIT(31)
+#define DMAM_CFG_CONT BIT(30)
+#define DMAM_CFG_SDMA_GAP(x) (fls((x) / 8192) << 2)
+#define DMAM_CFG_DIR_READ BIT(1)
+#define DMAM_CFG_EN BIT(0)
+
+#define DMAM_CNT 0x68
+
+#define LNR_TIMER_TH 0x6c
+
+#define RDM_CFG0 0x78
+#define RDM_CFG0_POLY(x) (x)
+
+#define RDM_CFG1 0x7c
+#define RDM_CFG1_RDM_EN BIT(31)
+#define RDM_CFG1_SEED(x) (x)
+
+#define LWR_SUSP_CTRL 0x90
+#define LWR_SUSP_CTRL_EN BIT(31)
+
+#define DMAS_CTRL 0x9c
+#define DMAS_CTRL_EN BIT(31)
+#define DMAS_CTRL_DIR_READ BIT(30)
+
+#define DATA_STROB 0xa0
+#define DATA_STROB_EDO_EN BIT(2)
+#define DATA_STROB_INV_POL BIT(1)
+#define DATA_STROB_DELAY_2CYC BIT(0)
+
+#define IDLY_CODE(x) (0xa4 + ((x) * 4))
+#define IDLY_CODE_VAL(x, v) ((v) << (((x) % 4) * 8))
+
+#define GPIO 0xc4
+#define GPIO_PT(x) BIT(3 + ((x) * 16))
+#define GPIO_RESET(x) BIT(2 + ((x) * 16))
+#define GPIO_HOLDB(x) BIT(1 + ((x) * 16))
+#define GPIO_WPB(x) BIT((x) * 16)
+
+#define HC_VER 0xd0
+
+#define HW_TEST(x) (0xe0 + ((x) * 4))
+
+#define MXIC_NFC_MAX_CLK_HZ 50000000
+#define IRQ_TIMEOUT 1000
+
+struct mxic_nand_ctlr {
+ struct clk *ps_clk;
+ struct clk *send_clk;
+ struct clk *send_dly_clk;
+ struct completion complete;
+ void __iomem *regs;
+ struct nand_controller controller;
+ struct device *dev;
+ struct nand_chip chip;
+};
+
+static int mxic_nfc_clk_enable(struct mxic_nand_ctlr *nfc)
+{
+ int ret;
+
+ ret = clk_prepare_enable(nfc->ps_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(nfc->send_clk);
+ if (ret)
+ goto err_ps_clk;
+
+ ret = clk_prepare_enable(nfc->send_dly_clk);
+ if (ret)
+ goto err_send_dly_clk;
+
+ return ret;
+
+err_send_dly_clk:
+ clk_disable_unprepare(nfc->send_clk);
+err_ps_clk:
+ clk_disable_unprepare(nfc->ps_clk);
+
+ return ret;
+}
+
+static void mxic_nfc_clk_disable(struct mxic_nand_ctlr *nfc)
+{
+ clk_disable_unprepare(nfc->send_clk);
+ clk_disable_unprepare(nfc->send_dly_clk);
+ clk_disable_unprepare(nfc->ps_clk);
+}
+
+static void mxic_nfc_set_input_delay(struct mxic_nand_ctlr *nfc, u8 idly_code)
+{
+ writel(IDLY_CODE_VAL(0, idly_code) |
+ IDLY_CODE_VAL(1, idly_code) |
+ IDLY_CODE_VAL(2, idly_code) |
+ IDLY_CODE_VAL(3, idly_code),
+ nfc->regs + IDLY_CODE(0));
+ writel(IDLY_CODE_VAL(4, idly_code) |
+ IDLY_CODE_VAL(5, idly_code) |
+ IDLY_CODE_VAL(6, idly_code) |
+ IDLY_CODE_VAL(7, idly_code),
+ nfc->regs + IDLY_CODE(1));
+}
+
+static int mxic_nfc_clk_setup(struct mxic_nand_ctlr *nfc, unsigned long freq)
+{
+ int ret;
+
+ ret = clk_set_rate(nfc->send_clk, freq);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(nfc->send_dly_clk, freq);
+ if (ret)
+ return ret;
+
+	/*
+	 * The input delay code ranges from 0x0 to 0x1F in steps of 78 ps,
+	 * so the maximum selectable input delay is 31 * 78 ps = 2.418 ns.
+	 */
+ mxic_nfc_set_input_delay(nfc, 0xf);
+
+ /*
+ * Phase degree = 360 * freq * output-delay
+ * where output-delay is a constant value 1 ns in FPGA.
+ *
+ * Get Phase degree = 360 * freq * 1 ns
+ * = 360 * freq * 1 sec / 1000000000
+ * = 9 * freq / 25000000
+ */
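+	/*
+	 * For example, at the 50 MHz ceiling enforced by mxic_nfc_set_freq()
+	 * this works out to 9 * 50000000 / 25000000 = 18 degrees.
+	 */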
+ ret = clk_set_phase(nfc->send_dly_clk, 9 * freq / 25000000);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mxic_nfc_set_freq(struct mxic_nand_ctlr *nfc, unsigned long freq)
+{
+ int ret;
+
+ if (freq > MXIC_NFC_MAX_CLK_HZ)
+ freq = MXIC_NFC_MAX_CLK_HZ;
+
+ mxic_nfc_clk_disable(nfc);
+ ret = mxic_nfc_clk_setup(nfc, freq);
+ if (ret)
+ return ret;
+
+ ret = mxic_nfc_clk_enable(nfc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static irqreturn_t mxic_nfc_isr(int irq, void *dev_id)
+{
+ struct mxic_nand_ctlr *nfc = dev_id;
+ u32 sts;
+
+ sts = readl(nfc->regs + INT_STS);
+ if (sts & INT_RDY_PIN)
+ complete(&nfc->complete);
+ else
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+static void mxic_nfc_hw_init(struct mxic_nand_ctlr *nfc)
+{
+ writel(HC_CFG_NIO(8) | HC_CFG_TYPE(1, HC_CFG_TYPE_RAW_NAND) |
+ HC_CFG_SLV_ACT(0) | HC_CFG_MAN_CS_EN |
+ HC_CFG_IDLE_SIO_LVL(1), nfc->regs + HC_CFG);
+ writel(INT_STS_ALL, nfc->regs + INT_STS_EN);
+ writel(INT_RDY_PIN, nfc->regs + INT_SIG_EN);
+ writel(0x0, nfc->regs + ONFI_DIN_CNT(0));
+ writel(0, nfc->regs + LRD_CFG);
+ writel(0, nfc->regs + LRD_CTRL);
+ writel(0x0, nfc->regs + HC_EN);
+}
+
+static void mxic_nfc_cs_enable(struct mxic_nand_ctlr *nfc)
+{
+ writel(readl(nfc->regs + HC_CFG) | HC_CFG_MAN_CS_EN,
+ nfc->regs + HC_CFG);
+ writel(HC_CFG_MAN_CS_ASSERT | readl(nfc->regs + HC_CFG),
+ nfc->regs + HC_CFG);
+}
+
+static void mxic_nfc_cs_disable(struct mxic_nand_ctlr *nfc)
+{
+ writel(~HC_CFG_MAN_CS_ASSERT & readl(nfc->regs + HC_CFG),
+ nfc->regs + HC_CFG);
+}
+
+static int mxic_nfc_wait_ready(struct nand_chip *chip)
+{
+ struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip);
+ int ret;
+
+ ret = wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(IRQ_TIMEOUT));
+ if (!ret) {
+ dev_err(nfc->dev, "nand device timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int mxic_nfc_data_xfer(struct mxic_nand_ctlr *nfc, const void *txbuf,
+ void *rxbuf, unsigned int len)
+{
+ unsigned int pos = 0;
+
+ while (pos < len) {
+ unsigned int nbytes = len - pos;
+ u32 data = 0xffffffff;
+ u32 sts;
+ int ret;
+
+ if (nbytes > 4)
+ nbytes = 4;
+
+ if (txbuf)
+ memcpy(&data, txbuf + pos, nbytes);
+
+ ret = readl_poll_timeout(nfc->regs + INT_STS, sts,
+ sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
+ if (ret)
+ return ret;
+
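+		/*
+		 * The TXD bank appears to be indexed by the number of valid
+		 * bytes in the word, index 0 meaning a full 4-byte word;
+		 * nbytes % 4 thus maps 4 to TXD(0) and 1..3 to TXD(1..3).
+		 */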
+ writel(data, nfc->regs + TXD(nbytes % 4));
+
+ ret = readl_poll_timeout(nfc->regs + INT_STS, sts,
+ sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ ret = readl_poll_timeout(nfc->regs + INT_STS, sts,
+ sts & INT_RX_NOT_EMPTY, 0,
+ USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ data = readl(nfc->regs + RXD);
+ if (rxbuf) {
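+			/*
+			 * Short words come back left-justified in the RX FIFO
+			 * word, hence the shift before copying the valid bytes.
+			 */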
+ data >>= (8 * (4 - nbytes));
+ memcpy(rxbuf + pos, &data, nbytes);
+ }
+ if (readl(nfc->regs + INT_STS) & INT_RX_NOT_EMPTY)
+ dev_warn(nfc->dev, "RX FIFO not empty\n");
+
+ pos += nbytes;
+ }
+
+ return 0;
+}
+
+static int mxic_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip);
+ const struct nand_op_instr *instr = NULL;
+ int ret = 0;
+ unsigned int op_id;
+
+ if (check_only)
+ return 0;
+
+ mxic_nfc_cs_enable(nfc);
+ init_completion(&nfc->complete);
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writel(0, nfc->regs + HC_EN);
+ writel(HC_EN_BIT, nfc->regs + HC_EN);
+ writel(OP_CMD_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) |
+ OP_CMD_BYTES(0), nfc->regs + SS_CTRL(0));
+
+ ret = mxic_nfc_data_xfer(nfc,
+ &instr->ctx.cmd.opcode,
+ NULL, 1);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ writel(OP_ADDR_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) |
+ OP_ADDR_BYTES(instr->ctx.addr.naddrs),
+ nfc->regs + SS_CTRL(0));
+ ret = mxic_nfc_data_xfer(nfc,
+ instr->ctx.addr.addrs, NULL,
+ instr->ctx.addr.naddrs);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ writel(0x0, nfc->regs + ONFI_DIN_CNT(0));
+ writel(OP_DATA_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) |
+ OP_READ, nfc->regs + SS_CTRL(0));
+ ret = mxic_nfc_data_xfer(nfc, NULL,
+ instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ writel(instr->ctx.data.len,
+ nfc->regs + ONFI_DIN_CNT(0));
+ writel(OP_DATA_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F),
+ nfc->regs + SS_CTRL(0));
+ ret = mxic_nfc_data_xfer(nfc,
+ instr->ctx.data.buf.out, NULL,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = mxic_nfc_wait_ready(chip);
+ break;
+ }
+ }
+ mxic_nfc_cs_disable(nfc);
+
+ return ret;
+}
+
+static int mxic_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip);
+ const struct nand_sdr_timings *sdr;
+ unsigned long freq;
+ int ret;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
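+	/*
+	 * tRC_min is given in picoseconds; e.g. the 30000 ps cycle of
+	 * ONFI SDR mode 3 gives 10^9 / 30 = ~33.3 MHz.
+	 */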
+ freq = NSEC_PER_SEC / (sdr->tRC_min / 1000);
+
+ ret = mxic_nfc_set_freq(nfc, freq);
+ if (ret)
+		dev_err(nfc->dev, "set freq:%lu failed\n", freq);
+
+ if (sdr->tRC_min < 30000)
+ writel(DATA_STROB_EDO_EN, nfc->regs + DATA_STROB);
+
+ return 0;
+}
+
+static const struct nand_controller_ops mxic_nand_controller_ops = {
+ .exec_op = mxic_nfc_exec_op,
+ .setup_interface = mxic_nfc_setup_interface,
+};
+
+static int mxic_nfc_probe(struct platform_device *pdev)
+{
+ struct device_node *nand_np, *np = pdev->dev.of_node;
+ struct mtd_info *mtd;
+ struct mxic_nand_ctlr *nfc;
+ struct nand_chip *nand_chip;
+ int err;
+ int irq;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(struct mxic_nand_ctlr),
+ GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->ps_clk = devm_clk_get(&pdev->dev, "ps");
+ if (IS_ERR(nfc->ps_clk))
+ return PTR_ERR(nfc->ps_clk);
+
+ nfc->send_clk = devm_clk_get(&pdev->dev, "send");
+ if (IS_ERR(nfc->send_clk))
+ return PTR_ERR(nfc->send_clk);
+
+ nfc->send_dly_clk = devm_clk_get(&pdev->dev, "send_dly");
+ if (IS_ERR(nfc->send_dly_clk))
+ return PTR_ERR(nfc->send_dly_clk);
+
+ nfc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ nand_chip = &nfc->chip;
+ mtd = nand_to_mtd(nand_chip);
+ mtd->dev.parent = &pdev->dev;
+
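+	/*
+	 * A single NAND device is expected below the controller node; if
+	 * several children are present, the last one visited wins here.
+	 */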
+ for_each_child_of_node(np, nand_np)
+ nand_set_flash_node(nand_chip, nand_np);
+
+ nand_chip->priv = nfc;
+ nfc->dev = &pdev->dev;
+ nfc->controller.ops = &mxic_nand_controller_ops;
+ nand_controller_init(&nfc->controller);
+ nand_chip->controller = &nfc->controller;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ mxic_nfc_hw_init(nfc);
+
+ err = devm_request_irq(&pdev->dev, irq, mxic_nfc_isr,
+ 0, "mxic-nfc", nfc);
+ if (err)
+ goto fail;
+
+ err = nand_scan(nand_chip, 1);
+ if (err)
+ goto fail;
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto fail;
+
+ platform_set_drvdata(pdev, nfc);
+ return 0;
+
+fail:
+ mxic_nfc_clk_disable(nfc);
+ return err;
+}
+
+static void mxic_nfc_remove(struct platform_device *pdev)
+{
+ struct mxic_nand_ctlr *nfc = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &nfc->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ mxic_nfc_clk_disable(nfc);
+}
+
+static const struct of_device_id mxic_nfc_of_ids[] = {
+ { .compatible = "mxic,multi-itfc-v009-nand-controller", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mxic_nfc_of_ids);
+
+static struct platform_driver mxic_nfc_driver = {
+ .probe = mxic_nfc_probe,
+ .remove_new = mxic_nfc_remove,
+ .driver = {
+ .name = "mxic-nfc",
+ .of_match_table = mxic_nfc_of_ids,
+ },
+};
+module_platform_driver(mxic_nfc_driver);
+
+MODULE_AUTHOR("Mason Yang <masonccyang@mxic.com.tw>");
+MODULE_DESCRIPTION("Macronix raw NAND controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/nand_amd.c b/drivers/mtd/nand/raw/nand_amd.c
new file mode 100644
index 0000000000..c3d4dae3cd
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_amd.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ */
+
+#include "internals.h"
+
+static void amd_nand_decode_id(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ nand_decode_ext_id(chip);
+
+ /*
+ * Check for Spansion/AMD ID + repeating 5th, 6th byte since
+	 * some Spansion chips have an erasesize that conflicts with the size
+ * listed in nand_ids table.
+ * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
+ */
+ if (chip->id.data[4] != 0x00 && chip->id.data[5] == 0x00 &&
+ chip->id.data[6] == 0x00 && chip->id.data[7] == 0x00 &&
+ memorg->pagesize == 512) {
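+		/*
+		 * The two low bits of ID byte 3 select 256 << (2 * b) pages
+		 * per block, i.e. 128 KiB, 512 KiB, 2 MiB or 8 MiB erase
+		 * sizes at this 512-byte page size.
+		 */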
+ memorg->pages_per_eraseblock = 256;
+ memorg->pages_per_eraseblock <<= ((chip->id.data[3] & 0x03) << 1);
+ mtd->erasesize = memorg->pages_per_eraseblock *
+ memorg->pagesize;
+ }
+}
+
+static int amd_nand_init(struct nand_chip *chip)
+{
+ if (nand_is_slc(chip))
+ /*
+ * According to the datasheet of some Cypress SLC NANDs,
+ * the bad block markers can be in the first, second or last
+ * page of a block. So let's check all three locations.
+ */
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE |
+ NAND_BBM_LASTPAGE;
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops amd_nand_manuf_ops = {
+ .detect = amd_nand_decode_id,
+ .init = amd_nand_init,
+};
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
new file mode 100644
index 0000000000..76167b8ca9
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -0,0 +1,6677 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Overview:
+ * This is the generic MTD driver for NAND flash devices. It should be
+ * capable of working with almost all NAND chips currently available.
+ *
+ * Additional technical information is available on
+ * http://www.linux-mtd.infradead.org/doc/nand.html
+ *
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * TODO:
+ * Enable cached programming for 2k page size chips
+ *	Check if mtd->ecctype should be set to MTD_ECC_HW
+ *	if we have HW ECC support.
+ *	The BBT is not serialized; this has to be fixed.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
+#include <linux/mtd/nand-ecc-sw-bch.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+
+#include "internals.h"
+
+static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
+ struct mtd_pairing_info *info)
+{
+ int lastpage = (mtd->erasesize / mtd->writesize) - 1;
+ int dist = 3;
+
+ if (page == lastpage)
+ dist = 2;
+
+ if (!page || (page & 1)) {
+ info->group = 0;
+ info->pair = (page + 1) / 2;
+ } else {
+ info->group = 1;
+ info->pair = (page + 1 - dist) / 2;
+ }
+
+ return 0;
+}
+
+static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
+ const struct mtd_pairing_info *info)
+{
+ int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
+ int page = info->pair * 2;
+ int dist = 3;
+
+ if (!info->group && !info->pair)
+ return 0;
+
+ if (info->pair == lastpair && info->group)
+ dist = 2;
+
+ if (!info->group)
+ page--;
+ else if (info->pair)
+ page += dist - 1;
+
+ if (page >= mtd->erasesize / mtd->writesize)
+ return -EINVAL;
+
+ return page;
+}
+
+const struct mtd_pairing_scheme dist3_pairing_scheme = {
+ .ngroups = 2,
+ .get_info = nand_pairing_dist3_get_info,
+ .get_wunit = nand_pairing_dist3_get_wunit,
+};
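+
+/*
+ * Worked example for the distance-3 scheme above: pages 1 and 4 form
+ * pair 1 (page 1 is group 0 since (1 + 1) / 2 = 1, page 4 is group 1
+ * since (4 + 1 - 3) / 2 = 1), so the two pages of a pair are written
+ * three program operations apart, shrinking to two at the end of the
+ * block.
+ */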
+
+static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+
+ /* Start address must align on block boundary */
+ if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
+ pr_debug("%s: unaligned address\n", __func__);
+ ret = -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
+ pr_debug("%s: length not block aligned\n", __func__);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * nand_extract_bits - Copy unaligned bits from one buffer to another one
+ * @dst: destination buffer
+ * @dst_off: bit offset at which the writing starts
+ * @src: source buffer
+ * @src_off: bit offset at which the reading starts
+ * @nbits: number of bits to copy from @src to @dst
+ *
+ * Copy bits from one memory region to another (overlap authorized).
+ */
+void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
+ unsigned int src_off, unsigned int nbits)
+{
+ unsigned int tmp, n;
+
+ dst += dst_off / 8;
+ dst_off %= 8;
+ src += src_off / 8;
+ src_off %= 8;
+
+ while (nbits) {
+ n = min3(8 - dst_off, 8 - src_off, nbits);
+
+ tmp = (*src >> src_off) & GENMASK(n - 1, 0);
+ *dst &= ~GENMASK(n - 1 + dst_off, dst_off);
+ *dst |= tmp << dst_off;
+
+ dst_off += n;
+ if (dst_off >= 8) {
+ dst++;
+ dst_off -= 8;
+ }
+
+ src_off += n;
+ if (src_off >= 8) {
+ src++;
+ src_off -= 8;
+ }
+
+ nbits -= n;
+ }
+}
+EXPORT_SYMBOL_GPL(nand_extract_bits);
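+
+/*
+ * Example: nand_extract_bits(dst, 0, src, 4, 8) copies the byte
+ * straddling src[0] bits 4-7 and src[1] bits 0-3 into dst[0]; the
+ * loop above handles it as two 4-bit chunks.
+ */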
+
+/**
+ * nand_select_target() - Select a NAND target (A.K.A. die)
+ * @chip: NAND chip object
+ * @cs: the CS line to select. Note that this CS id is always from the chip
+ * PoV, not the controller one
+ *
+ * Select a NAND target so that further operations executed on @chip go to the
+ * selected NAND target.
+ */
+void nand_select_target(struct nand_chip *chip, unsigned int cs)
+{
+ /*
+ * cs should always lie between 0 and nanddev_ntargets(), when that's
+ * not the case it's a bug and the caller should be fixed.
+ */
+ if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
+ return;
+
+ chip->cur_cs = cs;
+
+ if (chip->legacy.select_chip)
+ chip->legacy.select_chip(chip, cs);
+}
+EXPORT_SYMBOL_GPL(nand_select_target);
+
+/**
+ * nand_deselect_target() - Deselect the currently selected target
+ * @chip: NAND chip object
+ *
+ * Deselect the currently selected NAND target. The result of operations
+ * executed on @chip after the target has been deselected is undefined.
+ */
+void nand_deselect_target(struct nand_chip *chip)
+{
+ if (chip->legacy.select_chip)
+ chip->legacy.select_chip(chip, -1);
+
+ chip->cur_cs = -1;
+}
+EXPORT_SYMBOL_GPL(nand_deselect_target);
+
+/**
+ * nand_release_device - [GENERIC] release chip
+ * @chip: NAND chip object
+ *
+ * Release chip lock and wake up anyone waiting on the device.
+ */
+static void nand_release_device(struct nand_chip *chip)
+{
+ /* Release the controller and the chip */
+ mutex_unlock(&chip->controller->lock);
+ mutex_unlock(&chip->lock);
+}
+
+/**
+ * nand_bbm_get_next_page - Get the next page for bad block markers
+ * @chip: NAND chip object
+ * @page: First page to start checking for bad block marker usage
+ *
+ * Returns an integer that corresponds to the page offset within a block, for
+ * a page that is used to store bad block markers. If no more pages are
+ * available, -EINVAL is returned.
+ */
+int nand_bbm_get_next_page(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int last_page = ((mtd->erasesize - mtd->writesize) >>
+ chip->page_shift) & chip->pagemask;
+ unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
+ | NAND_BBM_LASTPAGE;
+
+ if (page == 0 && !(chip->options & bbm_flags))
+ return 0;
+ if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
+ return 0;
+ if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
+ return 1;
+ if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
+ return last_page;
+
+ return -EINVAL;
+}
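+
+/*
+ * For instance, with NAND_BBM_FIRSTPAGE | NAND_BBM_LASTPAGE set,
+ * successive calls return 0, then last_page, then -EINVAL.
+ */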
+
+/**
+ * nand_block_bad - [DEFAULT] Read bad block marker from the chip
+ * @chip: NAND chip object
+ * @ofs: offset from device start
+ *
+ * Check if the block is bad.
+ */
+static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
+{
+ int first_page, page_offset;
+ int res;
+ u8 bad;
+
+ first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+ page_offset = nand_bbm_get_next_page(chip, 0);
+
+ while (page_offset >= 0) {
+ res = chip->ecc.read_oob(chip, first_page + page_offset);
+ if (res < 0)
+ return res;
+
+ bad = chip->oob_poi[chip->badblockpos];
+
+ if (likely(chip->badblockbits == 8))
+ res = bad != 0xFF;
+ else
+ res = hweight8(bad) < chip->badblockbits;
+ if (res)
+ return res;
+
+ page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
+ }
+
+ return 0;
+}
+
+/**
+ * nand_region_is_secured() - Check if the region is secured
+ * @chip: NAND chip object
+ * @offset: Offset of the region to check
+ * @size: Size of the region to check
+ *
+ * Checks if the region is secured by comparing the offset and size with the
+ * list of secure regions obtained from DT. Returns true if the region is
+ * secured else false.
+ */
+static bool nand_region_is_secured(struct nand_chip *chip, loff_t offset, u64 size)
+{
+ int i;
+
+ /* Skip touching the secure regions if present */
+ for (i = 0; i < chip->nr_secure_regions; i++) {
+ const struct nand_secure_region *region = &chip->secure_regions[i];
+
+ if (offset + size <= region->offset ||
+ offset >= region->offset + region->size)
+ continue;
+
+ pr_debug("%s: Region 0x%llx - 0x%llx is secured!",
+ __func__, offset, offset + size);
+
+ return true;
+ }
+
+ return false;
+}
+
+static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->options & NAND_NO_BBM_QUIRK)
+ return 0;
+
+ /* Check if the region is secured */
+ if (nand_region_is_secured(chip, ofs, mtd->erasesize))
+ return -EIO;
+
+ if (mtd_check_expert_analysis_mode())
+ return 0;
+
+ if (chip->legacy.block_bad)
+ return chip->legacy.block_bad(chip, ofs);
+
+ return nand_block_bad(chip, ofs);
+}
+
+/**
+ * nand_get_device - [GENERIC] Get chip for selected access
+ * @chip: NAND chip structure
+ *
+ * Lock the device and its controller for exclusive access
+ */
+static void nand_get_device(struct nand_chip *chip)
+{
+ /* Wait until the device is resumed. */
+ while (1) {
+ mutex_lock(&chip->lock);
+ if (!chip->suspended) {
+ mutex_lock(&chip->controller->lock);
+ return;
+ }
+ mutex_unlock(&chip->lock);
+
+ wait_event(chip->resume_wq, !chip->suspended);
+ }
+}
+
+/**
+ * nand_check_wp - [GENERIC] check if the chip is write protected
+ * @chip: NAND chip object
+ *
+ * Check if the device is write protected. The function expects that the
+ * device is already selected.
+ */
+static int nand_check_wp(struct nand_chip *chip)
+{
+ u8 status;
+ int ret;
+
+ /* Broken xD cards report WP despite being writable */
+ if (chip->options & NAND_BROKEN_XD)
+ return 0;
+
+ /* Check the WP bit */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_WP ? 0 : 1;
+}
+
+/**
+ * nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @chip: NAND chip object
+ * @oob: oob data buffer
+ * @len: oob data write length
+ * @ops: oob ops structure
+ */
+static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /*
+	 * Initialise to all 0xFF, to avoid the possibility of leftover OOB
+ * data from a previous OOB read.
+ */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+
+ switch (ops->mode) {
+
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ memcpy(chip->oob_poi + ops->ooboffs, oob, len);
+ return oob + len;
+
+ case MTD_OPS_AUTO_OOB:
+ ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
+ ops->ooboffs, len);
+ BUG_ON(ret);
+ return oob + len;
+
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+/**
+ * nand_do_write_oob - [MTD Interface] NAND write out-of-band
+ * @chip: NAND chip object
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ *
+ * NAND write out-of-band.
+ */
+static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int chipnr, page, status, len, ret;
+
+ pr_debug("%s: to = 0x%08x, len = %i\n",
+ __func__, (unsigned int)to, (int)ops->ooblen);
+
+ len = mtd_oobavail(mtd, ops);
+
+ /* Do not allow write past end of page */
+ if ((ops->ooboffs + ops->ooblen) > len) {
+ pr_debug("%s: attempt to write past end of page\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check if the region is secured */
+ if (nand_region_is_secured(chip, to, ops->ooblen))
+ return -EIO;
+
+ chipnr = (int)(to >> chip->chip_shift);
+
+ /*
+ * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
+ * of my DiskOnChip 2000 test units) will clear the whole data page too
+ * if we don't do this. I have no clue why, but I seem to have 'fixed'
+ * it in the doc2000 driver in August 1999. dwmw2.
+ */
+ ret = nand_reset(chip, chipnr);
+ if (ret)
+ return ret;
+
+ nand_select_target(chip, chipnr);
+
+ /* Shift to get page */
+ page = (int)(to >> chip->page_shift);
+
+	/* Check if it is write protected */
+ if (nand_check_wp(chip)) {
+ nand_deselect_target(chip);
+ return -EROFS;
+ }
+
+ /* Invalidate the page cache, if we write to the cached page */
+ if (page == chip->pagecache.page)
+ chip->pagecache.page = -1;
+
+ nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
+
+ if (ops->mode == MTD_OPS_RAW)
+ status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
+ else
+ status = chip->ecc.write_oob(chip, page & chip->pagemask);
+
+ nand_deselect_target(chip);
+
+ if (status)
+ return status;
+
+ ops->oobretlen = ops->ooblen;
+
+ return 0;
+}
+
+/**
+ * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
+ * @chip: NAND chip object
+ * @ofs: offset from device start
+ *
+ * This is the default implementation, which can be overridden by a hardware
+ * specific driver. It provides the details for writing a bad block marker to a
+ * block.
+ */
+static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mtd_oob_ops ops;
+ uint8_t buf[2] = { 0, 0 };
+ int ret = 0, res, page_offset;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.oobbuf = buf;
+ ops.ooboffs = chip->badblockpos;
+ if (chip->options & NAND_BUSWIDTH_16) {
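+		/*
+		 * On a 16-bit bus the marker must be accessed as an aligned
+		 * 16-bit word, so round the offset down and write two bytes.
+		 */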
+ ops.ooboffs &= ~0x01;
+ ops.len = ops.ooblen = 2;
+ } else {
+ ops.len = ops.ooblen = 1;
+ }
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ page_offset = nand_bbm_get_next_page(chip, 0);
+
+ while (page_offset >= 0) {
+ res = nand_do_write_oob(chip,
+ ofs + (page_offset * mtd->writesize),
+ &ops);
+
+ if (!ret)
+ ret = res;
+
+ page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
+ }
+
+ return ret;
+}
+
+/**
+ * nand_markbad_bbm - mark a block by updating the BBM
+ * @chip: NAND chip object
+ * @ofs: offset of the block to mark bad
+ */
+int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
+{
+ if (chip->legacy.block_markbad)
+ return chip->legacy.block_markbad(chip, ofs);
+
+ return nand_default_block_markbad(chip, ofs);
+}
+
+/**
+ * nand_block_markbad_lowlevel - mark a block bad
+ * @chip: NAND chip object
+ * @ofs: offset from device start
+ *
+ * This function performs the generic NAND bad block marking steps (i.e., bad
+ * block table(s) and/or marker(s)). We only allow the hardware driver to
+ * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
+ *
+ * We try operations in the following order:
+ *
+ * (1) erase the affected block, to allow OOB marker to be written cleanly
+ * (2) write bad block marker to OOB area of affected block (unless flag
+ * NAND_BBT_NO_OOB_BBM is present)
+ * (3) update the BBT
+ *
+ * Note that we retain the first error encountered in (2) or (3), finish the
+ * procedures, and report that error at the end.
+ */
+static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int res, ret = 0;
+
+ if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
+ struct erase_info einfo;
+
+ /* Attempt erase before marking OOB */
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.addr = ofs;
+ einfo.len = 1ULL << chip->phys_erase_shift;
+ nand_erase_nand(chip, &einfo, 0);
+
+ /* Write bad block marker to OOB */
+ nand_get_device(chip);
+
+ ret = nand_markbad_bbm(chip, ofs);
+ nand_release_device(chip);
+ }
+
+ /* Mark block bad in BBT */
+ if (chip->bbt) {
+ res = nand_markbad_bbt(chip, ofs);
+ if (!ret)
+ ret = res;
+ }
+
+ if (!ret)
+ mtd->ecc_stats.badblocks++;
+
+ return ret;
+}
+
+/**
+ * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * Check if the block is marked as reserved.
+ */
+static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!chip->bbt)
+ return 0;
+ /* Return info from the table */
+ return nand_isreserved_bbt(chip, ofs);
+}
+
+/**
+ * nand_block_checkbad - [GENERIC] Check if a block is marked bad
+ * @chip: NAND chip object
+ * @ofs: offset from device start
+ * @allowbbt: 1, if it is allowed to access the bbt area
+ *
+ * Check if the block is bad, either by reading the bad block table or by
+ * calling the scan function.
+ */
+static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
+{
+ /* Return info from the table */
+ if (chip->bbt)
+ return nand_isbad_bbt(chip, ofs, allowbbt);
+
+ return nand_isbad_bbm(chip, ofs);
+}
+
+/**
+ * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
+ * @chip: NAND chip structure
+ * @timeout_ms: Timeout in ms
+ *
+ * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
+ * If that does not happen within the specified timeout, -ETIMEDOUT is
+ * returned.
+ *
+ * This helper is intended to be used when the controller does not have access
+ * to the NAND R/B pin.
+ *
+ * Be aware that calling this helper from an ->exec_op() implementation means
+ * ->exec_op() must be re-entrant.
+ *
+ * Return 0 if the NAND chip is ready, a negative error otherwise.
+ */
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+{
+ const struct nand_interface_config *conf;
+ u8 status = 0;
+ int ret;
+
+ if (!nand_has_exec_op(chip))
+ return -ENOTSUPP;
+
+ /* Wait tWB before polling the STATUS reg. */
+ conf = nand_get_interface_config(chip);
+ ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max));
+
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
+
+ /*
+	 * +1 below is necessary because if we are now in the last fraction
+	 * of a jiffy and msecs_to_jiffies is 1 then we will wait only that
+	 * small jiffy fraction, possibly leading to a false timeout
+ */
+ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
+ do {
+ ret = nand_read_data_op(chip, &status, sizeof(status), true,
+ false);
+ if (ret)
+ break;
+
+ if (status & NAND_STATUS_READY)
+ break;
+
+ /*
+ * Typical lowest execution time for a tR on most NANDs is 10us,
+		 * use this as polling delay before doing something smarter (i.e.
+ * deriving a delay from the timeout value, timeout_ms/ratio).
+ */
+ udelay(10);
+ } while (time_before(jiffies, timeout_ms));
+
+ /*
+ * We have to exit READ_STATUS mode in order to read real data on the
+ * bus in case the WAITRDY instruction is preceding a DATA_IN
+ * instruction.
+ */
+ nand_exit_status_op(chip);
+
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
+};
+EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
+
+/**
+ * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
+ * @chip: NAND chip structure
+ * @gpiod: GPIO descriptor of R/B pin
+ * @timeout_ms: Timeout in ms
+ *
+ * Poll the R/B GPIO pin until it becomes ready. If that does not happen
+ * within the specified timeout, -ETIMEDOUT is returned.
+ *
+ * This helper is intended to be used when the controller has access to the
+ * NAND R/B pin over GPIO.
+ *
+ * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
+ */
+int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
+ unsigned long timeout_ms)
+{
+
+ /*
+ * Wait until R/B pin indicates chip is ready or timeout occurs.
+	 * +1 below is necessary because if we are now in the last fraction
+	 * of a jiffy and msecs_to_jiffies is 1 then we will wait only that
+	 * small jiffy fraction, possibly leading to a false timeout.
+ */
+ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
+ do {
+ if (gpiod_get_value_cansleep(gpiod))
+ return 0;
+
+ cond_resched();
+ } while (time_before(jiffies, timeout_ms));
+
+ return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
+};
+EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
+
+/**
+ * panic_nand_wait - [GENERIC] wait until the command is done
+ * @chip: NAND chip structure
+ * @timeo: timeout
+ *
+ * Wait for command done. This is a helper function for nand_wait used when
+ * we are in interrupt context. May happen when in panic and trying to write
+ * an oops through mtdoops.
+ */
+void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
+{
+ int i;
+ for (i = 0; i < timeo; i++) {
+ if (chip->legacy.dev_ready) {
+ if (chip->legacy.dev_ready(chip))
+ break;
+ } else {
+ int ret;
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status),
+ true, false);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
+ break;
+ }
+ mdelay(1);
+ }
+}
+
+static bool nand_supports_get_features(struct nand_chip *chip, int addr)
+{
+ return (chip->parameters.supports_set_get_features &&
+ test_bit(addr, chip->parameters.get_feature_list));
+}
+
+static bool nand_supports_set_features(struct nand_chip *chip, int addr)
+{
+ return (chip->parameters.supports_set_get_features &&
+ test_bit(addr, chip->parameters.set_feature_list));
+}
+
+/**
+ * nand_reset_interface - Reset data interface and timings
+ * @chip: The NAND chip
+ * @chipnr: Internal die id
+ *
+ * Reset the Data interface and timings to ONFI mode 0.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_reset_interface(struct nand_chip *chip, int chipnr)
+{
+ const struct nand_controller_ops *ops = chip->controller->ops;
+ int ret;
+
+ if (!nand_controller_can_setup_interface(chip))
+ return 0;
+
+ /*
+ * The ONFI specification says:
+ * "
+ * To transition from NV-DDR or NV-DDR2 to the SDR data
+ * interface, the host shall use the Reset (FFh) command
+ * using SDR timing mode 0. A device in any timing mode is
+ * required to recognize Reset (FFh) command issued in SDR
+ * timing mode 0.
+ * "
+ *
+ * Configure the data interface in SDR mode and set the
+ * timings to timing mode 0.
+ */
+
+ chip->current_interface_config = nand_get_reset_interface_config();
+ ret = ops->setup_interface(chip, chipnr,
+ chip->current_interface_config);
+ if (ret)
+ pr_err("Failed to configure data interface to SDR timing mode 0\n");
+
+ return ret;
+}
+
+/**
+ * nand_setup_interface - Setup the best data interface and timings
+ * @chip: The NAND chip
+ * @chipnr: Internal die id
+ *
+ * Configure what has been reported to be the best data interface and NAND
+ * timings supported by the chip and the driver.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_setup_interface(struct nand_chip *chip, int chipnr)
+{
+ const struct nand_controller_ops *ops = chip->controller->ops;
+ u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request;
+ int ret;
+
+ if (!nand_controller_can_setup_interface(chip))
+ return 0;
+
+ /*
+	 * A nand_reset_interface() call puts both the NAND chip and the NAND
+	 * controller in timing mode 0. If the default mode for this chip is
+	 * also 0, there is no need to apply the change again. Besides, at
+	 * probe time, nand_setup_interface() uses ->set/get_features() which
+	 * would fail anyway as the parameter page is not available yet.
+ */
+ if (!chip->best_interface_config)
+ return 0;
+
+ request = chip->best_interface_config->timings.mode;
+ if (nand_interface_is_sdr(chip->best_interface_config))
+ request |= ONFI_DATA_INTERFACE_SDR;
+ else
+ request |= ONFI_DATA_INTERFACE_NVDDR;
+ tmode_param[0] = request;
+
+ /* Change the mode on the chip side (if supported by the NAND chip) */
+ if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
+ nand_select_target(chip, chipnr);
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
+ tmode_param);
+ nand_deselect_target(chip);
+ if (ret)
+ return ret;
+ }
+
+ /* Change the mode on the controller side */
+ ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
+ if (ret)
+ return ret;
+
+ /* Check the mode has been accepted by the chip, if supported */
+ if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
+ goto update_interface_config;
+
+ memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
+ nand_select_target(chip, chipnr);
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
+ tmode_param);
+ nand_deselect_target(chip);
+ if (ret)
+ goto err_reset_chip;
+
+ if (request != tmode_param[0]) {
+ pr_warn("%s timing mode %d not acknowledged by the NAND chip\n",
+ nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR",
+ chip->best_interface_config->timings.mode);
+ pr_debug("NAND chip would work in %s timing mode %d\n",
+ tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? "NV-DDR" : "SDR",
+ (unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0]));
+ goto err_reset_chip;
+ }
+
+update_interface_config:
+ chip->current_interface_config = chip->best_interface_config;
+
+ return 0;
+
+err_reset_chip:
+ /*
+ * Fallback to mode 0 if the chip explicitly did not ack the chosen
+ * timing mode.
+ */
+ nand_reset_interface(chip, chipnr);
+ nand_select_target(chip, chipnr);
+ nand_reset_op(chip);
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+/**
+ * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
+ * NAND controller and the NAND chip support
+ * @chip: the NAND chip
+ * @iface: the interface configuration (can eventually be updated)
+ * @spec_timings: specific timings, when not fitting the ONFI specification
+ *
+ * If specific timings are provided, use them. Otherwise, retrieve supported
+ * timing modes from ONFI information.
+ */
+int nand_choose_best_sdr_timings(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ struct nand_sdr_timings *spec_timings)
+{
+ const struct nand_controller_ops *ops = chip->controller->ops;
+ int best_mode = 0, mode, ret = -EOPNOTSUPP;
+
+ iface->type = NAND_SDR_IFACE;
+
+ if (spec_timings) {
+ iface->timings.sdr = *spec_timings;
+ iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);
+
+ /* Verify the controller supports the requested interface */
+ ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
+ iface);
+ if (!ret) {
+ chip->best_interface_config = iface;
+ return ret;
+ }
+
+ /* Fallback to slower modes */
+ best_mode = iface->timings.mode;
+ } else if (chip->parameters.onfi) {
+ best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1;
+ }
+
+ for (mode = best_mode; mode >= 0; mode--) {
+ onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);
+
+ ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
+ iface);
+ if (!ret) {
+ chip->best_interface_config = iface;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * nand_choose_best_nvddr_timings - Pick up the best NVDDR timings that both the
+ * NAND controller and the NAND chip support
+ * @chip: the NAND chip
+ * @iface: the interface configuration (can eventually be updated)
+ * @spec_timings: specific timings, when not fitting the ONFI specification
+ *
+ * If specific timings are provided, use them. Otherwise, retrieve supported
+ * timing modes from ONFI information.
+ */
+int nand_choose_best_nvddr_timings(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ struct nand_nvddr_timings *spec_timings)
+{
+ const struct nand_controller_ops *ops = chip->controller->ops;
+ int best_mode = 0, mode, ret = -EOPNOTSUPP;
+
+ iface->type = NAND_NVDDR_IFACE;
+
+ if (spec_timings) {
+ iface->timings.nvddr = *spec_timings;
+ iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings);
+
+ /* Verify the controller supports the requested interface */
+ ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
+ iface);
+ if (!ret) {
+ chip->best_interface_config = iface;
+ return ret;
+ }
+
+ /* Fallback to slower modes */
+ best_mode = iface->timings.mode;
+ } else if (chip->parameters.onfi) {
+ best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1;
+ }
+
+ for (mode = best_mode; mode >= 0; mode--) {
+ onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode);
+
+ ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
+ iface);
+ if (!ret) {
+ chip->best_interface_config = iface;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * nand_choose_best_timings - Pick up the best NVDDR or SDR timings that both
+ * the NAND controller and the NAND chip support
+ * @chip: the NAND chip
+ * @iface: the interface configuration (can eventually be updated)
+ *
+ * If specific timings are provided, use them. Otherwise, retrieve supported
+ * timing modes from ONFI information.
+ */
+static int nand_choose_best_timings(struct nand_chip *chip,
+ struct nand_interface_config *iface)
+{
+ int ret;
+
+ /* Try the fastest timings: NV-DDR */
+ ret = nand_choose_best_nvddr_timings(chip, iface, NULL);
+ if (!ret)
+ return 0;
+
+ /* Fallback to SDR timings otherwise */
+ return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
+/**
+ * nand_choose_interface_config - find the best data interface and timings
+ * @chip: The NAND chip
+ *
+ * Find the best data interface and NAND timings supported by the chip
+ * and the driver, possibly letting the NAND manufacturer driver propose its
+ * own set of timings.
+ *
+ * After this function nand_chip->interface_config is initialized with the best
+ * timing mode available.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_choose_interface_config(struct nand_chip *chip)
+{
+ struct nand_interface_config *iface;
+ int ret;
+
+ if (!nand_controller_can_setup_interface(chip))
+ return 0;
+
+ iface = kzalloc(sizeof(*iface), GFP_KERNEL);
+ if (!iface)
+ return -ENOMEM;
+
+ if (chip->ops.choose_interface_config)
+ ret = chip->ops.choose_interface_config(chip, iface);
+ else
+ ret = nand_choose_best_timings(chip, iface);
+
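+	/*
+	 * On success, @iface is retained as chip->best_interface_config
+	 * and is expected to be released during cleanup, so only free it
+	 * on failure here.
+	 */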
+ if (ret)
+ kfree(iface);
+
+ return ret;
+}
+
+/**
+ * nand_fill_column_cycles - fill the column cycles of an address
+ * @chip: The NAND chip
+ * @addrs: Array of address cycles to fill
+ * @offset_in_page: The offset in the page
+ *
+ * Fills the first or the first two bytes of the @addrs field depending
+ * on the NAND bus width and the page size.
+ *
+ * Returns the number of cycles needed to encode the column, or a negative
+ * error code in case one of the arguments is invalid.
+ */
+static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ unsigned int offset_in_page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+	/* Make sure the offset does not go beyond the page and OOB area. */
+ if (offset_in_page > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /*
+ * On small page NANDs, there's a dedicated command to access the OOB
+ * area, and the column address is relative to the start of the OOB
+	 * area, not the start of the page. Adjust the address accordingly.
+ */
+ if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+ offset_in_page -= mtd->writesize;
+
+ /*
+ * The offset in page is expressed in bytes, if the NAND bus is 16-bit
+ * wide, then it must be divided by 2.
+ */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ if (WARN_ON(offset_in_page % 2))
+ return -EINVAL;
+
+ offset_in_page /= 2;
+ }
+
+ addrs[0] = offset_in_page;
+
+ /*
+ * Small page NANDs use 1 cycle for the columns, while large page NANDs
+ * need 2
+ */
+ if (mtd->writesize <= 512)
+ return 1;
+
+ addrs[1] = offset_in_page >> 8;
+
+ return 2;
+}
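+
+/*
+ * Example: on a 2048+64 byte page with an 8-bit bus, offset 2048 (the
+ * first OOB byte) encodes as addrs[0] = 0x00, addrs[1] = 0x08 and the
+ * function returns 2; on a 512-byte device the same offset would be
+ * rebased to OOB-relative column 0 and need a single cycle.
+ */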
+
+static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
+{
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 addrs[4];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+ NAND_COMMON_TIMING_NS(conf, tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[1] = page;
+ addrs[2] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[3] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
+
+ return nand_exec_op(chip, &op);
+}
+
+static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
+{
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ u8 addrs[5];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(4, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+ NAND_COMMON_TIMING_NS(conf, tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[2] = page;
+ addrs[3] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[4] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
+
+ return nand_exec_op(chip, &op);
+}
+
+static void rawnand_cap_cont_reads(struct nand_chip *chip)
+{
+ struct nand_memory_organization *memorg;
+ unsigned int pages_per_lun, first_lun, last_lun;
+
+ memorg = nanddev_get_memorg(&chip->base);
+ pages_per_lun = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
+ first_lun = chip->cont_read.first_page / pages_per_lun;
+ last_lun = chip->cont_read.last_page / pages_per_lun;
+
+ /* Prevent sequential cache reads across LUN boundaries */
+ if (first_lun != last_lun)
+ chip->cont_read.pause_page = first_lun * pages_per_lun + pages_per_lun - 1;
+ else
+ chip->cont_read.pause_page = chip->cont_read.last_page;
+}
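+
+/*
+ * Example (illustrative geometry): with 64 pages per block and 1024
+ * blocks per LUN, i.e. 65536 pages per LUN, a sequential read spanning
+ * pages 65500-65600 crosses a LUN boundary; pause_page is then capped
+ * at 65535 and the sequence is restarted from page 65536 once the
+ * first LUN has been drained.
+ */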
+
+static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool check_only)
+{
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ u8 addrs[5];
+ struct nand_op_instr start_instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(4, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 0),
+ NAND_OP_CMD(NAND_CMD_READCACHESEQ, NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+ NAND_COMMON_TIMING_NS(conf, tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_op_instr cont_instrs[] = {
+ NAND_OP_CMD(page == chip->cont_read.pause_page ?
+ NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ,
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+ NAND_COMMON_TIMING_NS(conf, tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation start_op = NAND_OPERATION(chip->cur_cs, start_instrs);
+ struct nand_operation cont_op = NAND_OPERATION(chip->cur_cs, cont_instrs);
+ int ret;
+
+ if (!len) {
+ start_op.ninstrs--;
+ cont_op.ninstrs--;
+ }
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[2] = page;
+ addrs[3] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[4] = page >> 16;
+ start_instrs[1].ctx.addr.naddrs++;
+ }
+
+ /* Check if cache reads are supported */
+ if (check_only) {
+ if (nand_check_op(chip, &start_op) || nand_check_op(chip, &cont_op))
+ return -EOPNOTSUPP;
+
+ return 0;
+ }
+
+ if (page == chip->cont_read.first_page)
+ ret = nand_exec_op(chip, &start_op);
+ else
+ ret = nand_exec_op(chip, &cont_op);
+ if (ret)
+ return ret;
+
+ if (!chip->cont_read.ongoing)
+ return 0;
+
+ if (page == chip->cont_read.pause_page &&
+ page != chip->cont_read.last_page) {
+ chip->cont_read.first_page = chip->cont_read.pause_page + 1;
+ rawnand_cap_cont_reads(chip);
+ } else if (page == chip->cont_read.last_page) {
+ chip->cont_read.ongoing = false;
+ }
+
+ return 0;
+}
+
+static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int page)
+{
+ return chip->cont_read.ongoing && page >= chip->cont_read.first_page;
+}
+
+/**
+ * nand_read_page_op - Do a READ PAGE operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip)) {
+ if (mtd->writesize > 512) {
+ if (rawnand_cont_read_ongoing(chip, page))
+ return nand_lp_exec_cont_read_page_op(chip, page,
+ offset_in_page,
+ buf, len, false);
+ else
+ return nand_lp_exec_read_page_op(chip, page,
+ offset_in_page, buf,
+ len);
+ }
+
+ return nand_sp_exec_read_page_op(chip, page, offset_in_page,
+ buf, len);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
+ if (len)
+ chip->legacy.read_buf(chip, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_page_op);
+
+/**
+ * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
+ * @chip: The NAND chip
+ * @page: parameter page to read
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PARAMETER PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
+ unsigned int len)
+{
+ unsigned int i;
+ u8 *p = buf;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PARAM, 0),
+ NAND_OP_ADDR(1, &page,
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+ NAND_COMMON_TIMING_NS(conf, tRR_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
+ for (i = 0; i < len; i++)
+ p[i] = chip->legacy.read_byte(chip);
+
+ return 0;
+}
+
+/**
+ * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE READ COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ u8 addrs[2] = {};
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
+ NAND_COMMON_TIMING_NS(conf, tCCS_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ instrs[3].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
+ if (len)
+ chip->legacy.read_buf(chip, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_read_column_op);
+
+/**
+ * nand_read_oob_op - Do a READ OOB operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_oob: offset within the OOB area
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ OOB operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_oob, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_oob + len > mtd->oobsize)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip))
+ return nand_read_page_op(chip, page,
+ mtd->writesize + offset_in_oob,
+ buf, len);
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
+ if (len)
+ chip->legacy.read_buf(chip, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_oob_op);
+
+static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len, bool prog)
+{
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 addrs[5] = {};
+ struct nand_op_instr instrs[] = {
+ /*
+ * The first instruction will be dropped if we're dealing
+ * with a large page NAND and adjusted if we're dealing
+ * with a small page NAND and the page offset is > 255.
+ */
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_CMD(NAND_CMD_SEQIN, 0),
+ NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ NAND_OP_CMD(NAND_CMD_PAGEPROG,
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
+
+ if (naddrs < 0)
+ return naddrs;
+
+ addrs[naddrs++] = page;
+ addrs[naddrs++] = page >> 8;
+ if (chip->options & NAND_ROW_ADDR_3)
+ addrs[naddrs++] = page >> 16;
+
+ instrs[2].ctx.addr.naddrs = naddrs;
+
+ /* Drop the last two instructions if we're not programming the page. */
+ if (!prog) {
+ op.ninstrs -= 2;
+ /* Also drop the DATA_OUT instruction if empty. */
+ if (!len)
+ op.ninstrs--;
+ }
+
+ if (mtd->writesize <= 512) {
+ /*
+ * Small pages need some more tweaking: we have to adjust the
+ * first instruction depending on the page offset we're trying
+ * to access.
+ */
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+ } else {
+ /*
+ * Drop the first command if we're dealing with a large page
+ * NAND.
+ */
+ op.instrs++;
+ op.ninstrs--;
+ }
+
+ return nand_exec_op(chip, &op);
+}
+
+/**
+ * nand_prog_page_begin_op - starts a PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues the first half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip))
+ return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, false);
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
+
+ if (buf)
+ chip->legacy.write_buf(chip, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
+
+/**
+ * nand_prog_page_end_op - ends a PROG PAGE operation
+ * @chip: The NAND chip
+ *
+ * This function issues the second half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_end_op(struct nand_chip *chip)
+{
+ int ret;
+ u8 status;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PAGEPROG,
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max),
+ 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
+ ret = chip->legacy.waitfunc(chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
+
+/**
+ * nand_prog_page_op - Do a full PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues a full PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 status;
+ int ret;
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip)) {
+ ret = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, true);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
+ page);
+ chip->legacy.write_buf(chip, buf, len);
+ chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
+ ret = chip->legacy.waitfunc(chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_op);
+
+/**
+ * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to send to the NAND
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE WRITE COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ u8 addrs[2];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDIN, 0),
+ NAND_OP_ADDR(2, addrs, NAND_COMMON_TIMING_NS(conf, tCCS_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ instrs[2].ctx.data.force_8bit = force_8bit;
+
+ /* Drop the DATA_OUT instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
+ if (len)
+ chip->legacy.write_buf(chip, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_write_column_op);
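+
+/*
+ * Illustrative sketch: CHANGE WRITE COLUMN lets a write path move the
+ * write cursor inside the page cache without restarting the operation,
+ * e.g. to append ECC bytes in the OOB area after the main data (off,
+ * ecc and ecclen are placeholders for layout-specific values):
+ *
+ *	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ *	if (!ret)
+ *		ret = nand_change_write_column_op(chip, mtd->writesize + off,
+ *						  ecc, ecclen, false);
+ *	if (!ret)
+ *		ret = nand_prog_page_end_op(chip);
+ */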
+
+/**
+ * nand_readid_op - Do a READID operation
+ * @chip: The NAND chip
+ * @addr: address cycle to pass after the READID command
+ * @buf: buffer used to store the ID
+ * @len: length of the buffer
+ *
+ * This function sends a READID command and reads back the ID returned by the
+ * NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len)
+{
+ unsigned int i;
+ u8 *id = buf, *ddrbuf = NULL;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READID, 0),
+ NAND_OP_ADDR(1, &addr,
+ NAND_COMMON_TIMING_NS(conf, tADL_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
+
+ /* READ_ID data bytes are received twice in NV-DDR mode */
+ if (len && nand_interface_is_nvddr(conf)) {
+ ddrbuf = kzalloc(len * 2, GFP_KERNEL);
+ if (!ddrbuf)
+ return -ENOMEM;
+
+ instrs[2].ctx.data.len *= 2;
+ instrs[2].ctx.data.buf.in = ddrbuf;
+ }
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ ret = nand_exec_op(chip, &op);
+ if (!ret && len && nand_interface_is_nvddr(conf)) {
+ for (i = 0; i < len; i++)
+ id[i] = ddrbuf[i * 2];
+ }
+
+ kfree(ddrbuf);
+
+ return ret;
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
+
+ for (i = 0; i < len; i++)
+ id[i] = chip->legacy.read_byte(chip);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_readid_op);
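+
+/*
+ * Illustrative sketch: reading the two mandatory ID bytes (manufacturer
+ * and device), assuming the CS line is already selected:
+ *
+ *	u8 id[2];
+ *	int ret = nand_readid_op(chip, 0, id, sizeof(id));
+ *
+ *	if (!ret)
+ *		pr_debug("maker 0x%02x, device 0x%02x\n", id[0], id[1]);
+ */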
+
+/**
+ * nand_status_op - Do a STATUS operation
+ * @chip: The NAND chip
+ * @status: out variable to store the NAND status
+ *
+ * This function sends a STATUS command and reads back the status returned by
+ * the NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_status_op(struct nand_chip *chip, u8 *status)
+{
+ if (nand_has_exec_op(chip)) {
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ u8 ddrstatus[2];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_STATUS,
+ NAND_COMMON_TIMING_NS(conf, tADL_min)),
+ NAND_OP_8BIT_DATA_IN(1, status, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
+
+ /* The status data byte will be received twice in NV-DDR mode */
+ if (status && nand_interface_is_nvddr(conf)) {
+ instrs[1].ctx.data.len *= 2;
+ instrs[1].ctx.data.buf.in = ddrstatus;
+ }
+
+ if (!status)
+ op.ninstrs--;
+
+ ret = nand_exec_op(chip, &op);
+ if (!ret && status && nand_interface_is_nvddr(conf))
+ *status = ddrstatus[0];
+
+ return ret;
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
+ if (status)
+ *status = chip->legacy.read_byte(chip);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_status_op);
+
+/**
+ * nand_exit_status_op - Exit a STATUS operation
+ * @chip: The NAND chip
+ *
+ * This function sends a READ0 command to cancel the effect of the STATUS
+ * command: without it, all subsequent reads would keep returning the status
+ * byte until a new read command is issued.
+ *
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_exit_status_op(struct nand_chip *chip)
+{
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_exit_status_op);
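+
+/*
+ * Illustrative sketch: checking the status register and restoring
+ * normal array reads afterwards:
+ *
+ *	u8 status;
+ *	int ret = nand_status_op(chip, &status);
+ *
+ *	if (!ret && !(status & NAND_STATUS_FAIL))
+ *		ret = nand_exit_status_op(chip);
+ */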
+
+/**
+ * nand_erase_op - Do an erase operation
+ * @chip: The NAND chip
+ * @eraseblock: block to erase
+ *
+ * This function sends an ERASE command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
+{
+ unsigned int page = eraseblock <<
+ (chip->phys_erase_shift - chip->page_shift);
+ int ret;
+ u8 status;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ u8 addrs[3] = { page, page >> 8, page >> 16 };
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_ERASE1, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_ERASE2,
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
+ 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ instrs[1].ctx.addr.naddrs++;
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
+ chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
+
+ ret = chip->legacy.waitfunc(chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_erase_op);
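+
+/*
+ * Illustrative sketch: erasing the block that contains a given page.
+ * The eraseblock index is the inverse of the page computation done
+ * above:
+ *
+ *	unsigned int eb = page >> (chip->phys_erase_shift - chip->page_shift);
+ *	int ret = nand_erase_op(chip, eb);
+ *
+ *	if (ret == -EIO)
+ *		pr_warn("failed to erase block %u\n", eb);
+ */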
+
+/**
+ * nand_set_features_op - Do a SET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a SET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_set_features_op(struct nand_chip *chip, u8 feature,
+ const void *data)
+{
+ const u8 *params = data;
+ int i, ret;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, NAND_COMMON_TIMING_NS(conf,
+ tADL_min)),
+ NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
+ NAND_COMMON_TIMING_NS(conf,
+ tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
+ 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->legacy.write_byte(chip, params[i]);
+
+ ret = chip->legacy.waitfunc(chip);
+ if (ret < 0)
+ return ret;
+
+ if (ret & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * nand_get_features_op - Do a GET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a GET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_get_features_op(struct nand_chip *chip, u8 feature,
+ void *data)
+{
+ u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2];
+ int i;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature,
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
+ NAND_COMMON_TIMING_NS(conf, tRR_min)),
+ NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
+ data, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
+
+ /* GET_FEATURE data bytes are received twice in NV-DDR mode */
+ if (nand_interface_is_nvddr(conf)) {
+ instrs[3].ctx.data.len *= 2;
+ instrs[3].ctx.data.buf.in = ddrbuf;
+ }
+
+ ret = nand_exec_op(chip, &op);
+ if (nand_interface_is_nvddr(conf)) {
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++)
+ params[i] = ddrbuf[i * 2];
+ }
+
+ return ret;
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ params[i] = chip->legacy.read_byte(chip);
+
+ return 0;
+}
+
+static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
+ unsigned int delay_ns)
+{
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
+ PSEC_TO_NSEC(delay_ns)),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ /* Apply delay or wait for ready/busy pin */
+ if (!chip->legacy.dev_ready)
+ udelay(chip->legacy.chip_delay);
+ else
+ nand_wait_ready(chip);
+
+ return 0;
+}
+
+/**
+ * nand_reset_op - Do a reset operation
+ * @chip: The NAND chip
+ *
+ * This function sends a RESET command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_reset_op(struct nand_chip *chip)
+{
+ if (nand_has_exec_op(chip)) {
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RESET,
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max),
+ 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_reset_op);
+
+/**
+ * nand_read_data_op - Read data from the NAND
+ * @chip: The NAND chip
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ * @check_only: do not actually run the command, only check whether the
+ *		controller driver supports it
+ *
+ * This function does a raw data read on the bus. Usually used after launching
+ * another NAND operation like nand_read_page_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit, bool check_only)
+{
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ u8 *ddrbuf = NULL;
+ int ret, i;
+
+ instrs[0].ctx.data.force_8bit = force_8bit;
+
+ /*
+		 * Parameter payloads (ID, status, features, etc.) do not go
+		 * through the same pipeline as regular data, hence the
+		 * force_8bit flag must be set. This also means that, when
+		 * NV-DDR timings are in use, each data byte is received
+		 * twice.
+ */
+ if (force_8bit && nand_interface_is_nvddr(conf)) {
+ ddrbuf = kzalloc(len * 2, GFP_KERNEL);
+ if (!ddrbuf)
+ return -ENOMEM;
+
+ instrs[0].ctx.data.len *= 2;
+ instrs[0].ctx.data.buf.in = ddrbuf;
+ }
+
+ if (check_only) {
+ ret = nand_check_op(chip, &op);
+ kfree(ddrbuf);
+ return ret;
+ }
+
+ ret = nand_exec_op(chip, &op);
+ if (!ret && force_8bit && nand_interface_is_nvddr(conf)) {
+ u8 *dst = buf;
+
+ for (i = 0; i < len; i++)
+ dst[i] = ddrbuf[i * 2];
+ }
+
+ kfree(ddrbuf);
+
+ return ret;
+ }
+
+ if (check_only)
+ return 0;
+
+ if (force_8bit) {
+ u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ p[i] = chip->legacy.read_byte(chip);
+ } else {
+ chip->legacy.read_buf(chip, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_data_op);
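+
+/*
+ * Illustrative sketch: a raw data read is normally chained after a
+ * command that loaded the NAND cache, e.g. fetching the OOB bytes right
+ * after the main area, as the raw page read helpers below do:
+ *
+ *	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ *	if (!ret)
+ *		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ *					false, false);
+ */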
+
+/**
+ * nand_write_data_op - Write data to the NAND
+ * @chip: The NAND chip
+ * @buf: buffer containing the data to send on the bus
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function does a raw data write on the bus. Usually used after launching
+ * another NAND operation like nand_prog_page_begin_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit)
+{
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ instrs[0].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ if (force_8bit) {
+ const u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ chip->legacy.write_byte(chip, p[i]);
+ } else {
+ chip->legacy.write_buf(chip, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_write_data_op);
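+
+/*
+ * Illustrative sketch: force_8bit is meant for parameter-like payloads
+ * that always travel 8 bits wide, even on x16 devices, e.g. sending a
+ * subfeature parameter one byte at a time:
+ *
+ *	u8 params[ONFI_SUBFEATURE_PARAM_LEN] = { };
+ *
+ *	ret = nand_write_data_op(chip, params, sizeof(params), true);
+ */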
+
+/**
+ * struct nand_op_parser_ctx - Context used by the parser
+ * @instrs: array of all the instructions that must be addressed
+ * @ninstrs: length of the @instrs array
+ * @subop: Sub-operation to be passed to the NAND controller
+ *
+ * This structure is used by the core to split NAND operations into
+ * sub-operations that can be handled by the NAND controller.
+ */
+struct nand_op_parser_ctx {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+ struct nand_subop subop;
+};
+
+/**
+ * nand_op_parser_must_split_instr - Checks if an instruction must be split
+ * @pat: the parser pattern element that matches @instr
+ * @instr: pointer to the instruction to check
+ * @start_offset: this is an in/out parameter. If @instr has already been
+ * split, then @start_offset is the offset from which to start
+ * (either an address cycle or an offset in the data buffer).
+ *		  Conversely, if the function returns true (i.e. @instr must be
+ * split), this parameter is updated to point to the first
+ * data/address cycle that has not been taken care of.
+ *
+ * Some NAND controllers are limited and cannot send X address cycles in a
+ * single operation, or cannot read/write more than Y bytes at a time. In
+ * this case, the instruction that does not fit in a single
+ * controller operation is split into two or more chunks.
+ *
+ * Returns true if the instruction must be split, false otherwise.
+ * The @start_offset parameter is also updated to the offset at which the next
+ * bundle of instructions must start (for an address or a data instruction).
+ */
+static bool
+nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
+ const struct nand_op_instr *instr,
+ unsigned int *start_offset)
+{
+ switch (pat->type) {
+ case NAND_OP_ADDR_INSTR:
+ if (!pat->ctx.addr.maxcycles)
+ break;
+
+ if (instr->ctx.addr.naddrs - *start_offset >
+ pat->ctx.addr.maxcycles) {
+ *start_offset += pat->ctx.addr.maxcycles;
+ return true;
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ if (!pat->ctx.data.maxlen)
+ break;
+
+ if (instr->ctx.data.len - *start_offset >
+ pat->ctx.data.maxlen) {
+ *start_offset += pat->ctx.data.maxlen;
+ return true;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/**
+ * nand_op_parser_match_pat - Checks if a pattern matches the instructions
+ * remaining in the parser context
+ * @pat: the pattern to test
+ * @ctx: the parser context structure to match with the pattern @pat
+ *
+ * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
+ * Returns true if this is the case, false otherwise. When true is returned,
+ * @ctx->subop is updated with the set of instructions to be passed to the
+ * controller driver.
+ */
+static bool
+nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
+ struct nand_op_parser_ctx *ctx)
+{
+ unsigned int instr_offset = ctx->subop.first_instr_start_off;
+ const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
+ const struct nand_op_instr *instr = ctx->subop.instrs;
+ unsigned int i, ninstrs;
+
+ for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
+ /*
+ * The pattern instruction does not match the operation
+ * instruction. If the instruction is marked optional in the
+ * pattern definition, we skip the pattern element and continue
+ * to the next one. If the element is mandatory, there's no
+ * match and we can return false directly.
+ */
+ if (instr->type != pat->elems[i].type) {
+ if (!pat->elems[i].optional)
+ return false;
+
+ continue;
+ }
+
+ /*
+ * Now check the pattern element constraints. If the pattern is
+ * not able to handle the whole instruction in a single step,
+ * we have to split it.
+		 * The instr_offset value comes back updated to point to the
+		 * position where the instruction must be split (the start of
+		 * the next subop chunk); it is stored in last_instr_end_off
+		 * when the subop is finalized.
+ */
+ if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
+ &instr_offset)) {
+ ninstrs++;
+ i++;
+ break;
+ }
+
+ instr++;
+ ninstrs++;
+ instr_offset = 0;
+ }
+
+ /*
+ * This can happen if all instructions of a pattern are optional.
+ * Still, if there's not at least one instruction handled by this
+ * pattern, this is not a match, and we should try the next one (if
+ * any).
+ */
+ if (!ninstrs)
+ return false;
+
+ /*
+ * We had a match on the pattern head, but the pattern may be longer
+ * than the instructions we're asked to execute. We need to make sure
+ * there are no mandatory elements in the pattern tail.
+ */
+ for (; i < pat->nelems; i++) {
+ if (!pat->elems[i].optional)
+ return false;
+ }
+
+ /*
+ * We have a match: update the subop structure accordingly and return
+ * true.
+ */
+ ctx->subop.ninstrs = ninstrs;
+ ctx->subop.last_instr_end_off = instr_offset;
+
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
+{
+ const struct nand_op_instr *instr;
+ char *prefix = " ";
+ unsigned int i;
+
+ pr_debug("executing subop (CS%d):\n", ctx->subop.cs);
+
+ for (i = 0; i < ctx->ninstrs; i++) {
+ instr = &ctx->instrs[i];
+
+ if (instr == &ctx->subop.instrs[0])
+ prefix = " ->";
+
+ nand_op_trace(prefix, instr);
+
+ if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
+ prefix = " ";
+ }
+}
+#else
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
+{
+ /* NOP */
+}
+#endif
+
+static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
+ const struct nand_op_parser_ctx *b)
+{
+ if (a->subop.ninstrs < b->subop.ninstrs)
+ return -1;
+ else if (a->subop.ninstrs > b->subop.ninstrs)
+ return 1;
+
+ if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
+ return -1;
+ else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * nand_op_parser_exec_op - exec_op parser
+ * @chip: the NAND chip
+ * @parser: patterns description provided by the controller driver
+ * @op: the NAND operation to address
+ * @check_only: when true, the function only checks if @op can be handled but
+ * does not execute the operation
+ *
+ * Helper function designed to ease integration of NAND controller drivers that
+ * only support a limited set of instruction sequences. The supported sequences
+ * are described in @parser, and the framework takes care of splitting @op into
+ * multiple sub-operations (if required) and pass them back to the ->exec()
+ * callback of the matching pattern if @check_only is set to false.
+ *
+ * NAND controller drivers should call this function from their own ->exec_op()
+ * implementation.
+ *
+ * Returns 0 on success, a negative error code otherwise. A failure can be
+ * caused by an unsupported operation (none of the supported patterns is able
+ * to handle the requested operation), or by an error returned by the
+ * matching pattern's ->exec() hook.
+ */
+int nand_op_parser_exec_op(struct nand_chip *chip,
+ const struct nand_op_parser *parser,
+ const struct nand_operation *op, bool check_only)
+{
+ struct nand_op_parser_ctx ctx = {
+ .subop.cs = op->cs,
+ .subop.instrs = op->instrs,
+ .instrs = op->instrs,
+ .ninstrs = op->ninstrs,
+ };
+ unsigned int i;
+
+ while (ctx.subop.instrs < op->instrs + op->ninstrs) {
+ const struct nand_op_parser_pattern *pattern;
+ struct nand_op_parser_ctx best_ctx;
+ int ret, best_pattern = -1;
+
+ for (i = 0; i < parser->npatterns; i++) {
+ struct nand_op_parser_ctx test_ctx = ctx;
+
+ pattern = &parser->patterns[i];
+ if (!nand_op_parser_match_pat(pattern, &test_ctx))
+ continue;
+
+ if (best_pattern >= 0 &&
+ nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
+ continue;
+
+ best_pattern = i;
+ best_ctx = test_ctx;
+ }
+
+ if (best_pattern < 0) {
+ pr_debug("->exec_op() parser: pattern not found!\n");
+ return -ENOTSUPP;
+ }
+
+ ctx = best_ctx;
+ nand_op_parser_trace(&ctx);
+
+ if (!check_only) {
+ pattern = &parser->patterns[best_pattern];
+ ret = pattern->exec(chip, &ctx.subop);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Update the context structure by pointing to the start of the
+ * next subop.
+ */
+ ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
+ if (ctx.subop.last_instr_end_off)
+ ctx.subop.instrs -= 1;
+
+ ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
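+
+/*
+ * Illustrative sketch of the expected usage (the foo_* names are made
+ * up): a controller driver declares the instruction sequences it
+ * supports with the NAND_OP_PARSER() macros from <linux/mtd/rawnand.h>
+ * and forwards its ->exec_op() calls to the parser:
+ *
+ *	static const struct nand_op_parser foo_op_parser = NAND_OP_PARSER(
+ *		NAND_OP_PARSER_PATTERN(
+ *			foo_exec_subop,
+ *			NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ *			NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
+ *			NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ *			NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ *			NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)),
+ *	);
+ *
+ *	static int foo_exec_op(struct nand_chip *chip,
+ *			       const struct nand_operation *op,
+ *			       bool check_only)
+ *	{
+ *		return nand_op_parser_exec_op(chip, &foo_op_parser, op,
+ *					      check_only);
+ *	}
+ *
+ * foo_exec_subop() then receives the sub-operations carved out of @op
+ * and must issue them on the hardware. The maximum address cycles (5)
+ * and data length (1024) are placeholders for controller limits.
+ */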
+
+static bool nand_instr_is_data(const struct nand_op_instr *instr)
+{
+ return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
+ instr->type == NAND_OP_DATA_OUT_INSTR);
+}
+
+static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ return subop && instr_idx < subop->ninstrs;
+}
+
+static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (instr_idx)
+ return 0;
+
+ return subop->first_instr_start_off;
+}
+
+/**
+ * nand_subop_get_addr_start_off - Get the start offset in an address array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->addr.addrs field of address instructions. This is wrong as address
+ * instructions might be split.
+ *
+ * Given an address instruction, returns the offset of the first cycle to issue.
+ */
+unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
+ return 0;
+
+ return nand_subop_get_start_off(subop, instr_idx);
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
+
+/**
+ * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->addr.naddrs field of an address instruction. This is wrong as address
+ * instructions might be split.
+ *
+ * Given an address instruction, returns the number of address cycles to issue.
+ */
+unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ int start_off, end_off;
+
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
+ return 0;
+
+ start_off = nand_subop_get_addr_start_off(subop, instr_idx);
+
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
+
+ return end_off - start_off;
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
+
+/**
+ * nand_subop_get_data_start_off - Get the start offset in a data array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->data.buf.{in,out} field of data instructions. This is wrong as data
+ * instructions might be split.
+ *
+ * Given a data instruction, returns the offset to start from.
+ */
+unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx])))
+ return 0;
+
+ return nand_subop_get_start_off(subop, instr_idx);
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
+
+/**
+ * nand_subop_get_data_len - Get the number of bytes to retrieve
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->data.len field of a data instruction. This is wrong as data instructions
+ * might be split.
+ *
+ * Returns the length of the chunk of data to send/receive.
+ */
+unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ int start_off = 0, end_off;
+
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx])))
+ return 0;
+
+ start_off = nand_subop_get_data_start_off(subop, instr_idx);
+
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.data.len;
+
+ return end_off - start_off;
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
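+
+/*
+ * Illustrative sketch: a pattern ->exec() hook should rely on the four
+ * accessors above instead of dereferencing the instruction fields
+ * directly, since instructions may have been split. foo_send_addrs()
+ * and foo_read_data() stand in for whatever the controller hardware
+ * requires:
+ *
+ *	static int foo_exec_subop(struct nand_chip *chip,
+ *				  const struct nand_subop *subop)
+ *	{
+ *		unsigned int i;
+ *
+ *		for (i = 0; i < subop->ninstrs; i++) {
+ *			const struct nand_op_instr *instr = &subop->instrs[i];
+ *
+ *			switch (instr->type) {
+ *			case NAND_OP_ADDR_INSTR:
+ *				foo_send_addrs(instr->ctx.addr.addrs +
+ *					       nand_subop_get_addr_start_off(subop, i),
+ *					       nand_subop_get_num_addr_cyc(subop, i));
+ *				break;
+ *			case NAND_OP_DATA_IN_INSTR:
+ *				foo_read_data(instr->ctx.data.buf.in +
+ *					      nand_subop_get_data_start_off(subop, i),
+ *					      nand_subop_get_data_len(subop, i));
+ *				break;
+ *			...
+ *			}
+ *		}
+ *
+ *		return 0;
+ *	}
+ */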
+
+/**
+ * nand_reset - Reset and initialize a NAND device
+ * @chip: The NAND chip
+ * @chipnr: Internal die id
+ *
+ * Save the timings data structure, then apply SDR timings mode 0 (see
+ * nand_reset_interface for details), perform the reset operation, and
+ * restore the previous timings.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_reset(struct nand_chip *chip, int chipnr)
+{
+ int ret;
+
+ ret = nand_reset_interface(chip, chipnr);
+ if (ret)
+ return ret;
+
+ /*
+ * The CS line has to be released before we can apply the new NAND
+ * interface settings, hence this weird nand_select_target()
+ * nand_deselect_target() dance.
+ */
+ nand_select_target(chip, chipnr);
+ ret = nand_reset_op(chip);
+ nand_deselect_target(chip);
+ if (ret)
+ return ret;
+
+ ret = nand_setup_interface(chip, chipnr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_reset);
+
+/**
+ * nand_get_features - wrapper to perform a GET_FEATURE
+ * @chip: NAND chip info structure
+ * @addr: feature address
+ * @subfeature_param: the subfeature parameters, a four bytes array
+ *
+ * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
+ * operation cannot be handled.
+ */
+int nand_get_features(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ if (!nand_supports_get_features(chip, addr))
+ return -ENOTSUPP;
+
+ if (chip->legacy.get_features)
+ return chip->legacy.get_features(chip, addr, subfeature_param);
+
+ return nand_get_features_op(chip, addr, subfeature_param);
+}
+
+/**
+ * nand_set_features - wrapper to perform a SET_FEATURE
+ * @chip: NAND chip info structure
+ * @addr: feature address
+ * @subfeature_param: the subfeature parameters, a four bytes array
+ *
+ * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
+ * operation cannot be handled.
+ */
+int nand_set_features(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ if (!nand_supports_set_features(chip, addr))
+ return -ENOTSUPP;
+
+ if (chip->legacy.set_features)
+ return chip->legacy.set_features(chip, addr, subfeature_param);
+
+ return nand_set_features_op(chip, addr, subfeature_param);
+}
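+
+/*
+ * Illustrative sketch: the two wrappers above are typically used
+ * together, e.g. to program an ONFI timing mode and read it back for
+ * confirmation (mode is a placeholder for the timing mode number):
+ *
+ *	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, };
+ *
+ *	ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
+ *				tmode_param);
+ *	if (!ret)
+ *		ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
+ *					tmode_param);
+ */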
+
+/**
+ * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
+ * @buf: buffer to test
+ * @len: buffer length
+ * @bitflips_threshold: maximum number of bitflips
+ *
+ * Check if a buffer contains only 0xff, which means the underlying region
+ * has been erased and is ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * the region is considered not erased.
+ * Note: The logic of this function has been extracted from the memweight
+ * implementation, except that nand_check_erased_buf exits before testing
+ * the whole buffer if the number of bitflips exceeds the
+ * bitflips_threshold value.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold.
+ */
+static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
+{
+ const unsigned char *bitmap = buf;
+ int bitflips = 0;
+ int weight;
+
+ for (; len && ((uintptr_t)bitmap) % sizeof(long);
+ len--, bitmap++) {
+ weight = hweight8(*bitmap);
+ bitflips += BITS_PER_BYTE - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ for (; len >= sizeof(long);
+ len -= sizeof(long), bitmap += sizeof(long)) {
+ unsigned long d = *((unsigned long *)bitmap);
+ if (d == ~0UL)
+ continue;
+ weight = hweight_long(d);
+ bitflips += BITS_PER_LONG - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ for (; len > 0; len--, bitmap++) {
+ weight = hweight8(*bitmap);
+ bitflips += BITS_PER_BYTE - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ return bitflips;
+}
+
+/**
+ * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
+ * 0xff data
+ * @data: data buffer to test
+ * @datalen: data length
+ * @ecc: ECC buffer
+ * @ecclen: ECC length
+ * @extraoob: extra OOB buffer
+ * @extraooblen: extra OOB length
+ * @bitflips_threshold: maximum number of bitflips
+ *
+ * Check if a data buffer and its associated ECC and OOB data contains only
+ * 0xff pattern, which means the underlying region has been erased and is
+ * ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * the region is considered not erased.
+ *
+ * Note:
+ * 1/ ECC algorithms work on pre-defined block sizes which are usually
+ *    different from the NAND page size. When fixing bitflips, ECC engines
+ *    report the number of errors per chunk, and the NAND core infrastructure
+ *    expects you to return the maximum number of bitflips for the whole page.
+ *    This is why you should always use this function on a single chunk and
+ *    not on the whole page. After checking each chunk you should update your
+ *    max_bitflips value accordingly.
+ * 2/ When checking for bitflips in erased pages you should not only check
+ *    the payload data but also the associated ECC data, because a user might
+ *    have programmed almost all bits to 1, leaving only a few at 0. In this
+ *    case, the chunk should not be considered erased, and checking the ECC
+ *    bytes catches it.
+ * 3/ The extraoob argument is optional, and should be used if some of your OOB
+ * data are protected by the ECC engine.
+ * It could also be used if you support subpages and want to attach some
+ * extra OOB data to an ECC chunk.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold. In case of success, the passed buffers are filled with 0xff.
+ */
+int nand_check_erased_ecc_chunk(void *data, int datalen,
+ void *ecc, int ecclen,
+ void *extraoob, int extraooblen,
+ int bitflips_threshold)
+{
+ int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
+
+ data_bitflips = nand_check_erased_buf(data, datalen,
+ bitflips_threshold);
+ if (data_bitflips < 0)
+ return data_bitflips;
+
+ bitflips_threshold -= data_bitflips;
+
+ ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
+ if (ecc_bitflips < 0)
+ return ecc_bitflips;
+
+ bitflips_threshold -= ecc_bitflips;
+
+ extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
+ bitflips_threshold);
+ if (extraoob_bitflips < 0)
+ return extraoob_bitflips;
+
+ if (data_bitflips)
+ memset(data, 0xff, datalen);
+
+ if (ecc_bitflips)
+ memset(ecc, 0xff, ecclen);
+
+ if (extraoob_bitflips)
+ memset(extraoob, 0xff, extraooblen);
+
+ return data_bitflips + ecc_bitflips + extraoob_bitflips;
+}
+EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
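+
+/*
+ * Illustrative sketch: ->read_page() implementations call this helper
+ * once per ECC chunk when correction reports -EBADMSG, accumulating the
+ * worst case, as the hardware ECC read paths below do:
+ *
+ *	stat = nand_check_erased_ecc_chunk(p, eccsize, &ecc_code[i],
+ *					   eccbytes, NULL, 0,
+ *					   chip->ecc.strength);
+ *	if (stat < 0)
+ *		mtd->ecc_stats.failed++;
+ *	else
+ *		max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ */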
+
+/**
+ * nand_read_page_raw_notsupp - dummy read raw page function
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Returns -ENOTSUPP unconditionally.
+ */
+int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ return -ENOTSUPP;
+}
+
+/**
+ * nand_read_page_raw - [INTERN] read raw page data without ecc
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_read_page_raw);
+
+/**
+ * nand_monolithic_read_page_raw - Monolithic page read in raw mode
+ * @chip: NAND chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * This is a raw page read, i.e. without any error detection/correction.
+ * Monolithic means we are requesting all the relevant data (main area
+ * plus, optionally, OOB) to be loaded in the NAND cache and sent over the
+ * bus (from the NAND chip to the NAND controller) in a single
+ * operation. This is an alternative to nand_read_page_raw(), which
+ * first reads the main data, and if the OOB data is requested too,
+ * then reads more data on the bus.
+ */
+int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int size = mtd->writesize;
+ u8 *read_buf = buf;
+ int ret;
+
+ if (oob_required) {
+ size += mtd->oobsize;
+
+ if (buf != chip->data_buf)
+ read_buf = nand_get_data_buf(chip);
+ }
+
+ ret = nand_read_page_op(chip, page, 0, read_buf, size);
+ if (ret)
+ return ret;
+
+ if (buf != chip->data_buf)
+ memcpy(buf, read_buf, mtd->writesize);
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_monolithic_read_page_raw);
+
+/**
+ * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * We need a special oob layout and handling even when OOB isn't used.
+ */
+static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size, ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ ret = nand_read_data_op(chip, buf, eccsize, false, false);
+ if (ret)
+ return ret;
+
+ buf += eccsize;
+
+ if (chip->ecc.prepad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false, false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.prepad;
+ }
+
+ ret = nand_read_data_op(chip, oob, eccbytes, false, false);
+ if (ret)
+ return ret;
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false, false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size) {
+ ret = nand_read_data_op(chip, oob, size, false, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ */
+static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
+
+ chip->ecc.read_page_raw(chip, buf, 1, page);
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
+ * @chip: nand chip info structure
+ * @data_offs: offset of requested data within the page
+ * @readlen: data length
+ * @bufpoi: buffer to store read data
+ * @page: page number to read
+ */
+static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
+ uint32_t readlen, uint8_t *bufpoi, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int start_step, end_step, num_steps, ret;
+ uint8_t *p;
+ int data_col_addr, i, gaps = 0;
+ int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
+ int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
+ int index, section = 0;
+ unsigned int max_bitflips = 0;
+ struct mtd_oob_region oobregion = { };
+
+	/* Column address within the page, aligned to the ECC size (256 bytes) */
+ start_step = data_offs / chip->ecc.size;
+ end_step = (data_offs + readlen - 1) / chip->ecc.size;
+ num_steps = end_step - start_step + 1;
+ index = start_step * chip->ecc.bytes;
+
+ /* Data size aligned to ECC ecc.size */
+ datafrag_len = num_steps * chip->ecc.size;
+ eccfrag_len = num_steps * chip->ecc.bytes;
+
+ data_col_addr = start_step * chip->ecc.size;
+	/* The read may start at a non-zero column within the page */
+ p = bufpoi + data_col_addr;
+ ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
+ if (ret)
+ return ret;
+
+ /* Calculate ECC */
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
+ chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
+
+ /*
+	 * Reads are faster if we fetch the ECC bytes at their exact
+	 * positions (ecc.pos). Make sure there are no gaps in the ECC
+	 * positions first.
+ */
+ ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
+ if (ret)
+ return ret;
+
+ if (oobregion.length < eccfrag_len)
+ gaps = 1;
+
+ if (gaps) {
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ } else {
+ /*
+		 * Send the command to read the particular ECC bytes, taking
+		 * care of buswidth alignment in read_buf.
+ */
+ aligned_pos = oobregion.offset & ~(busw - 1);
+ aligned_len = eccfrag_len;
+ if (oobregion.offset & (busw - 1))
+ aligned_len++;
+ if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
+ (busw - 1))
+ aligned_len++;
+
+ ret = nand_change_read_column_op(chip,
+ mtd->writesize + aligned_pos,
+ &chip->oob_poi[aligned_pos],
+ aligned_len, false);
+ if (ret)
+ return ret;
+ }
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
+ chip->oob_poi, index, eccfrag_len);
+ if (ret)
+ return ret;
+
+ p = bufpoi + data_col_addr;
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
+ int stat;
+
+ stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
+ &chip->ecc.calc_buf[i]);
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
+ &chip->ecc.code_buf[i],
+ chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers which need a special oob layout.
+ */
+static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
+ if (ret)
+ return ret;
+
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
+ }
+
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
+ false);
+ if (ret)
+ return ret;
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
+ &ecc_code[i], eccbytes,
+ NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC
+ * data read from OOB area
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Hardware ECC for large page chips, which requires the ECC data to be
+ * extracted from the OOB before the actual data is read.
+ */
+int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
+
+ /* Read the OOB area first */
+ ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ if (ret)
+ return ret;
+
+ /* Move read cursor to start of page */
+ ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
+ if (ret)
+ return ret;
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
+ if (ret)
+ return ret;
+
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
+ &ecc_code[i],
+ eccbytes, NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first);
+
+/**
+ * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
+ */
+static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret, i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
+ uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+ unsigned int max_bitflips = 0;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
+ if (ret)
+ return ret;
+
+ if (chip->ecc.prepad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false, false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.hwctl(chip, NAND_ECC_READSYN);
+
+ ret = nand_read_data_op(chip, oob, eccbytes, false, false);
+ if (ret)
+ return ret;
+
+ stat = chip->ecc.correct(chip, p, oob, NULL);
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false, false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.postpad;
+ }
+
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
+ oob - eccpadbytes,
+ eccpadbytes,
+ NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i) {
+ ret = nand_read_data_op(chip, oob, i, false, false);
+ if (ret)
+ return ret;
+ }
+
+ return max_bitflips;
+}
+
+/**
+ * nand_transfer_oob - [INTERN] Transfer oob to client buffer
+ * @chip: NAND chip object
+ * @oob: oob destination address
+ * @ops: oob ops structure
+ * @len: size of oob to transfer
+ */
+static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
+ struct mtd_oob_ops *ops, size_t len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ switch (ops->mode) {
+
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ memcpy(oob, chip->oob_poi + ops->ooboffs, len);
+ return oob + len;
+
+ case MTD_OPS_AUTO_OOB:
+ ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
+ ops->ooboffs, len);
+ BUG_ON(ret);
+ return oob + len;
+
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
+ u32 readlen, int col)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int end_page, end_col;
+
+ chip->cont_read.ongoing = false;
+
+ if (!chip->controller->supported_op.cont_read)
+ return;
+
+ end_page = DIV_ROUND_UP(col + readlen, mtd->writesize);
+ end_col = (col + readlen) % mtd->writesize;
+
+ if (col)
+ page++;
+
+ if (end_col && end_page)
+ end_page--;
+
+ if (page + 1 > end_page)
+ return;
+
+ chip->cont_read.first_page = page;
+ chip->cont_read.last_page = end_page;
+ chip->cont_read.ongoing = true;
+
+ rawnand_cap_cont_reads(chip);
+}
+
+static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned int page)
+{
+ if (!chip->cont_read.ongoing || page != chip->cont_read.first_page)
+ return;
+
+ chip->cont_read.first_page++;
+ if (chip->cont_read.first_page == chip->cont_read.pause_page)
+ chip->cont_read.first_page++;
+ if (chip->cont_read.first_page >= chip->cont_read.last_page)
+ chip->cont_read.ongoing = false;
+}
+
+/**
+ * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
+ * @chip: NAND chip object
+ * @retry_mode: the retry mode to use
+ *
+ * Some vendors supply a special command to shift the Vt threshold, to be used
+ * when there are too many bitflips in a page (i.e., ECC error). After setting
+ * a new threshold, the host should retry reading the page.
+ */
+static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
+{
+ pr_debug("setting READ RETRY mode %d\n", retry_mode);
+
+ if (retry_mode >= chip->read_retries)
+ return -EINVAL;
+
+ if (!chip->ops.setup_read_retry)
+ return -EOPNOTSUPP;
+
+ return chip->ops.setup_read_retry(chip, retry_mode);
+}
+
+static void nand_wait_readrdy(struct nand_chip *chip)
+{
+ const struct nand_interface_config *conf;
+
+ if (!(chip->options & NAND_NEED_READRDY))
+ return;
+
+ conf = nand_get_interface_config(chip);
+ WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0));
+}
+
+/**
+ * nand_do_read_ops - [INTERN] Read data with ECC
+ * @chip: NAND chip object
+ * @from: offset to read from
+ * @ops: oob ops structure
+ *
+ * Internal function. Called with chip held.
+ */
+static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, page, realpage, col, bytes, aligned, oob_required;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret = 0;
+ uint32_t readlen = ops->len;
+ uint32_t oobreadlen = ops->ooblen;
+ uint32_t max_oobsize = mtd_oobavail(mtd, ops);
+
+ uint8_t *bufpoi, *oob, *buf;
+ int use_bounce_buf;
+ unsigned int max_bitflips = 0;
+ int retry_mode = 0;
+ bool ecc_fail = false;
+
+ /* Check if the region is secured */
+ if (nand_region_is_secured(chip, from, readlen))
+ return -EIO;
+
+ chipnr = (int)(from >> chip->chip_shift);
+ nand_select_target(chip, chipnr);
+
+ realpage = (int)(from >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+ col = (int)(from & (mtd->writesize - 1));
+
+ buf = ops->datbuf;
+ oob = ops->oobbuf;
+ oob_required = oob ? 1 : 0;
+
+ rawnand_enable_cont_reads(chip, page, readlen, col);
+
+ while (1) {
+ struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
+
+ bytes = min(mtd->writesize - col, readlen);
+ aligned = (bytes == mtd->writesize);
+
+ if (!aligned)
+ use_bounce_buf = 1;
+ else if (chip->options & NAND_USES_DMA)
+ use_bounce_buf = !virt_addr_valid(buf) ||
+ !IS_ALIGNED((unsigned long)buf,
+ chip->buf_align);
+ else
+ use_bounce_buf = 0;
+
+ /* Is the current page in the buffer? */
+ if (realpage != chip->pagecache.page || oob) {
+ bufpoi = use_bounce_buf ? chip->data_buf : buf;
+
+ if (use_bounce_buf && aligned)
+ pr_debug("%s: using read bounce buffer for buf@%p\n",
+ __func__, buf);
+
+read_retry:
+ /*
+ * Now read the page into the buffer. Absent an error,
+ * the read methods return max bitflips per ecc step.
+ */
+ if (unlikely(ops->mode == MTD_OPS_RAW))
+ ret = chip->ecc.read_page_raw(chip, bufpoi,
+ oob_required,
+ page);
+ else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
+ !oob)
+ ret = chip->ecc.read_subpage(chip, col, bytes,
+ bufpoi, page);
+ else
+ ret = chip->ecc.read_page(chip, bufpoi,
+ oob_required, page);
+ if (ret < 0) {
+ if (use_bounce_buf)
+ /* Invalidate page cache */
+ chip->pagecache.page = -1;
+ break;
+ }
+
+ /*
+ * Copy back the data in the initial buffer when reading
+ * partial pages or when a bounce buffer is required.
+ */
+ if (use_bounce_buf) {
+ if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
+ !(mtd->ecc_stats.failed - ecc_stats.failed) &&
+ (ops->mode != MTD_OPS_RAW)) {
+ chip->pagecache.page = realpage;
+ chip->pagecache.bitflips = ret;
+ } else {
+ /* Invalidate page cache */
+ chip->pagecache.page = -1;
+ }
+ memcpy(buf, bufpoi + col, bytes);
+ }
+
+ if (unlikely(oob)) {
+ int toread = min(oobreadlen, max_oobsize);
+
+ if (toread) {
+ oob = nand_transfer_oob(chip, oob, ops,
+ toread);
+ oobreadlen -= toread;
+ }
+ }
+
+ nand_wait_readrdy(chip);
+
+ if (mtd->ecc_stats.failed - ecc_stats.failed) {
+ if (retry_mode + 1 < chip->read_retries) {
+ retry_mode++;
+ ret = nand_setup_read_retry(chip,
+ retry_mode);
+ if (ret < 0)
+ break;
+
+ /* Reset ecc_stats; retry */
+ mtd->ecc_stats = ecc_stats;
+ goto read_retry;
+ } else {
+ /* No more retry modes; real failure */
+ ecc_fail = true;
+ }
+ }
+
+ buf += bytes;
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ } else {
+ memcpy(buf, chip->data_buf + col, bytes);
+ buf += bytes;
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ chip->pagecache.bitflips);
+
+ rawnand_cont_read_skip_first_page(chip, page);
+ }
+
+ readlen -= bytes;
+
+ /* Reset to retry mode 0 */
+ if (retry_mode) {
+ ret = nand_setup_read_retry(chip, 0);
+ if (ret < 0)
+ break;
+ retry_mode = 0;
+ }
+
+ if (!readlen)
+ break;
+
+ /* For subsequent reads align to page boundary */
+ col = 0;
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+		/* Check if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
+ }
+ }
+ nand_deselect_target(chip);
+
+ ops->retlen = ops->len - (size_t) readlen;
+ if (oob)
+ ops->oobretlen = ops->ooblen - oobreadlen;
+
+ if (ret < 0)
+ return ret;
+
+ if (ecc_fail)
+ return -EBADMSG;
+
+ return max_bitflips;
+}
+
+/**
+ * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+int nand_read_oob_std(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+}
+EXPORT_SYMBOL(nand_read_oob_std);
+
+/**
+ * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
+ * with syndromes
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int length = mtd->oobsize;
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size;
+ uint8_t *bufpoi = chip->oob_poi;
+ int i, toread, sndrnd = 0, pos, ret;
+
+ ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ if (sndrnd) {
+ int ret;
+
+ pos = eccsize + i * (eccsize + chunk);
+ if (mtd->writesize > 512)
+ ret = nand_change_read_column_op(chip, pos,
+ NULL, 0,
+ false);
+ else
+ ret = nand_read_page_op(chip, page, pos, NULL,
+ 0);
+
+ if (ret)
+ return ret;
+ } else
+ sndrnd = 1;
+ toread = min_t(int, length, chunk);
+
+ ret = nand_read_data_op(chip, bufpoi, toread, false, false);
+ if (ret)
+ return ret;
+
+ bufpoi += toread;
+ length -= toread;
+ }
+ if (length > 0) {
+ ret = nand_read_data_op(chip, bufpoi, length, false, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+int nand_write_oob_std(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
+}
+EXPORT_SYMBOL(nand_write_oob_std);
+
+/**
+ * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
+ * with syndrome - only for large page flash
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size, length = mtd->oobsize;
+ int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
+ const uint8_t *bufpoi = chip->oob_poi;
+
+ /*
+ * data-ecc-data-ecc ... ecc-oob
+ * or
+ * data-pad-ecc-pad-data-pad .... ecc-pad-oob
+ */
+ if (!chip->ecc.prepad && !chip->ecc.postpad) {
+ pos = steps * (eccsize + chunk);
+ steps = 0;
+ } else
+ pos = eccsize;
+
+ ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < steps; i++) {
+ if (sndcmd) {
+ if (mtd->writesize <= 512) {
+ uint32_t fill = 0xFFFFFFFF;
+
+ len = eccsize;
+ while (len > 0) {
+ int num = min_t(int, len, 4);
+
+ ret = nand_write_data_op(chip, &fill,
+ num, false);
+ if (ret)
+ return ret;
+
+ len -= num;
+ }
+ } else {
+ pos = eccsize + i * (eccsize + chunk);
+ ret = nand_change_write_column_op(chip, pos,
+ NULL, 0,
+ false);
+ if (ret)
+ return ret;
+ }
+ } else
+ sndcmd = 1;
+ len = min_t(int, length, chunk);
+
+ ret = nand_write_data_op(chip, bufpoi, len, false);
+ if (ret)
+ return ret;
+
+ bufpoi += len;
+ length -= len;
+ }
+ if (length > 0) {
+ ret = nand_write_data_op(chip, bufpoi, length, false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * nand_do_read_oob - [INTERN] NAND read out-of-band
+ * @chip: NAND chip object
+ * @from: offset to read from
+ * @ops: oob operations description structure
+ *
+ * NAND read out-of-band data from the spare area.
+ */
+static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int max_bitflips = 0;
+ int page, realpage, chipnr;
+ struct mtd_ecc_stats stats;
+ int readlen = ops->ooblen;
+ int len;
+ uint8_t *buf = ops->oobbuf;
+ int ret = 0;
+
+ pr_debug("%s: from = 0x%08Lx, len = %i\n",
+ __func__, (unsigned long long)from, readlen);
+
+ /* Check if the region is secured */
+ if (nand_region_is_secured(chip, from, readlen))
+ return -EIO;
+
+ stats = mtd->ecc_stats;
+
+ len = mtd_oobavail(mtd, ops);
+
+ chipnr = (int)(from >> chip->chip_shift);
+ nand_select_target(chip, chipnr);
+
+ /* Shift to get page */
+ realpage = (int)(from >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+ while (1) {
+ if (ops->mode == MTD_OPS_RAW)
+ ret = chip->ecc.read_oob_raw(chip, page);
+ else
+ ret = chip->ecc.read_oob(chip, page);
+
+ if (ret < 0)
+ break;
+
+ len = min(len, readlen);
+ buf = nand_transfer_oob(chip, buf, ops, len);
+
+ nand_wait_readrdy(chip);
+
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
+ readlen -= len;
+ if (!readlen)
+ break;
+
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+		/* Check if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
+ }
+ }
+ nand_deselect_target(chip);
+
+ ops->oobretlen = ops->ooblen - readlen;
+
+ if (ret < 0)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return max_bitflips;
+}
+
+/**
+ * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * NAND read data and/or out-of-band data.
+ */
+static int nand_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_ecc_stats old_stats;
+ int ret;
+
+ ops->retlen = 0;
+
+ if (ops->mode != MTD_OPS_PLACE_OOB &&
+ ops->mode != MTD_OPS_AUTO_OOB &&
+ ops->mode != MTD_OPS_RAW)
+ return -ENOTSUPP;
+
+ nand_get_device(chip);
+
+ old_stats = mtd->ecc_stats;
+
+ if (!ops->datbuf)
+ ret = nand_do_read_oob(chip, from, ops);
+ else
+ ret = nand_do_read_ops(chip, from, ops);
+
+ if (ops->stats) {
+ ops->stats->uncorrectable_errors +=
+ mtd->ecc_stats.failed - old_stats.failed;
+ ops->stats->corrected_bitflips +=
+ mtd->ecc_stats.corrected - old_stats.corrected;
+ }
+
+ nand_release_device(chip);
+ return ret;
+}
+
+/**
+ * nand_write_page_raw_notsupp - dummy raw page write function
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * Returns -ENOTSUPP unconditionally.
+ */
+int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ return -ENOTSUPP;
+}
+
+/**
+ * nand_write_page_raw - [INTERN] raw page write function
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+EXPORT_SYMBOL(nand_write_page_raw);
+
+/**
+ * nand_monolithic_write_page_raw - Monolithic page write in raw mode
+ * @chip: NAND chip info structure
+ * @buf: data buffer to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * This is a raw page write, i.e. without any error detection/correction.
+ * Monolithic means we are requesting all the relevant data (main plus
+ * possibly OOB) to be sent over the bus and effectively programmed
+ * into the NAND chip arrays in a single operation. This is an
+ * alternative to nand_write_page_raw(), which first sends the main
+ * data, then possibly sends the OOB data by latching more data
+ * cycles on the NAND bus, and finally sends the program command to
+ * synchronize the NAND chip cache.
+ */
+int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int size = mtd->writesize;
+ u8 *write_buf = (u8 *)buf;
+
+ if (oob_required) {
+ size += mtd->oobsize;
+
+ if (buf != chip->data_buf) {
+ write_buf = nand_get_data_buf(chip);
+ memcpy(write_buf, buf, mtd->writesize);
+ }
+ }
+
+ return nand_prog_page_op(chip, page, 0, write_buf, size);
+}
+EXPORT_SYMBOL(nand_monolithic_write_page_raw);
+
+/**
+ * nand_write_page_raw_syndrome - [INTERN] raw page write function
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * We need a special oob layout and handling even when ECC isn't checked.
+ */
+static int nand_write_page_raw_syndrome(struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size, ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ ret = nand_write_data_op(chip, buf, eccsize, false);
+ if (ret)
+ return ret;
+
+ buf += eccsize;
+
+ if (chip->ecc.prepad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.prepad;
+ }
+
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size) {
+ ret = nand_write_data_op(chip, oob, size, false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+/**
+ * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ */
+static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ const uint8_t *p = buf;
+
+ /* Software ECC calculation */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
+
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ return chip->ecc.write_page_raw(chip, buf, 1, page);
+}
+
+/**
+ * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ */
+static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ const uint8_t *p = buf;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
+ }
+
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
+}
+
+
+/**
+ * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
+ * @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ */
+static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ uint8_t *oob_buf = chip->oob_poi;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int ecc_steps = chip->ecc.steps;
+ uint32_t start_step = offset / ecc_size;
+ uint32_t end_step = (offset + data_len - 1) / ecc_size;
+ int oob_bytes = mtd->oobsize / ecc_steps;
+ int step, ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (step = 0; step < ecc_steps; step++) {
+ /* configure controller for WRITE access */
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
+
+ /* write data (untouched subpages already masked by 0xFF) */
+ ret = nand_write_data_op(chip, buf, ecc_size, false);
+ if (ret)
+ return ret;
+
+		/* mask ECC of untouched subpages by padding 0xFF */
+ if ((step < start_step) || (step > end_step))
+ memset(ecc_calc, 0xff, ecc_bytes);
+ else
+ chip->ecc.calculate(chip, buf, ecc_calc);
+
+		/* mask OOB of untouched subpages by padding 0xFF */
+ /* if oob_required, preserve OOB metadata of written subpage */
+ if (!oob_required || (step < start_step) || (step > end_step))
+ memset(oob_buf, 0xff, oob_bytes);
+
+ buf += ecc_size;
+ ecc_calc += ecc_bytes;
+ oob_buf += oob_bytes;
+ }
+
+	/* copy calculated ECC for the whole page to chip->buffer->oob */
+	/* this includes the masked value (0xFF) for unwritten subpages */
+ ecc_calc = chip->ecc.calc_buf;
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ /* write OOB buffer to NAND device */
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
+}
+
+
+/**
+ * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
+ */
+static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ const uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+ int ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
+ if (chip->ecc.prepad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.calculate(chip, p, oob);
+
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i) {
+ ret = nand_write_data_op(chip, oob, i, false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * nand_write_page - write one page
+ * @chip: NAND chip descriptor
+ * @offset: address offset within the page
+ * @data_len: length of actual data to be written
+ * @buf: the data to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ * @raw: use _raw version of write_page
+ */
+static int nand_write_page(struct nand_chip *chip, uint32_t offset,
+ int data_len, const uint8_t *buf, int oob_required,
+ int page, int raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status, subpage;
+
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
+ chip->ecc.write_subpage)
+ subpage = offset || (data_len < mtd->writesize);
+ else
+ subpage = 0;
+
+ if (unlikely(raw))
+ status = chip->ecc.write_page_raw(chip, buf, oob_required,
+ page);
+ else if (subpage)
+ status = chip->ecc.write_subpage(chip, offset, data_len, buf,
+ oob_required, page);
+ else
+ status = chip->ecc.write_page(chip, buf, oob_required, page);
+
+ if (status < 0)
+ return status;
+
+ return 0;
+}
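+
+/*
+ * Illustrative example (not part of the patch): on a 2048-byte page, a
+ * non-raw request with offset 0 and data_len 2048 takes the full-page
+ * path above, while offset 512 or data_len 1024 selects the subpage
+ * path, provided the chip and controller support subpage writes.
+ */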
+
+#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
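+
+/*
+ * Illustrative example (not part of the patch): with a hypothetical
+ * chip->subpagesize of 512, NOTALIGNED(0x300) is (0x300 & 0x1FF) != 0,
+ * i.e. true, so nand_do_write_ops() below rejects a write at that offset.
+ */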
+
+/**
+ * nand_do_write_ops - [INTERN] NAND write with ECC
+ * @chip: NAND chip object
+ * @to: offset to write to
+ * @ops: oob operations description structure
+ *
+ * NAND write with ECC.
+ */
+static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int chipnr, realpage, page, column;
+ uint32_t writelen = ops->len;
+
+ uint32_t oobwritelen = ops->ooblen;
+ uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
+
+ uint8_t *oob = ops->oobbuf;
+ uint8_t *buf = ops->datbuf;
+ int ret;
+ int oob_required = oob ? 1 : 0;
+
+ ops->retlen = 0;
+ if (!writelen)
+ return 0;
+
+	/* Reject writes that are not page aligned */
+ if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
+ pr_notice("%s: attempt to write non page aligned data\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check if the region is secured */
+ if (nand_region_is_secured(chip, to, writelen))
+ return -EIO;
+
+ column = to & (mtd->writesize - 1);
+
+ chipnr = (int)(to >> chip->chip_shift);
+ nand_select_target(chip, chipnr);
+
+	/* Check if it is write protected */
+ if (nand_check_wp(chip)) {
+ ret = -EIO;
+ goto err_out;
+ }
+
+ realpage = (int)(to >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+	/* Invalidate the page cache when we write to the cached page */
+ if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
+ ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
+ chip->pagecache.page = -1;
+
+ /* Don't allow multipage oob writes with offset */
+ if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ while (1) {
+ int bytes = mtd->writesize;
+ uint8_t *wbuf = buf;
+ int use_bounce_buf;
+ int part_pagewr = (column || writelen < mtd->writesize);
+
+ if (part_pagewr)
+ use_bounce_buf = 1;
+ else if (chip->options & NAND_USES_DMA)
+ use_bounce_buf = !virt_addr_valid(buf) ||
+ !IS_ALIGNED((unsigned long)buf,
+ chip->buf_align);
+ else
+ use_bounce_buf = 0;
+
+ /*
+ * Copy the data from the initial buffer when doing partial page
+ * writes or when a bounce buffer is required.
+ */
+ if (use_bounce_buf) {
+ pr_debug("%s: using write bounce buffer for buf@%p\n",
+ __func__, buf);
+ if (part_pagewr)
+ bytes = min_t(int, bytes - column, writelen);
+ wbuf = nand_get_data_buf(chip);
+ memset(wbuf, 0xff, mtd->writesize);
+ memcpy(&wbuf[column], buf, bytes);
+ }
+
+ if (unlikely(oob)) {
+ size_t len = min(oobwritelen, oobmaxlen);
+ oob = nand_fill_oob(chip, oob, len, ops);
+ oobwritelen -= len;
+ } else {
+ /* We still need to erase leftover OOB data */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+ }
+
+ ret = nand_write_page(chip, column, bytes, wbuf,
+ oob_required, page,
+ (ops->mode == MTD_OPS_RAW));
+ if (ret)
+ break;
+
+ writelen -= bytes;
+ if (!writelen)
+ break;
+
+ column = 0;
+ buf += bytes;
+ realpage++;
+
+ page = realpage & chip->pagemask;
+		/* Check if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
+ }
+ }
+
+ ops->retlen = ops->len - writelen;
+ if (unlikely(oob))
+ ops->oobretlen = ops->ooblen;
+
+err_out:
+ nand_deselect_target(chip);
+ return ret;
+}
+
+/**
+ * panic_nand_write - [MTD Interface] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * NAND write with ECC. Used when performing writes in interrupt context, this
+ * may for example be called by mtdoops when writing an oops while in panic.
+ */
+static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const uint8_t *buf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int chipnr = (int)(to >> chip->chip_shift);
+ struct mtd_oob_ops ops;
+ int ret;
+
+ nand_select_target(chip, chipnr);
+
+ /* Wait for the device to get ready */
+ panic_nand_wait(chip, 400);
+
+ memset(&ops, 0, sizeof(ops));
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ ret = nand_do_write_ops(chip, to, &ops);
+
+ *retlen = ops.retlen;
+ return ret;
+}
+
+/**
+ * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ */
+static int nand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret = 0;
+
+ ops->retlen = 0;
+
+ nand_get_device(chip);
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf)
+ ret = nand_do_write_oob(chip, to, ops);
+ else
+ ret = nand_do_write_ops(chip, to, ops);
+
+out:
+ nand_release_device(chip);
+ return ret;
+}
+
+/**
+ * nand_erase - [MTD Interface] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ *
+ * Erase one or more blocks.
+ */
+static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
+}
+
+/**
+ * nand_erase_nand - [INTERN] erase block(s)
+ * @chip: NAND chip object
+ * @instr: erase instruction
+ * @allowbbt: allow erasing the bbt area
+ *
+ * Erase one or more blocks.
+ */
+int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
+ int allowbbt)
+{
+ int page, pages_per_block, ret, chipnr;
+ loff_t len;
+
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)instr->addr,
+ (unsigned long long)instr->len);
+
+ if (check_offs_len(chip, instr->addr, instr->len))
+ return -EINVAL;
+
+ /* Check if the region is secured */
+ if (nand_region_is_secured(chip, instr->addr, instr->len))
+ return -EIO;
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device(chip);
+
+ /* Shift to get first page */
+ page = (int)(instr->addr >> chip->page_shift);
+ chipnr = (int)(instr->addr >> chip->chip_shift);
+
+ /* Calculate pages in each block */
+ pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+
+ /* Select the NAND device */
+ nand_select_target(chip, chipnr);
+
+	/* Check if it is write protected */
+ if (nand_check_wp(chip)) {
+ pr_debug("%s: device is write protected!\n",
+ __func__);
+ ret = -EIO;
+ goto erase_exit;
+ }
+
+ /* Loop through the pages */
+ len = instr->len;
+
+ while (len) {
+ loff_t ofs = (loff_t)page << chip->page_shift;
+
+		/* Check if we have a bad block; we do not erase bad blocks! */
+ if (nand_block_checkbad(chip, ((loff_t) page) <<
+ chip->page_shift, allowbbt)) {
+ pr_warn("%s: attempt to erase a bad block at 0x%08llx\n",
+ __func__, (unsigned long long)ofs);
+ ret = -EIO;
+ goto erase_exit;
+ }
+
+ /*
+		 * Invalidate the page cache if we erase the block that
+		 * contains the currently cached page.
+ */
+ if (page <= chip->pagecache.page && chip->pagecache.page <
+ (page + pages_per_block))
+ chip->pagecache.page = -1;
+
+ ret = nand_erase_op(chip, (page & chip->pagemask) >>
+ (chip->phys_erase_shift - chip->page_shift));
+ if (ret) {
+ pr_debug("%s: failed erase, page 0x%08x\n",
+ __func__, page);
+ instr->fail_addr = ofs;
+ goto erase_exit;
+ }
+
+ /* Increment page address and decrement length */
+ len -= (1ULL << chip->phys_erase_shift);
+ page += pages_per_block;
+
+		/* Check if we cross a chip boundary */
+ if (len && !(page & chip->pagemask)) {
+ chipnr++;
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
+ }
+ }
+
+ ret = 0;
+erase_exit:
+
+ /* Deselect and wake up anyone waiting on the device */
+ nand_deselect_target(chip);
+ nand_release_device(chip);
+
+ /* Return more or less happy */
+ return ret;
+}
+
+/**
+ * nand_sync - [MTD Interface] sync
+ * @mtd: MTD device structure
+ *
+ * Sync is actually a wait-for-chip-ready function.
+ */
+static void nand_sync(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ pr_debug("%s: called\n", __func__);
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device(chip);
+ /* Release it and go back */
+ nand_release_device(chip);
+}
+
+/**
+ * nand_block_isbad - [MTD Interface] Check if block at offset is bad
+ * @mtd: MTD device structure
+ * @offs: offset relative to mtd start
+ */
+static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int chipnr = (int)(offs >> chip->chip_shift);
+ int ret;
+
+ /* Select the NAND device */
+ nand_get_device(chip);
+
+ nand_select_target(chip, chipnr);
+
+ ret = nand_block_checkbad(chip, offs, 0);
+
+ nand_deselect_target(chip);
+ nand_release_device(chip);
+
+ return ret;
+}
+
+/**
+ * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ */
+static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ int ret;
+
+ ret = nand_block_isbad(mtd, ofs);
+ if (ret) {
+ /* If it was bad already, return success and do nothing */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
+}
+
+/**
+ * nand_suspend - [MTD Interface] Suspend the NAND flash
+ * @mtd: MTD device structure
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_suspend(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret = 0;
+
+ mutex_lock(&chip->lock);
+ if (chip->ops.suspend)
+ ret = chip->ops.suspend(chip);
+ if (!ret)
+ chip->suspended = 1;
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+/**
+ * nand_resume - [MTD Interface] Resume the NAND flash
+ * @mtd: MTD device structure
+ */
+static void nand_resume(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ mutex_lock(&chip->lock);
+ if (chip->suspended) {
+ if (chip->ops.resume)
+ chip->ops.resume(chip);
+ chip->suspended = 0;
+ } else {
+ pr_err("%s called for a chip which is not in suspended state\n",
+ __func__);
+ }
+ mutex_unlock(&chip->lock);
+
+ wake_up_all(&chip->resume_wq);
+}
+
+/**
+ * nand_shutdown - [MTD Interface] Finish the current NAND operation and
+ * prevent further operations
+ * @mtd: MTD device structure
+ */
+static void nand_shutdown(struct mtd_info *mtd)
+{
+ nand_suspend(mtd);
+}
+
+/**
+ * nand_lock - [MTD Interface] Lock the NAND flash
+ * @mtd: MTD device structure
+ * @ofs: offset byte address
+ * @len: number of bytes to lock (must be a multiple of block/page size)
+ */
+static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!chip->ops.lock_area)
+ return -ENOTSUPP;
+
+ return chip->ops.lock_area(chip, ofs, len);
+}
+
+/**
+ * nand_unlock - [MTD Interface] Unlock the NAND flash
+ * @mtd: MTD device structure
+ * @ofs: offset byte address
+ * @len: number of bytes to unlock (must be a multiple of block/page size)
+ */
+static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!chip->ops.unlock_area)
+ return -ENOTSUPP;
+
+ return chip->ops.unlock_area(chip, ofs, len);
+}
+
+/* Set default functions */
+static void nand_set_defaults(struct nand_chip *chip)
+{
+ /* If no controller is provided, use the dummy, legacy one. */
+ if (!chip->controller) {
+ chip->controller = &chip->legacy.dummy_controller;
+ nand_controller_init(chip->controller);
+ }
+
+ nand_legacy_set_defaults(chip);
+
+ if (!chip->buf_align)
+ chip->buf_align = 1;
+}
+
+/* Sanitize ONFI strings so we can safely print them */
+void sanitize_string(uint8_t *s, size_t len)
+{
+ ssize_t i;
+
+ /* Null terminate */
+ s[len - 1] = 0;
+
+	/* Remove non-printable chars */
+ for (i = 0; i < len - 1; i++) {
+ if (s[i] < ' ' || s[i] > 127)
+ s[i] = '?';
+ }
+
+ /* Remove trailing spaces */
+ strim(s);
+}
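+
+/*
+ * Worked example (illustrative only): sanitizing the 8-byte buffer
+ * { 'O', 'N', 'F', 'I', 0x05, ' ', ' ', 'X' } first forces s[7] = 0,
+ * then replaces the non-printable 0x05 with '?', giving "ONFI?  ", and
+ * strim() finally drops the trailing spaces, leaving "ONFI?".
+ */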
+
+/*
+ * nand_id_has_period - Check if an ID string has a given wraparound period
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ * @period: the period of repetition
+ *
+ * Check if an ID string is repeated within a given sequence of bytes at a
+ * specific repetition interval (e.g., {0x20,0x01,0x7F,0x20} has a
+ * period of 3). This is a helper function for nand_id_len(). Returns non-zero
+ * if the repetition has a period of @period; otherwise, returns zero.
+ */
+static int nand_id_has_period(u8 *id_data, int arrlen, int period)
+{
+ int i, j;
+ for (i = 0; i < period; i++)
+ for (j = i + period; j < arrlen; j += period)
+ if (id_data[i] != id_data[j])
+ return 0;
+ return 1;
+}
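+
+/*
+ * Worked example (illustrative only): for id_data
+ * { 0x20, 0x01, 0x7F, 0x20, 0x01, 0x7F, 0x20, 0x01 } and arrlen 8,
+ * nand_id_has_period(..., 3) compares id_data[0]/[3]/[6], id_data[1]/[4]/[7]
+ * and id_data[2]/[5], finds them all equal, and returns 1.
+ */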
+
+/*
+ * nand_id_len - Get the length of an ID string returned by CMD_READID
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ *
+ * Returns the length of the ID string, according to known wraparound/trailing
+ * zero patterns. If no pattern exists, returns the length of the array.
+ */
+static int nand_id_len(u8 *id_data, int arrlen)
+{
+ int last_nonzero, period;
+
+ /* Find last non-zero byte */
+ for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
+ if (id_data[last_nonzero])
+ break;
+
+ /* All zeros */
+ if (last_nonzero < 0)
+ return 0;
+
+ /* Calculate wraparound period */
+ for (period = 1; period < arrlen; period++)
+ if (nand_id_has_period(id_data, arrlen, period))
+ break;
+
+ /* There's a repeated pattern */
+ if (period < arrlen)
+ return period;
+
+ /* There are trailing zeros */
+ if (last_nonzero < arrlen - 1)
+ return last_nonzero + 1;
+
+ /* No pattern detected */
+ return arrlen;
+}
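+
+/*
+ * Worked example (illustrative, hypothetical ID bytes): for id_data
+ * { 0x2c, 0xda, 0x90, 0x95, 0x06, 0x00, 0x00, 0x00 } no wraparound period
+ * shorter than the array exists and the last non-zero byte sits at index 4,
+ * so nand_id_len() reports a 5-byte ID and ignores the trailing zeros.
+ */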
+
+/* Extract the number of bits per cell from the 3rd byte of the extended ID */
+static int nand_get_bits_per_cell(u8 cellinfo)
+{
+ int bits;
+
+ bits = cellinfo & NAND_CI_CELLTYPE_MSK;
+ bits >>= NAND_CI_CELLTYPE_SHIFT;
+ return bits + 1;
+}
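+
+/*
+ * Example (illustrative; assumes the usual NAND_CI_CELLTYPE_MSK = 0x0C and
+ * NAND_CI_CELLTYPE_SHIFT = 2 definitions): cellinfo 0x00 decodes to
+ * 1 bit per cell (SLC) and cellinfo 0x04 to 2 bits per cell (MLC).
+ */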
+
+/*
+ * Many newer NAND chips share similar device ID codes, which represent the
+ * size of the chip. The rest of the parameters must be decoded according to
+ * generic or manufacturer-specific "extended ID" decoding patterns.
+ */
+void nand_decode_ext_id(struct nand_chip *chip)
+{
+ struct nand_memory_organization *memorg;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int extid;
+ u8 *id_data = chip->id.data;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* The 3rd id byte holds MLC / multichip data */
+ memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ /* The 4th id byte is the important one */
+ extid = id_data[3];
+
+ /* Calc pagesize */
+ memorg->pagesize = 1024 << (extid & 0x03);
+ mtd->writesize = memorg->pagesize;
+ extid >>= 2;
+ /* Calc oobsize */
+ memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
+ mtd->oobsize = memorg->oobsize;
+ extid >>= 2;
+	/* Calc blocksize. Blocksize is a multiple of 64 KiB */
+ memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
+ memorg->pagesize;
+ mtd->erasesize = (64 * 1024) << (extid & 0x03);
+ extid >>= 2;
+ /* Get buswidth information */
+ if (extid & 0x1)
+ chip->options |= NAND_BUSWIDTH_16;
+}
+EXPORT_SYMBOL_GPL(nand_decode_ext_id);
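+
+/*
+ * Worked example (illustrative only): for a hypothetical 4th ID byte
+ * extid = 0x15 the decoding above yields, step by step:
+ *   pagesize  = 1024 << (0x15 & 0x03)              = 2048 bytes
+ *   oobsize   = (8 << (0x05 & 0x01)) * (2048 >> 9) = 64 bytes
+ *   erasesize = (64 * 1024) << (0x01 & 0x03)       = 128 KiB
+ * and bit 0 of the remaining extid is 0, so the bus stays 8 bits wide.
+ */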
+
+/*
+ * Old devices have chip data hardcoded in the device ID table. nand_decode_id
+ * decodes a matching ID table entry and assigns the MTD size parameters for
+ * the chip.
+ */
+static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
+ mtd->erasesize = type->erasesize;
+ memorg->pagesize = type->pagesize;
+ mtd->writesize = memorg->pagesize;
+ memorg->oobsize = memorg->pagesize / 32;
+ mtd->oobsize = memorg->oobsize;
+
+ /* All legacy ID NAND are small-page, SLC */
+ memorg->bits_per_cell = 1;
+}
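+
+/*
+ * Example (illustrative only): a legacy entry with a 512-byte page and a
+ * 16 KiB erase block decodes to 32 pages per erase block and, following
+ * the pagesize / 32 rule above, a 16-byte OOB area.
+ */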
+
+/*
+ * Set the bad block marker/indicator (BBM/BBI) patterns according to
+ * heuristics based on various detected parameters (e.g., manufacturer,
+ * page size, cell-type information).
+ */
+static void nand_decode_bbm_options(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Set the bad block position */
+ if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
+ chip->badblockpos = NAND_BBM_POS_LARGE;
+ else
+ chip->badblockpos = NAND_BBM_POS_SMALL;
+}
+
+static inline bool is_full_id_nand(struct nand_flash_dev *type)
+{
+ return type->id_len;
+}
+
+static bool find_full_id_nand(struct nand_chip *chip,
+ struct nand_flash_dev *type)
+{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ u8 *id_data = chip->id.data;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ if (!strncmp(type->id, id_data, type->id_len)) {
+ memorg->pagesize = type->pagesize;
+ mtd->writesize = memorg->pagesize;
+ memorg->pages_per_eraseblock = type->erasesize /
+ type->pagesize;
+ mtd->erasesize = type->erasesize;
+ memorg->oobsize = type->oobsize;
+ mtd->oobsize = memorg->oobsize;
+
+ memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ memorg->eraseblocks_per_lun =
+ DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
+ memorg->pagesize *
+ memorg->pages_per_eraseblock);
+ chip->options |= type->options;
+ requirements.strength = NAND_ECC_STRENGTH(type);
+ requirements.step_size = NAND_ECC_STEP(type);
+ nanddev_set_ecc_requirements(base, &requirements);
+
+ chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
+ if (!chip->parameters.model)
+ return false;
+
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
+ * compliant and does not have a full-id or legacy-id entry in the nand_ids
+ * table.
+ */
+static void nand_manufacturer_detect(struct nand_chip *chip)
+{
+ /*
+ * Try manufacturer detection if available and use
+ * nand_decode_ext_id() otherwise.
+ */
+ if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
+ chip->manufacturer.desc->ops->detect) {
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* The 3rd id byte holds MLC / multichip data */
+ memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
+ chip->manufacturer.desc->ops->detect(chip);
+ } else {
+ nand_decode_ext_id(chip);
+ }
+}
+
+/*
+ * Manufacturer initialization. This function is called for all NANDs including
+ * ONFI and JEDEC compliant ones.
+ * Manufacturer drivers should put all their specific initialization code in
+ * their ->init() hook.
+ */
+static int nand_manufacturer_init(struct nand_chip *chip)
+{
+ if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
+ !chip->manufacturer.desc->ops->init)
+ return 0;
+
+ return chip->manufacturer.desc->ops->init(chip);
+}
+
+/*
+ * Manufacturer cleanup. This function is called for all NANDs including
+ * ONFI and JEDEC compliant ones.
+ * Manufacturer drivers should put all their specific cleanup code in their
+ * ->cleanup() hook.
+ */
+static void nand_manufacturer_cleanup(struct nand_chip *chip)
+{
+ /* Release manufacturer private data */
+ if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
+ chip->manufacturer.desc->ops->cleanup)
+ chip->manufacturer.desc->ops->cleanup(chip);
+}
+
+static const char *
+nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
+{
+ return manufacturer_desc ? manufacturer_desc->name : "Unknown";
+}
+
+static void rawnand_check_data_only_read_support(struct nand_chip *chip)
+{
+ /* Use an arbitrary size for the check */
+ if (!nand_read_data_op(chip, NULL, SZ_512, true, true))
+ chip->controller->supported_op.data_only_read = 1;
+}
+
+static void rawnand_early_check_supported_ops(struct nand_chip *chip)
+{
+ /* The supported_op fields should not be set by individual drivers */
+ WARN_ON_ONCE(chip->controller->supported_op.data_only_read);
+
+ if (!nand_has_exec_op(chip))
+ return;
+
+ rawnand_check_data_only_read_support(chip);
+}
+
+static void rawnand_check_cont_read_support(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!chip->parameters.supports_read_cache)
+ return;
+
+ if (chip->read_retries)
+ return;
+
+ if (!nand_lp_exec_cont_read_page_op(chip, 0, 0, NULL,
+ mtd->writesize, true))
+ chip->controller->supported_op.cont_read = 1;
+}
+
+static void rawnand_late_check_supported_ops(struct nand_chip *chip)
+{
+ /* The supported_op fields should not be set by individual drivers */
+ WARN_ON_ONCE(chip->controller->supported_op.cont_read);
+
+ /*
+ * Too many devices do not support sequential cached reads with on-die
+	 * ECC correction enabled, so in this case refuse to enable
+	 * continuous reads automatically.
+ */
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE)
+ return;
+
+ if (!nand_has_exec_op(chip))
+ return;
+
+ rawnand_check_cont_read_support(chip);
+}
+
+/*
+ * Get the flash and manufacturer id and look up whether the type is supported.
+ */
+static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
+{
+ const struct nand_manufacturer_desc *manufacturer_desc;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ int busw, ret;
+ u8 *id_data = chip->id.data;
+ u8 maf_id, dev_id;
+ u64 targetsize;
+
+ /*
+ * Let's start by initializing memorg fields that might be left
+ * unassigned by the ID-based detection logic.
+ */
+ memorg = nanddev_get_memorg(&chip->base);
+ memorg->planes_per_lun = 1;
+ memorg->luns_per_target = 1;
+
+ /*
+ * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
+ * after power-up.
+ */
+ ret = nand_reset(chip, 0);
+ if (ret)
+ return ret;
+
+ /* Select the device */
+ nand_select_target(chip, 0);
+
+ rawnand_early_check_supported_ops(chip);
+
+ /* Send the command for reading device ID */
+ ret = nand_readid_op(chip, 0, id_data, 2);
+ if (ret)
+ return ret;
+
+ /* Read manufacturer and device IDs */
+ maf_id = id_data[0];
+ dev_id = id_data[1];
+
+ /*
+	 * Try again to make sure: on some systems, bus-hold or other
+	 * interface concerns can cause random data to appear that looks
+	 * like a credible NAND flash. If the two results do not match,
+	 * ignore the device completely.
+ */
+
+ /* Read entire ID string */
+ ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
+ if (ret)
+ return ret;
+
+ if (id_data[0] != maf_id || id_data[1] != dev_id) {
+ pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
+ maf_id, dev_id, id_data[0], id_data[1]);
+ return -ENODEV;
+ }
+
+ chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
+
+ /* Try to identify manufacturer */
+ manufacturer_desc = nand_get_manufacturer_desc(maf_id);
+ chip->manufacturer.desc = manufacturer_desc;
+
+ if (!type)
+ type = nand_flash_ids;
+
+ /*
+ * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
+ * override it.
+	 * This is required to make sure the initial NAND bus width set by the
+ * NAND controller driver is coherent with the real NAND bus width
+ * (extracted by auto-detection code).
+ */
+ busw = chip->options & NAND_BUSWIDTH_16;
+
+ /*
+	 * The flag is only set (never cleared), so reset it to its default
+	 * value before starting auto-detection.
+ */
+ chip->options &= ~NAND_BUSWIDTH_16;
+
+ for (; type->name != NULL; type++) {
+ if (is_full_id_nand(type)) {
+ if (find_full_id_nand(chip, type))
+ goto ident_done;
+ } else if (dev_id == type->dev_id) {
+ break;
+ }
+ }
+
+ if (!type->name || !type->pagesize) {
+ /* Check if the chip is ONFI compliant */
+ ret = nand_onfi_detect(chip);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ goto ident_done;
+
+ /* Check if the chip is JEDEC compliant */
+ ret = nand_jedec_detect(chip);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ goto ident_done;
+ }
+
+ if (!type->name)
+ return -ENODEV;
+
+ chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
+ if (!chip->parameters.model)
+ return -ENOMEM;
+
+ if (!type->pagesize)
+ nand_manufacturer_detect(chip);
+ else
+ nand_decode_id(chip, type);
+
+ /* Get chip options */
+ chip->options |= type->options;
+
+ memorg->eraseblocks_per_lun =
+ DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
+ memorg->pagesize *
+ memorg->pages_per_eraseblock);
+
+ident_done:
+ if (!mtd->name)
+ mtd->name = chip->parameters.model;
+
+ if (chip->options & NAND_BUSWIDTH_AUTO) {
+ WARN_ON(busw & NAND_BUSWIDTH_16);
+ nand_set_defaults(chip);
+ } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ /*
+		 * Check if the buswidth is correct. Hardware drivers should
+		 * set up the chip correctly!
+ */
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ maf_id, dev_id);
+ pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
+ mtd->name);
+ pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
+ (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
+ ret = -EINVAL;
+
+ goto free_detect_allocation;
+ }
+
+ nand_decode_bbm_options(chip);
+
+ /* Calculate the address shift from the page size */
+ chip->page_shift = ffs(mtd->writesize) - 1;
+	/* Convert chipsize to the number of pages per chip - 1 */
+ targetsize = nanddev_target_size(&chip->base);
+ chip->pagemask = (targetsize >> chip->page_shift) - 1;
+
+ chip->bbt_erase_shift = chip->phys_erase_shift =
+ ffs(mtd->erasesize) - 1;
+ if (targetsize & 0xffffffff)
+ chip->chip_shift = ffs((unsigned)targetsize) - 1;
+ else {
+ chip->chip_shift = ffs((unsigned)(targetsize >> 32));
+ chip->chip_shift += 32 - 1;
+ }
+
+ if (chip->chip_shift - chip->page_shift > 16)
+ chip->options |= NAND_ROW_ADDR_3;
+
+ chip->badblockbits = 8;
+
+ nand_legacy_adjust_cmdfunc(chip);
+
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ maf_id, dev_id);
+ pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
+ chip->parameters.model);
+ pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
+ (int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
+ mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
+ return 0;
+
+free_detect_allocation:
+ kfree(chip->parameters.model);
+
+ return ret;
+}
+
+static enum nand_ecc_engine_type
+of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
+{
+ enum nand_ecc_legacy_mode {
+ NAND_ECC_INVALID,
+ NAND_ECC_NONE,
+ NAND_ECC_SOFT,
+ NAND_ECC_SOFT_BCH,
+ NAND_ECC_HW,
+ NAND_ECC_HW_SYNDROME,
+ NAND_ECC_ON_DIE,
+ };
+ const char * const nand_ecc_legacy_modes[] = {
+ [NAND_ECC_NONE] = "none",
+ [NAND_ECC_SOFT] = "soft",
+ [NAND_ECC_SOFT_BCH] = "soft_bch",
+ [NAND_ECC_HW] = "hw",
+ [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
+ [NAND_ECC_ON_DIE] = "on-die",
+ };
+ enum nand_ecc_legacy_mode eng_type;
+ const char *pm;
+ int err;
+
+ err = of_property_read_string(np, "nand-ecc-mode", &pm);
+ if (err)
+ return NAND_ECC_ENGINE_TYPE_INVALID;
+
+ for (eng_type = NAND_ECC_NONE;
+ eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
+ if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
+ switch (eng_type) {
+ case NAND_ECC_NONE:
+ return NAND_ECC_ENGINE_TYPE_NONE;
+ case NAND_ECC_SOFT:
+ case NAND_ECC_SOFT_BCH:
+ return NAND_ECC_ENGINE_TYPE_SOFT;
+ case NAND_ECC_HW:
+ case NAND_ECC_HW_SYNDROME:
+ return NAND_ECC_ENGINE_TYPE_ON_HOST;
+ case NAND_ECC_ON_DIE:
+ return NAND_ECC_ENGINE_TYPE_ON_DIE;
+ default:
+ break;
+ }
+ }
+ }
+
+ return NAND_ECC_ENGINE_TYPE_INVALID;
+}
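+
+/*
+ * Example (illustrative only) of the legacy device tree property handled
+ * above; "hw" maps to NAND_ECC_ENGINE_TYPE_ON_HOST:
+ *
+ *   nand-ecc-mode = "hw";
+ */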
+
+static enum nand_ecc_placement
+of_get_rawnand_ecc_placement_legacy(struct device_node *np)
+{
+ const char *pm;
+ int err;
+
+ err = of_property_read_string(np, "nand-ecc-mode", &pm);
+ if (!err) {
+ if (!strcasecmp(pm, "hw_syndrome"))
+ return NAND_ECC_PLACEMENT_INTERLEAVED;
+ }
+
+ return NAND_ECC_PLACEMENT_UNKNOWN;
+}
+
+static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
+{
+ const char *pm;
+ int err;
+
+ err = of_property_read_string(np, "nand-ecc-mode", &pm);
+ if (!err) {
+ if (!strcasecmp(pm, "soft"))
+ return NAND_ECC_ALGO_HAMMING;
+ else if (!strcasecmp(pm, "soft_bch"))
+ return NAND_ECC_ALGO_BCH;
+ }
+
+ return NAND_ECC_ALGO_UNKNOWN;
+}
+
+static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
+{
+ struct device_node *dn = nand_get_flash_node(chip);
+ struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;
+
+ if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
+ user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);
+
+ if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
+ user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);
+
+ if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
+ user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
+}
+
+static int of_get_nand_bus_width(struct nand_chip *chip)
+{
+ struct device_node *dn = nand_get_flash_node(chip);
+ u32 val;
+ int ret;
+
+ ret = of_property_read_u32(dn, "nand-bus-width", &val);
+ if (ret == -EINVAL)
+		/* Buswidth defaults to 8 if the property does not exist. */
+ return 0;
+ else if (ret)
+ return ret;
+
+ if (val == 16)
+ chip->options |= NAND_BUSWIDTH_16;
+ else if (val != 8)
+ return -EINVAL;
+ return 0;
+}
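+
+/*
+ * Example (illustrative only) of the device tree property handled above,
+ * selecting a 16-bit bus (any value other than 8 or 16 is rejected):
+ *
+ *   nand-bus-width = <16>;
+ */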
+
+static int of_get_nand_secure_regions(struct nand_chip *chip)
+{
+ struct device_node *dn = nand_get_flash_node(chip);
+ struct property *prop;
+ int nr_elem, i, j;
+
+ /* Only proceed if the "secure-regions" property is present in DT */
+ prop = of_find_property(dn, "secure-regions", NULL);
+ if (!prop)
+ return 0;
+
+ nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
+ if (nr_elem <= 0)
+ return nr_elem;
+
+ chip->nr_secure_regions = nr_elem / 2;
+ chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
+ GFP_KERNEL);
+ if (!chip->secure_regions)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; i < chip->nr_secure_regions; i++, j += 2) {
+ of_property_read_u64_index(dn, "secure-regions", j,
+ &chip->secure_regions[i].offset);
+ of_property_read_u64_index(dn, "secure-regions", j + 1,
+ &chip->secure_regions[i].size);
+ }
+
+ return 0;
+}
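+
+/*
+ * Example (illustrative, hypothetical offsets/sizes) of the device tree
+ * property handled above; each region is an <offset size> pair of 64-bit
+ * values, so this declares two secure regions of 1 MiB each:
+ *
+ *   secure-regions = /bits/ 64 <0x0      0x100000
+ *                               0x200000 0x100000>;
+ */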
+
+/**
+ * rawnand_dt_parse_gpio_cs - Parse the "cs-gpios" property of a controller
+ * @dev: Device that will be parsed. Also used for managed allocations.
+ * @cs_array: Array of GPIO desc pointers allocated on success
+ * @ncs_array: Number of entries in @cs_array, updated on success.
+ *
+ * Return: 0 on success, an error otherwise.
+ */
+int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
+ unsigned int *ncs_array)
+{
+ struct gpio_desc **descs;
+ int ndescs, i;
+
+ ndescs = gpiod_count(dev, "cs");
+ if (ndescs < 0) {
+ dev_dbg(dev, "No valid cs-gpios property\n");
+ return 0;
+ }
+
+ descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL);
+ if (!descs)
+ return -ENOMEM;
+
+ for (i = 0; i < ndescs; i++) {
+ descs[i] = gpiod_get_index_optional(dev, "cs", i,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(descs[i]))
+ return PTR_ERR(descs[i]);
+ }
+
+ *ncs_array = ndescs;
+ *cs_array = descs;
+
+ return 0;
+}
+EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs);
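+
+/*
+ * Example (illustrative, hypothetical GPIO controller) of the "cs-gpios"
+ * property consumed above, describing two chip-select lines:
+ *
+ *   cs-gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>,
+ *              <&gpio0 2 GPIO_ACTIVE_HIGH>;
+ */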
+
+static int rawnand_dt_init(struct nand_chip *chip)
+{
+ struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
+ struct device_node *dn = nand_get_flash_node(chip);
+ int ret;
+
+ if (!dn)
+ return 0;
+
+ ret = of_get_nand_bus_width(chip);
+ if (ret)
+ return ret;
+
+ if (of_property_read_bool(dn, "nand-is-boot-medium"))
+ chip->options |= NAND_IS_BOOT_MEDIUM;
+
+ if (of_property_read_bool(dn, "nand-on-flash-bbt"))
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ of_get_nand_ecc_user_config(nand);
+ of_get_nand_ecc_legacy_user_config(chip);
+
+ /*
+	 * If neither the user nor the NAND controller has requested a specific
+ * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
+ */
+ nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ /*
+	 * Use the user-requested engine type if there is one; otherwise
+	 * default to the NAND controller choice, and finally fall back to
+	 * the raw NAND default.
+ */
+ if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
+ chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
+ chip->ecc.engine_type = nand->ecc.defaults.engine_type;
+
+ chip->ecc.placement = nand->ecc.user_conf.placement;
+ chip->ecc.algo = nand->ecc.user_conf.algo;
+ chip->ecc.strength = nand->ecc.user_conf.strength;
+ chip->ecc.size = nand->ecc.user_conf.step_size;
+
+ return 0;
+}
+
+/**
+ * nand_scan_ident - Scan for the NAND device
+ * @chip: NAND chip object
+ * @maxchips: number of chips to scan for
+ * @table: alternative NAND ID table
+ *
+ * This is the first phase of the normal nand_scan() function. It reads the
+ * flash ID and sets up MTD fields accordingly.
+ *
+ * This helper used to be called directly from controller drivers that needed
+ * to tweak some ECC-related parameters before nand_scan_tail(). This separation
+ * prevented dynamic allocations during this phase, which was inconvenient and
+ * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
+ */
+static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
+ struct nand_flash_dev *table)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ int nand_maf_id, nand_dev_id;
+ unsigned int i;
+ int ret;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* Assume all dies are deselected when we enter nand_scan_ident(). */
+ chip->cur_cs = -1;
+
+ mutex_init(&chip->lock);
+ init_waitqueue_head(&chip->resume_wq);
+
+ /* Enforce the right timings for reset/detection */
+ chip->current_interface_config = nand_get_reset_interface_config();
+
+ ret = rawnand_dt_init(chip);
+ if (ret)
+ return ret;
+
+ if (!mtd->name && mtd->dev.parent)
+ mtd->name = dev_name(mtd->dev.parent);
+
+ /* Set the default functions */
+ nand_set_defaults(chip);
+
+ ret = nand_legacy_check_hooks(chip);
+ if (ret)
+ return ret;
+
+ memorg->ntargets = maxchips;
+
+ /* Read the flash type */
+ ret = nand_detect(chip, table);
+ if (ret) {
+ if (!(chip->options & NAND_SCAN_SILENT_NODEV))
+ pr_warn("No NAND device found\n");
+ nand_deselect_target(chip);
+ return ret;
+ }
+
+ nand_maf_id = chip->id.data[0];
+ nand_dev_id = chip->id.data[1];
+
+ nand_deselect_target(chip);
+
+ /* Check for a chip array */
+ for (i = 1; i < maxchips; i++) {
+ u8 id[2];
+
+		/* See comment in nand_detect() for reset */
+ ret = nand_reset(chip, i);
+ if (ret)
+ break;
+
+ nand_select_target(chip, i);
+ /* Send the command for reading device ID */
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ break;
+ /* Read manufacturer and device IDs */
+ if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
+ nand_deselect_target(chip);
+ break;
+ }
+ nand_deselect_target(chip);
+ }
+ if (i > 1)
+ pr_info("%d chips detected\n", i);
+
+ /* Store the number of chips and calc total size for mtd */
+ memorg->ntargets = i;
+ mtd->size = i * nanddev_target_size(&chip->base);
+
+ return 0;
+}
+
+static void nand_scan_ident_cleanup(struct nand_chip *chip)
+{
+ kfree(chip->parameters.model);
+ kfree(chip->parameters.onfi);
+}
+
+int rawnand_sw_hamming_init(struct nand_chip *chip)
+{
+ struct nand_ecc_sw_hamming_conf *engine_conf;
+ struct nand_device *base = &chip->base;
+ int ret;
+
+ base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING;
+ base->ecc.user_conf.strength = chip->ecc.strength;
+ base->ecc.user_conf.step_size = chip->ecc.size;
+
+ ret = nand_ecc_sw_hamming_init_ctx(base);
+ if (ret)
+ return ret;
+
+ engine_conf = base->ecc.ctx.priv;
+
+ if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER)
+ engine_conf->sm_order = true;
+
+ chip->ecc.size = base->ecc.ctx.conf.step_size;
+ chip->ecc.strength = base->ecc.ctx.conf.strength;
+ chip->ecc.total = base->ecc.ctx.total;
+ chip->ecc.steps = nanddev_get_ecc_nsteps(base);
+ chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);
+
+ return 0;
+}
+EXPORT_SYMBOL(rawnand_sw_hamming_init);
+
+int rawnand_sw_hamming_calculate(struct nand_chip *chip,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ struct nand_device *base = &chip->base;
+
+ return nand_ecc_sw_hamming_calculate(base, buf, code);
+}
+EXPORT_SYMBOL(rawnand_sw_hamming_calculate);
+
+int rawnand_sw_hamming_correct(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ struct nand_device *base = &chip->base;
+
+ return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc);
+}
+EXPORT_SYMBOL(rawnand_sw_hamming_correct);
+
+void rawnand_sw_hamming_cleanup(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+
+ nand_ecc_sw_hamming_cleanup_ctx(base);
+}
+EXPORT_SYMBOL(rawnand_sw_hamming_cleanup);
+
+int rawnand_sw_bch_init(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+ const struct nand_ecc_props *ecc_conf = nanddev_get_ecc_conf(base);
+ int ret;
+
+ base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH;
+ base->ecc.user_conf.step_size = chip->ecc.size;
+ base->ecc.user_conf.strength = chip->ecc.strength;
+
+ ret = nand_ecc_sw_bch_init_ctx(base);
+ if (ret)
+ return ret;
+
+ chip->ecc.size = ecc_conf->step_size;
+ chip->ecc.strength = ecc_conf->strength;
+ chip->ecc.total = base->ecc.ctx.total;
+ chip->ecc.steps = nanddev_get_ecc_nsteps(base);
+ chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);
+
+ return 0;
+}
+EXPORT_SYMBOL(rawnand_sw_bch_init);
+
+static int rawnand_sw_bch_calculate(struct nand_chip *chip,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ struct nand_device *base = &chip->base;
+
+ return nand_ecc_sw_bch_calculate(base, buf, code);
+}
+
+int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ struct nand_device *base = &chip->base;
+
+ return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc);
+}
+EXPORT_SYMBOL(rawnand_sw_bch_correct);
+
+void rawnand_sw_bch_cleanup(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+
+ nand_ecc_sw_bch_cleanup_ctx(base);
+}
+EXPORT_SYMBOL(rawnand_sw_bch_cleanup);
+
+static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ switch (ecc->placement) {
+ case NAND_ECC_PLACEMENT_UNKNOWN:
+ case NAND_ECC_PLACEMENT_OOB:
+ /* Use standard hwecc read page function? */
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_hwecc;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_hwecc;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_std;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->read_subpage)
+ ecc->read_subpage = nand_read_subpage;
+ if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
+ ecc->write_subpage = nand_write_subpage_hwecc;
+ fallthrough;
+
+ case NAND_ECC_PLACEMENT_INTERLEAVED:
+ if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
+ (!ecc->read_page ||
+ ecc->read_page == nand_read_page_hwecc ||
+ !ecc->write_page ||
+ ecc->write_page == nand_write_page_hwecc)) {
+ WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
+ return -EINVAL;
+ }
+ /* Use standard syndrome read/write page function? */
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_syndrome;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_syndrome;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw_syndrome;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw_syndrome;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_syndrome;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_syndrome;
+ break;
+
+ default:
+ pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
+ ecc->placement);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nand_set_ecc_soft_ops(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_device *nanddev = mtd_to_nanddev(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret;
+
+ if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
+ return -EINVAL;
+
+ switch (ecc->algo) {
+ case NAND_ECC_ALGO_HAMMING:
+ ecc->calculate = rawnand_sw_hamming_calculate;
+ ecc->correct = rawnand_sw_hamming_correct;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->size)
+ ecc->size = 256;
+ ecc->bytes = 3;
+ ecc->strength = 1;
+
+ if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
+ ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
+
+ ret = rawnand_sw_hamming_init(chip);
+ if (ret) {
+ WARN(1, "Hamming ECC initialization failed!\n");
+ return ret;
+ }
+
+ return 0;
+ case NAND_ECC_ALGO_BCH:
+ if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
+ WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
+ return -EINVAL;
+ }
+ ecc->calculate = rawnand_sw_bch_calculate;
+ ecc->correct = rawnand_sw_bch_correct;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
+
+ /*
+ * We can only maximize ECC config when the default layout is
+ * used, otherwise we don't know how many bytes can really be
+ * used.
+ */
+ if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH &&
+ mtd->ooblayout != nand_get_large_page_ooblayout())
+ nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH;
+
+ ret = rawnand_sw_bch_init(chip);
+ if (ret) {
+ WARN(1, "BCH ECC initialization failed!\n");
+ return ret;
+ }
+
+ return 0;
+ default:
+ WARN(1, "Unsupported ECC algorithm!\n");
+ return -EINVAL;
+ }
+}
+
+/**
+ * nand_check_ecc_caps - check the sanity of preset ECC settings
+ * @chip: nand chip info structure
+ * @caps: ECC caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * When ECC step size and strength are already set, check if they are supported
+ * by the controller and the calculated ECC bytes fit within the chip's OOB.
+ * On success, the calculated number of ECC bytes is set.
+ */
+static int
+nand_check_ecc_caps(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_ecc_step_info *stepinfo;
+ int preset_step = chip->ecc.size;
+ int preset_strength = chip->ecc.strength;
+ int ecc_bytes, nsteps = mtd->writesize / preset_step;
+ int i, j;
+
+ for (i = 0; i < caps->nstepinfos; i++) {
+ stepinfo = &caps->stepinfos[i];
+
+ if (stepinfo->stepsize != preset_step)
+ continue;
+
+ for (j = 0; j < stepinfo->nstrengths; j++) {
+ if (stepinfo->strengths[j] != preset_strength)
+ continue;
+
+ ecc_bytes = caps->calc_ecc_bytes(preset_step,
+ preset_strength);
+ if (WARN_ON_ONCE(ecc_bytes < 0))
+ return ecc_bytes;
+
+ if (ecc_bytes * nsteps > oobavail) {
+ pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
+ preset_step, preset_strength);
+ return -ENOSPC;
+ }
+
+ chip->ecc.bytes = ecc_bytes;
+
+ return 0;
+ }
+ }
+
+ pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
+ preset_step, preset_strength);
+
+ return -ENOTSUPP;
+}
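+
+/*
+ * Worked example (illustrative only, not part of the driver): on a chip with
+ * 2048-byte pages and oobavail == 56, a preset (step, strength) of (512, 8)
+ * gives nsteps = 2048 / 512 = 4. With a software-BCH-style cost of 13 ECC
+ * bytes per 512-byte step at strength 8, the total is 4 * 13 = 52 <= 56, so
+ * the check passes and chip->ecc.bytes is set to 13. The actual per-step cost
+ * is whatever caps->calc_ecc_bytes() returns for the controller at hand.
+ */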
+
+/**
+ * nand_match_ecc_req - meet the chip's requirement with the least ECC bytes
+ * @chip: nand chip info structure
+ * @caps: ECC engine caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * If a chip's ECC requirement is provided, try to meet it with the least
+ * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
+ * On success, the chosen ECC settings are set.
+ */
+static int
+nand_match_ecc_req(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_ecc_step_info *stepinfo;
+ int req_step = requirements->step_size;
+ int req_strength = requirements->strength;
+ int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
+ int best_step = 0, best_strength = 0, best_ecc_bytes = 0;
+ int best_ecc_bytes_total = INT_MAX;
+ int i, j;
+
+ /* No information provided by the NAND chip */
+ if (!req_step || !req_strength)
+ return -ENOTSUPP;
+
+ /* number of correctable bits the chip requires in a page */
+ req_corr = mtd->writesize / req_step * req_strength;
+
+ for (i = 0; i < caps->nstepinfos; i++) {
+ stepinfo = &caps->stepinfos[i];
+ step_size = stepinfo->stepsize;
+
+ for (j = 0; j < stepinfo->nstrengths; j++) {
+ strength = stepinfo->strengths[j];
+
+ /*
+ * If both step size and strength are smaller than the
+ * chip's requirement, it is not easy to compare the
+ * resulting reliability.
+ */
+ if (step_size < req_step && strength < req_strength)
+ continue;
+
+ if (mtd->writesize % step_size)
+ continue;
+
+ nsteps = mtd->writesize / step_size;
+
+ ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
+ if (WARN_ON_ONCE(ecc_bytes < 0))
+ continue;
+ ecc_bytes_total = ecc_bytes * nsteps;
+
+ if (ecc_bytes_total > oobavail ||
+ strength * nsteps < req_corr)
+ continue;
+
+ /*
+ * We assume the best is to meet the chip's requirement
+ * with the least number of ECC bytes.
+ */
+ if (ecc_bytes_total < best_ecc_bytes_total) {
+ best_ecc_bytes_total = ecc_bytes_total;
+ best_step = step_size;
+ best_strength = strength;
+ best_ecc_bytes = ecc_bytes;
+ }
+ }
+ }
+
+ if (best_ecc_bytes_total == INT_MAX)
+ return -ENOTSUPP;
+
+ chip->ecc.size = best_step;
+ chip->ecc.strength = best_strength;
+ chip->ecc.bytes = best_ecc_bytes;
+
+ return 0;
+}
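+
+/*
+ * Worked example (illustrative only): assume a 2048-byte page, a chip
+ * requirement of (512, 4), hence req_corr = 4 * 4 = 16, and a controller
+ * offering strengths 4 and 8 at a 512-byte step. With software-BCH-style
+ * costs (7 bytes per step at strength 4, 13 at strength 8), both candidates
+ * meet req_corr, but (512, 4) needs only 4 * 7 = 28 ECC bytes in total
+ * versus 4 * 13 = 52, so it is the one selected.
+ */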
+
+/**
+ * nand_maximize_ecc - choose the max ECC strength available
+ * @chip: nand chip info structure
+ * @caps: ECC engine caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * Choose the max ECC strength that is supported on the controller, and can fit
+ * within the chip's OOB. On success, the chosen ECC settings are set.
+ */
+static int
+nand_maximize_ecc(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_ecc_step_info *stepinfo;
+ int step_size, strength, nsteps, ecc_bytes, corr;
+ int best_corr = 0;
+ int best_step = 0;
+ int best_strength = 0, best_ecc_bytes = 0;
+ int i, j;
+
+ for (i = 0; i < caps->nstepinfos; i++) {
+ stepinfo = &caps->stepinfos[i];
+ step_size = stepinfo->stepsize;
+
+ /* If chip->ecc.size is already set, respect it */
+ if (chip->ecc.size && step_size != chip->ecc.size)
+ continue;
+
+ for (j = 0; j < stepinfo->nstrengths; j++) {
+ strength = stepinfo->strengths[j];
+
+ if (mtd->writesize % step_size)
+ continue;
+
+ nsteps = mtd->writesize / step_size;
+
+ ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
+ if (WARN_ON_ONCE(ecc_bytes < 0))
+ continue;
+
+ if (ecc_bytes * nsteps > oobavail)
+ continue;
+
+ corr = strength * nsteps;
+
+ /*
+ * If the number of correctable bits is the same,
+ * bigger step_size has more reliability.
+ */
+ if (corr > best_corr ||
+ (corr == best_corr && step_size > best_step)) {
+ best_corr = corr;
+ best_step = step_size;
+ best_strength = strength;
+ best_ecc_bytes = ecc_bytes;
+ }
+ }
+ }
+
+ if (!best_corr)
+ return -ENOTSUPP;
+
+ chip->ecc.size = best_step;
+ chip->ecc.strength = best_strength;
+ chip->ecc.bytes = best_ecc_bytes;
+
+ return 0;
+}
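+
+/*
+ * Tie-break example (illustrative only): on a 2048-byte page, (512, 4) and
+ * (1024, 8) both correct strength * nsteps = 16 bits per page. The comparison
+ * above then prefers the bigger step size, so (1024, 8) wins: each 1024-byte
+ * codeword tolerates 8 bitflips anywhere within it, whereas a 512-byte
+ * codeword already fails once more than 4 bitflips land in it.
+ */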
+
+/**
+ * nand_ecc_choose_conf - Set the ECC strength and ECC step size
+ * @chip: nand chip info structure
+ * @caps: ECC engine caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * Choose the ECC configuration according to the following logic.
+ *
+ * 1. If both ECC step size and ECC strength are already set (usually by DT)
+ * then check if it is supported by this controller.
+ * 2. If the user provided the nand-ecc-maximize property, then select maximum
+ * ECC strength.
+ * 3. Otherwise, try to match the ECC step size and ECC strength closest
+ * to the chip's requirement. If the available OOB size can't fit the chip's
+ * requirement then fall back to the maximum ECC step size and ECC strength.
+ *
+ * On success, the chosen ECC settings are set.
+ */
+int nand_ecc_choose_conf(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_device *nanddev = mtd_to_nanddev(mtd);
+
+ if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
+ return -EINVAL;
+
+ if (chip->ecc.size && chip->ecc.strength)
+ return nand_check_ecc_caps(chip, caps, oobavail);
+
+ if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
+ return nand_maximize_ecc(chip, caps, oobavail);
+
+ if (!nand_match_ecc_req(chip, caps, oobavail))
+ return 0;
+
+ return nand_maximize_ecc(chip, caps, oobavail);
+}
+EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
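+
+/*
+ * Usage sketch (hypothetical driver code, the mydrv_* names are made up): a
+ * controller supporting a single 512-byte step at strengths 1/4/8 would
+ * typically describe itself with NAND_ECC_CAPS_SINGLE() and call this helper
+ * from its ->attach_chip() hook:
+ *
+ *	static int mydrv_calc_ecc_bytes(int step_size, int strength)
+ *	{
+ *		return DIV_ROUND_UP(strength * fls(step_size * 8), 8);
+ *	}
+ *	NAND_ECC_CAPS_SINGLE(mydrv_ecc_caps, mydrv_calc_ecc_bytes, 512, 1, 4, 8);
+ *
+ *	static int mydrv_attach_chip(struct nand_chip *chip)
+ *	{
+ *		struct mtd_info *mtd = nand_to_mtd(chip);
+ *
+ *		return nand_ecc_choose_conf(chip, &mydrv_ecc_caps,
+ *					    mtd->oobsize - 2);
+ *	}
+ *
+ * Here "mtd->oobsize - 2" merely reserves two OOB bytes for a bad block
+ * marker; a real driver derives oobavail from its own OOB layout.
+ */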
+
+static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct nand_chip *chip = container_of(nand, struct nand_chip,
+ base);
+ unsigned int eb = nanddev_pos_to_row(nand, pos);
+ int ret;
+
+ eb >>= nand->rowconv.eraseblock_addr_shift;
+
+ nand_select_target(chip, pos->target);
+ ret = nand_erase_op(chip, eb);
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static int rawnand_markbad(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ struct nand_chip *chip = container_of(nand, struct nand_chip,
+ base);
+
+ return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
+}
+
+static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct nand_chip *chip = container_of(nand, struct nand_chip,
+ base);
+ int ret;
+
+ nand_select_target(chip, pos->target);
+ ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static const struct nand_ops rawnand_ops = {
+ .erase = rawnand_erase,
+ .markbad = rawnand_markbad,
+ .isbad = rawnand_isbad,
+};
+
+/**
+ * nand_scan_tail - Scan for the NAND device
+ * @chip: NAND chip object
+ *
+ * This is the second phase of the normal nand_scan() function. It fills out
+ * all the uninitialized function pointers with the defaults and scans for a
+ * bad block table if appropriate.
+ */
+static int nand_scan_tail(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret, i;
+
+ /* New bad blocks should be marked in OOB, flash-based BBT, or both */
+ if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
+ !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
+ return -EINVAL;
+ }
+
+ chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+ if (!chip->data_buf)
+ return -ENOMEM;
+
+ /*
+ * FIXME: some NAND manufacturer drivers expect the first die to be
+ * selected when manufacturer->init() is called. They should be fixed
+ * to explicitly select the relevant die when interacting with the NAND
+ * chip.
+ */
+ nand_select_target(chip, 0);
+ ret = nand_manufacturer_init(chip);
+ nand_deselect_target(chip);
+ if (ret)
+ goto err_free_buf;
+
+ /* Set the internal oob buffer location, just after the page data */
+ chip->oob_poi = chip->data_buf + mtd->writesize;
+
+ /*
+ * If no default placement scheme is given, select an appropriate one.
+ */
+ if (!mtd->ooblayout &&
+ !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ ecc->algo == NAND_ECC_ALGO_BCH) &&
+ !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ ecc->algo == NAND_ECC_ALGO_HAMMING)) {
+ switch (mtd->oobsize) {
+ case 8:
+ case 16:
+ mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
+ break;
+ case 64:
+ case 128:
+ mtd_set_ooblayout(mtd,
+ nand_get_large_page_hamming_ooblayout());
+ break;
+ default:
+ /*
+ * Expose the whole OOB area to users if ECC_NONE
+ * is passed. We could do that for all kinds of
+ * ->oobsize, but we must keep the old large/small
+ * page with ECC layout when ->oobsize <= 128 for
+ * compatibility reasons.
+ */
+ if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
+ mtd_set_ooblayout(mtd,
+ nand_get_large_page_ooblayout());
+ break;
+ }
+
+ WARN(1, "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
+ }
+
+ /*
+ * Check the ECC mode: if 3-byte/512-byte hardware ECC is selected but the
+ * page size is only 256 bytes, fall back to software ECC.
+ */
+
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = nand_set_ecc_on_host_ops(chip);
+ if (ret)
+ goto err_nand_manuf_cleanup;
+
+ if (mtd->writesize >= ecc->size) {
+ if (!ecc->strength) {
+ WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
+ break;
+ }
+ pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
+ ecc->size, mtd->writesize);
+ ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ ecc->algo = NAND_ECC_ALGO_HAMMING;
+ fallthrough;
+
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ ret = nand_set_ecc_soft_ops(chip);
+ if (ret)
+ goto err_nand_manuf_cleanup;
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ if (!ecc->read_page || !ecc->write_page) {
+ WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_std;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_std;
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
+ ecc->read_page = nand_read_page_raw;
+ ecc->write_page = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->write_oob = nand_write_oob_std;
+ ecc->size = mtd->writesize;
+ ecc->bytes = 0;
+ ecc->strength = 0;
+ break;
+
+ default:
+ WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
+
+ if (ecc->correct || ecc->calculate) {
+ ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ if (!ecc->calc_buf || !ecc->code_buf) {
+ ret = -ENOMEM;
+ goto err_nand_manuf_cleanup;
+ }
+ }
+
+ /* For many systems, the standard OOB write also works for raw */
+ if (!ecc->read_oob_raw)
+ ecc->read_oob_raw = ecc->read_oob;
+ if (!ecc->write_oob_raw)
+ ecc->write_oob_raw = ecc->write_oob;
+
+ /* propagate ecc info to mtd_info */
+ mtd->ecc_strength = ecc->strength;
+ mtd->ecc_step_size = ecc->size;
+
+ /*
+ * Set the number of read / write steps for one page depending on ECC
+ * mode.
+ */
+ if (!ecc->steps)
+ ecc->steps = mtd->writesize / ecc->size;
+ if (ecc->steps * ecc->size != mtd->writesize) {
+ WARN(1, "Invalid ECC parameters\n");
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
+
+ if (!ecc->total) {
+ ecc->total = ecc->steps * ecc->bytes;
+ chip->base.ecc.ctx.total = ecc->total;
+ }
+
+ if (ecc->total > mtd->oobsize) {
+ WARN(1, "Total number of ECC bytes exceeded oobsize\n");
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
+
+ /*
+ * The number of bytes available for a client to place data into
+ * the out of band area.
+ */
+ ret = mtd_ooblayout_count_freebytes(mtd);
+ if (ret < 0)
+ ret = 0;
+
+ mtd->oobavail = ret;
+
+ /* ECC sanity check: warn if it's too weak */
+ if (!nand_ecc_is_strong_enough(&chip->base))
+ pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
+ mtd->name, chip->ecc.strength, chip->ecc.size,
+ nanddev_get_ecc_requirements(&chip->base)->strength,
+ nanddev_get_ecc_requirements(&chip->base)->step_size);
+
+ /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
+ switch (ecc->steps) {
+ case 2:
+ mtd->subpage_sft = 1;
+ break;
+ case 4:
+ case 8:
+ case 16:
+ mtd->subpage_sft = 2;
+ break;
+ }
+ }
+ chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
+
+ /* Invalidate the pagebuffer reference */
+ chip->pagecache.page = -1;
+
+ /* Large page NAND with SOFT_ECC should support subpage reads */
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ if (chip->page_shift > 9)
+ chip->options |= NAND_SUBPAGE_READ;
+ break;
+
+ default:
+ break;
+ }
+
+ ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
+ if (ret)
+ goto err_nand_manuf_cleanup;
+
+ /* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
+ if (chip->options & NAND_ROM)
+ mtd->flags = MTD_CAP_ROM;
+
+ /* Fill in remaining MTD driver data */
+ mtd->_erase = nand_erase;
+ mtd->_point = NULL;
+ mtd->_unpoint = NULL;
+ mtd->_panic_write = panic_nand_write;
+ mtd->_read_oob = nand_read_oob;
+ mtd->_write_oob = nand_write_oob;
+ mtd->_sync = nand_sync;
+ mtd->_lock = nand_lock;
+ mtd->_unlock = nand_unlock;
+ mtd->_suspend = nand_suspend;
+ mtd->_resume = nand_resume;
+ mtd->_reboot = nand_shutdown;
+ mtd->_block_isreserved = nand_block_isreserved;
+ mtd->_block_isbad = nand_block_isbad;
+ mtd->_block_markbad = nand_block_markbad;
+ mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
+
+ /*
+ * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
+ * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
+ * properly set.
+ */
+ if (!mtd->bitflip_threshold)
+ mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
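+ /*
+ * For example, with an ECC strength of 8 the default threshold
+ * becomes DIV_ROUND_UP(8 * 3, 4) == 6 corrected bitflips.
+ */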
+
+ /* Find the fastest data interface for this chip */
+ ret = nand_choose_interface_config(chip);
+ if (ret)
+ goto err_nanddev_cleanup;
+
+ /* Enter fastest possible mode on all dies. */
+ for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
+ ret = nand_setup_interface(chip, i);
+ if (ret)
+ goto err_free_interface_config;
+ }
+
+ rawnand_late_check_supported_ops(chip);
+
+ /*
+ * Look for secure regions in the NAND chip. These regions are supposed
+ * to be protected by a secure element like Trustzone. So the read/write
+ * accesses to these regions will be blocked at runtime by this
+ * driver.
+ */
+ ret = of_get_nand_secure_regions(chip);
+ if (ret)
+ goto err_free_interface_config;
+
+ /* Check if we should skip the bad block table scan */
+ if (chip->options & NAND_SKIP_BBTSCAN)
+ return 0;
+
+ /* Build bad block table */
+ ret = nand_create_bbt(chip);
+ if (ret)
+ goto err_free_secure_regions;
+
+ return 0;
+
+err_free_secure_regions:
+ kfree(chip->secure_regions);
+
+err_free_interface_config:
+ kfree(chip->best_interface_config);
+
+err_nanddev_cleanup:
+ nanddev_cleanup(&chip->base);
+
+err_nand_manuf_cleanup:
+ nand_manufacturer_cleanup(chip);
+
+err_free_buf:
+ kfree(chip->data_buf);
+ kfree(ecc->code_buf);
+ kfree(ecc->calc_buf);
+
+ return ret;
+}
+
+static int nand_attach(struct nand_chip *chip)
+{
+ if (chip->controller->ops && chip->controller->ops->attach_chip)
+ return chip->controller->ops->attach_chip(chip);
+
+ return 0;
+}
+
+static void nand_detach(struct nand_chip *chip)
+{
+ if (chip->controller->ops && chip->controller->ops->detach_chip)
+ chip->controller->ops->detach_chip(chip);
+}
+
+/**
+ * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
+ * @chip: NAND chip object
+ * @maxchips: number of chips to scan for.
+ * @ids: optional flash IDs table
+ *
+ * This fills out all the uninitialized function pointers with the defaults.
+ * The flash ID is read and the mtd/chip structures are filled with the
+ * appropriate values.
+ */
+int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
+ struct nand_flash_dev *ids)
+{
+ int ret;
+
+ if (!maxchips)
+ return -EINVAL;
+
+ ret = nand_scan_ident(chip, maxchips, ids);
+ if (ret)
+ return ret;
+
+ ret = nand_attach(chip);
+ if (ret)
+ goto cleanup_ident;
+
+ ret = nand_scan_tail(chip);
+ if (ret)
+ goto detach_chip;
+
+ return 0;
+
+detach_chip:
+ nand_detach(chip);
+cleanup_ident:
+ nand_scan_ident_cleanup(chip);
+
+ return ret;
+}
+EXPORT_SYMBOL(nand_scan_with_ids);
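+
+/*
+ * Typical use (sketch, error handling trimmed): most drivers go through the
+ * nand_scan() wrapper from <linux/mtd/rawnand.h>, which calls this function
+ * with a NULL IDs table, and then register the MTD device:
+ *
+ *	ret = nand_scan(chip, 1);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = mtd_device_register(nand_to_mtd(chip), NULL, 0);
+ *	if (ret)
+ *		nand_cleanup(chip);
+ */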
+
+/**
+ * nand_cleanup - [NAND Interface] Free resources held by the NAND device
+ * @chip: NAND chip object
+ */
+void nand_cleanup(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) {
+ if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
+ rawnand_sw_hamming_cleanup(chip);
+ else if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
+ rawnand_sw_bch_cleanup(chip);
+ }
+
+ nanddev_cleanup(&chip->base);
+
+ /* Free secure regions data */
+ kfree(chip->secure_regions);
+
+ /* Free bad block table memory */
+ kfree(chip->bbt);
+ kfree(chip->data_buf);
+ kfree(chip->ecc.code_buf);
+ kfree(chip->ecc.calc_buf);
+
+ /* Free bad block descriptor memory */
+ if (chip->badblock_pattern && chip->badblock_pattern->options
+ & NAND_BBT_DYNAMICSTRUCT)
+ kfree(chip->badblock_pattern);
+
+ /* Free the data interface */
+ kfree(chip->best_interface_config);
+
+ /* Free manufacturer priv data. */
+ nand_manufacturer_cleanup(chip);
+
+ /* Free controller specific allocations after chip identification */
+ nand_detach(chip);
+
+ /* Free identification phase allocations */
+ nand_scan_ident_cleanup(chip);
+}
+EXPORT_SYMBOL_GPL(nand_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("Generic NAND flash driver code");
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
new file mode 100644
index 0000000000..e4664fa6fd
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -0,0 +1,1491 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Overview:
+ * Bad block table support for the NAND driver
+ *
+ * Copyright © 2004 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Description:
+ *
+ * When nand_scan_bbt is called, it tries to find the bad block table
+ * depending on the options in the BBT descriptor(s). If no flash-based BBT
+ * (NAND_BBT_USE_FLASH) is specified, the device is scanned for factory-marked
+ * good / bad blocks and this information is used to create a memory BBT.
+ * Once a new bad block is discovered, the "factory" information is updated
+ * on the device.
+ * If a flash-based BBT is specified, the function first tries to find the
+ * BBT on flash. If a BBT is found, its contents are read and the memory-based
+ * BBT is created. If a mirrored BBT is selected, the mirror is searched too
+ * and the versions are compared. If the mirror has a greater version number,
+ * the mirror BBT is used to build the memory-based BBT. If the tables are not
+ * versioned, we "or" the bad block information. If one of the BBTs is out of
+ * date or does not exist, it is (re)created. If no BBT exists at all, the
+ * device is scanned for factory-marked good / bad blocks and the bad block
+ * tables are created.
+ *
+ * For manufacturer-created BBTs, like the one found on M-SYS DOC devices,
+ * the BBT is searched and read but never created.
+ *
+ * The auto-generated bad block table is located in the last good blocks of
+ * the device. The table is mirrored, so that it can be updated safely. The
+ * table is marked in the OOB area with an ident pattern and a version number
+ * which indicates which of the two tables is more up to date. If the NAND
+ * controller needs the complete OOB area for the ECC information, the option
+ * NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of course):
+ * it moves the ident pattern and the version byte into the data area and
+ * leaves the OOB area untouched.
+ *
+ * The table uses 2 bits per block
+ * 11b: block is good
+ * 00b: block is factory marked bad
+ * 01b, 10b: block is marked bad due to wear
+ *
+ * The memory bad block table uses the following scheme:
+ * 00b: block is good
+ * 01b: block is marked bad due to wear
+ * 10b: block is reserved (to protect the bbt area)
+ * 11b: block is factory marked bad
+ *
+ * Multichip devices like DOC store the bad block info per floor.
+ *
+ * The following assumptions are made:
+ * - BBTs start at a page boundary; if autolocated, at a block boundary
+ * - the space necessary for a BBT in flash does not exceed a block boundary
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/bbm.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+#include <linux/string.h>
+
+#include "internals.h"
+
+#define BBT_BLOCK_GOOD 0x00
+#define BBT_BLOCK_WORN 0x01
+#define BBT_BLOCK_RESERVED 0x02
+#define BBT_BLOCK_FACTORY_BAD 0x03
+
+#define BBT_ENTRY_MASK 0x03
+#define BBT_ENTRY_SHIFT 2
+
+static inline uint8_t bbt_get_entry(struct nand_chip *chip, int block)
+{
+ uint8_t entry = chip->bbt[block >> BBT_ENTRY_SHIFT];
+ entry >>= (block & BBT_ENTRY_MASK) * 2;
+ return entry & BBT_ENTRY_MASK;
+}
+
+static inline void bbt_mark_entry(struct nand_chip *chip, int block,
+ uint8_t mark)
+{
+ uint8_t msk = (mark & BBT_ENTRY_MASK) << ((block & BBT_ENTRY_MASK) * 2);
+ chip->bbt[block >> BBT_ENTRY_SHIFT] |= msk;
+}
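+
+/*
+ * Worked example: block 5 lives in bbt[5 >> 2] == bbt[1], at bit offset
+ * (5 & 0x3) * 2 == 2. bbt_mark_entry(chip, 5, BBT_BLOCK_WORN) ORs
+ * 0x01 << 2 == 0x04 into bbt[1], and bbt_get_entry(chip, 5) then returns
+ * (bbt[1] >> 2) & 0x3 == BBT_BLOCK_WORN. The in-memory table is kzalloc()ed,
+ * so every block starts out as BBT_BLOCK_GOOD (00b) and marking only ever
+ * needs to set bits.
+ */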
+
+static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
+{
+ if (memcmp(buf, td->pattern, td->len))
+ return -1;
+ return 0;
+}
+
+/**
+ * check_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @len: the length of buffer to search
+ * @paglen: the pagelength
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers.
+ */
+static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
+{
+ if (td->options & NAND_BBT_NO_OOB)
+ return check_pattern_no_oob(buf, td);
+
+ /* Compare the pattern */
+ if (memcmp(buf + paglen + td->offs, td->pattern, td->len))
+ return -1;
+
+ return 0;
+}
+
+/**
+ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers. Same as check_pattern, but no optional empty
+ * check.
+ */
+static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
+{
+ /* Compare the pattern */
+ if (memcmp(buf + td->offs, td->pattern, td->len))
+ return -1;
+ return 0;
+}
+
+/**
+ * add_marker_len - compute the length of the marker in data area
+ * @td: BBT descriptor used for computation
+ *
+ * The length will be 0 if the marker is located in OOB area.
+ */
+static u32 add_marker_len(struct nand_bbt_descr *td)
+{
+ u32 len;
+
+ if (!(td->options & NAND_BBT_NO_OOB))
+ return 0;
+
+ len = td->len;
+ if (td->options & NAND_BBT_VERSION)
+ len++;
+ return len;
+}
+
+/**
+ * read_bbt - [GENERIC] Read the bad block table starting from page
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @td: the bbt description table
+ * @offs: block number offset in the table
+ *
+ * Read the bad block table starting from page.
+ */
+static int read_bbt(struct nand_chip *this, uint8_t *buf, int page, int num,
+ struct nand_bbt_descr *td, int offs)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int res, ret = 0, i, j, act = 0;
+ size_t retlen, len, totlen;
+ loff_t from;
+ int bits = td->options & NAND_BBT_NRBITS_MSK;
+ uint8_t msk = (uint8_t)((1 << bits) - 1);
+ u32 marker_len;
+ int reserved_block_code = td->reserved_block_code;
+
+ totlen = (num * bits) >> 3;
+ marker_len = add_marker_len(td);
+ from = ((loff_t)page) << this->page_shift;
+
+ while (totlen) {
+ len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
+ if (marker_len) {
+ /*
+ * In case the BBT marker is not in the OOB area it
+ * will be just in the first page.
+ */
+ len -= marker_len;
+ from += marker_len;
+ marker_len = 0;
+ }
+ res = mtd_read(mtd, from, len, &retlen, buf);
+ if (res < 0) {
+ if (mtd_is_eccerr(res)) {
+ pr_info("nand_bbt: ECC error in BBT at 0x%012llx\n",
+ from & ~mtd->writesize);
+ return res;
+ } else if (mtd_is_bitflip(res)) {
+ pr_info("nand_bbt: corrected error in BBT at 0x%012llx\n",
+ from & ~mtd->writesize);
+ ret = res;
+ } else {
+ pr_info("nand_bbt: error reading BBT\n");
+ return res;
+ }
+ }
+
+ /* Analyse data */
+ for (i = 0; i < len; i++) {
+ uint8_t dat = buf[i];
+ for (j = 0; j < 8; j += bits, act++) {
+ uint8_t tmp = (dat >> j) & msk;
+ if (tmp == msk)
+ continue;
+ if (reserved_block_code && (tmp == reserved_block_code)) {
+ pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
+ (loff_t)(offs + act) <<
+ this->bbt_erase_shift);
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_RESERVED);
+ mtd->ecc_stats.bbtblocks++;
+ continue;
+ }
+ /*
+ * Leave it for now; once this has matured we can
+ * move this message to pr_debug.
+ */
+ pr_info("nand_read_bbt: bad block at 0x%012llx\n",
+ (loff_t)(offs + act) <<
+ this->bbt_erase_shift);
+ /* Factory marked bad or worn out? */
+ if (tmp == 0)
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_FACTORY_BAD);
+ else
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_WORN);
+ mtd->ecc_stats.badblocks++;
+ }
+ }
+ totlen -= len;
+ from += len;
+ }
+ return ret;
+}
+
+/**
+ * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 to read all chips; applies only if
+ * NAND_BBT_PERCHIP option is set
+ *
+ * Read the bad block table for all chips starting at a given page. We assume
+ * that the bbt bits are in consecutive order.
+ */
+static int read_abs_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td, int chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ u64 targetsize = nanddev_target_size(&this->base);
+ int res = 0, i;
+
+ if (td->options & NAND_BBT_PERCHIP) {
+ int offs = 0;
+ for (i = 0; i < nanddev_ntargets(&this->base); i++) {
+ if (chip == -1 || chip == i)
+ res = read_bbt(this, buf, td->pages[i],
+ targetsize >> this->bbt_erase_shift,
+ td, offs);
+ if (res)
+ return res;
+ offs += targetsize >> this->bbt_erase_shift;
+ }
+ } else {
+ res = read_bbt(this, buf, td->pages[0],
+ mtd->size >> this->bbt_erase_shift, td, 0);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
+/* BBT marker is in the first page, no OOB */
+static int scan_read_data(struct nand_chip *this, uint8_t *buf, loff_t offs,
+ struct nand_bbt_descr *td)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ size_t retlen;
+ size_t len;
+
+ len = td->len;
+ if (td->options & NAND_BBT_VERSION)
+ len++;
+
+ return mtd_read(mtd, offs, len, &retlen, buf);
+}
+
+/**
+ * scan_read_oob - [GENERIC] Scan data+OOB region to buffer
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @offs: offset at which to scan
+ * @len: length of data region to read
+ *
+ * Scan read data from data+OOB. May traverse multiple pages, interleaving
+ * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
+ * ECC condition (error or bitflip). May quit on the first (non-ECC) error.
+ */
+static int scan_read_oob(struct nand_chip *this, uint8_t *buf, loff_t offs,
+ size_t len)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ struct mtd_oob_ops ops = { };
+ int res, ret = 0;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+
+ while (len > 0) {
+ ops.datbuf = buf;
+ ops.len = min(len, (size_t)mtd->writesize);
+ ops.oobbuf = buf + ops.len;
+
+ res = mtd_read_oob(mtd, offs, &ops);
+ if (res) {
+ if (!mtd_is_bitflip_or_eccerr(res))
+ return res;
+ else if (mtd_is_eccerr(res) || !ret)
+ ret = res;
+ }
+
+ buf += mtd->oobsize + mtd->writesize;
+ len -= mtd->writesize;
+ offs += mtd->writesize;
+ }
+ return ret;
+}
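+
+/*
+ * For example, on a device with 2048-byte pages and 64-byte OOB, after two
+ * iterations buf holds bytes 0..2047 (page 0 data), 2048..2111 (page 0 OOB),
+ * 2112..4159 (page 1 data) and 4160..4223 (page 1 OOB), i.e. the
+ * page,OOB,page,OOB,... interleaving described above.
+ */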
+
+static int scan_read(struct nand_chip *this, uint8_t *buf, loff_t offs,
+ size_t len, struct nand_bbt_descr *td)
+{
+ if (td->options & NAND_BBT_NO_OOB)
+ return scan_read_data(this, buf, offs, td);
+ else
+ return scan_read_oob(this, buf, offs, len);
+}
+
+/* Scan write data with oob to flash */
+static int scan_write_bbt(struct nand_chip *this, loff_t offs, size_t len,
+ uint8_t *buf, uint8_t *oob)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ struct mtd_oob_ops ops = { };
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.datbuf = buf;
+ ops.oobbuf = oob;
+ ops.len = len;
+
+ return mtd_write_oob(mtd, offs, &ops);
+}
+
+static u32 bbt_get_ver_offs(struct nand_chip *this, struct nand_bbt_descr *td)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ u32 ver_offs = td->veroffs;
+
+ if (!(td->options & NAND_BBT_NO_OOB))
+ ver_offs += mtd->writesize;
+ return ver_offs;
+}
+
+/**
+ * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Read the bad block table(s) for all chips starting at a given page. We
+ * assume that the bbt bits are in consecutive order.
+ */
+static void read_abs_bbts(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+
+ /* Read the primary version, if available */
+ if (td->options & NAND_BBT_VERSION) {
+ scan_read(this, buf, (loff_t)td->pages[0] << this->page_shift,
+ mtd->writesize, td);
+ td->version[0] = buf[bbt_get_ver_offs(this, td)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ td->pages[0], td->version[0]);
+ }
+
+ /* Read the mirror version, if available */
+ if (md && (md->options & NAND_BBT_VERSION)) {
+ scan_read(this, buf, (loff_t)md->pages[0] << this->page_shift,
+ mtd->writesize, md);
+ md->version[0] = buf[bbt_get_ver_offs(this, md)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ md->pages[0], md->version[0]);
+ }
+}
+
+/* Scan a given block partially */
+static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
+ loff_t offs, uint8_t *buf)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+
+ struct mtd_oob_ops ops = { };
+ int ret, page_offset;
+
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = buf;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ page_offset = nand_bbm_get_next_page(this, 0);
+
+ while (page_offset >= 0) {
+ /*
+ * Read the full oob until read_oob is fixed to handle single
+ * byte reads for 16 bit buswidth.
+ */
+ ret = mtd_read_oob(mtd, offs + (page_offset * mtd->writesize),
+ &ops);
+ /* Ignore ECC errors when checking for BBM */
+ if (ret && !mtd_is_bitflip_or_eccerr(ret))
+ return ret;
+
+ if (check_short_pattern(buf, bd))
+ return 1;
+
+ page_offset = nand_bbm_get_next_page(this, page_offset + 1);
+ }
+
+ return 0;
+}
+
+/* Check if a potential BBT block is marked as bad */
+static int bbt_block_checkbad(struct nand_chip *this, struct nand_bbt_descr *td,
+ loff_t offs, uint8_t *buf)
+{
+ struct nand_bbt_descr *bd = this->badblock_pattern;
+
+ /*
+ * No need to check for a bad BBT block if the BBM area overlaps with
+ * the bad block table marker area in OOB since writing a BBM here
+ * invalidates the bad block table marker anyway.
+ */
+ if (!(td->options & NAND_BBT_NO_OOB) &&
+ td->offs >= bd->offs && td->offs < bd->offs + bd->len)
+ return 0;
+
+ /*
+ * There is no point in checking for a bad block marker if writing
+ * such a marker is not supported.
+ */
+ if (this->bbt_options & NAND_BBT_NO_OOB_BBM ||
+ this->options & NAND_NO_BBM_QUIRK)
+ return 0;
+
+ if (scan_block_fast(this, bd, offs, buf) > 0)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * create_bbt - [GENERIC] Create a bad block table by scanning the device
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 to read all chips; applies only
+ * if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device for the given good/bad block
+ * identification pattern.
+ */
+static int create_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *bd, int chip)
+{
+ u64 targetsize = nanddev_target_size(&this->base);
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int i, numblocks, startblock;
+ loff_t from;
+
+ pr_info("Scanning device for bad blocks\n");
+
+ if (chip == -1) {
+ numblocks = mtd->size >> this->bbt_erase_shift;
+ startblock = 0;
+ from = 0;
+ } else {
+ if (chip >= nanddev_ntargets(&this->base)) {
+ pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
+ chip + 1, nanddev_ntargets(&this->base));
+ return -EINVAL;
+ }
+ numblocks = targetsize >> this->bbt_erase_shift;
+ startblock = chip * numblocks;
+ numblocks += startblock;
+ from = (loff_t)startblock << this->bbt_erase_shift;
+ }
+
+ for (i = startblock; i < numblocks; i++) {
+ int ret;
+
+ BUG_ON(bd->options & NAND_BBT_NO_OOB);
+
+ ret = scan_block_fast(this, bd, from, buf);
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ bbt_mark_entry(this, i, BBT_BLOCK_FACTORY_BAD);
+ pr_warn("Bad eraseblock %d at 0x%012llx\n",
+ i, (unsigned long long)from);
+ mtd->ecc_stats.badblocks++;
+ }
+
+ from += (1 << this->bbt_erase_shift);
+ }
+ return 0;
+}
+
+/**
+ * search_bbt - [GENERIC] scan the device for a specific bad block table
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ *
+ * Read the bad block table by searching for a given ident pattern. The search
+ * is performed either from the beginning up or from the end of the device
+ * downwards. The search always starts at the start of a block. If the option
+ * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
+ * the bad block information of this chip. This is necessary to provide support
+ * for certain DOC devices.
+ *
+ * The bbt ident pattern resides in the oob area of the first page in a block.
+ */
+static int search_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td)
+{
+ u64 targetsize = nanddev_target_size(&this->base);
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int i, chips;
+ int startblock, block, dir;
+ int scanlen = mtd->writesize + mtd->oobsize;
+ int bbtblocks;
+ int blocktopage = this->bbt_erase_shift - this->page_shift;
+
+ /* Search direction top -> down? */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = (mtd->size >> this->bbt_erase_shift) - 1;
+ dir = -1;
+ } else {
+ startblock = 0;
+ dir = 1;
+ }
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = nanddev_ntargets(&this->base);
+ bbtblocks = targetsize >> this->bbt_erase_shift;
+ startblock &= bbtblocks - 1;
+ } else {
+ chips = 1;
+ bbtblocks = mtd->size >> this->bbt_erase_shift;
+ }
+
+ for (i = 0; i < chips; i++) {
+ /* Reset version information */
+ td->version[i] = 0;
+ td->pages[i] = -1;
+ /* Scan the maximum number of blocks */
+ for (block = 0; block < td->maxblocks; block++) {
+
+ int actblock = startblock + dir * block;
+ loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
+
+ /* Check if block is marked bad */
+ if (bbt_block_checkbad(this, td, offs, buf))
+ continue;
+
+ /* Read first page */
+ scan_read(this, buf, offs, mtd->writesize, td);
+ if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
+ td->pages[i] = actblock << blocktopage;
+ if (td->options & NAND_BBT_VERSION) {
+ offs = bbt_get_ver_offs(this, td);
+ td->version[i] = buf[offs];
+ }
+ break;
+ }
+ }
+ startblock += targetsize >> this->bbt_erase_shift;
+ }
+ /* Check if we found a bbt for each requested chip */
+ for (i = 0; i < chips; i++) {
+ if (td->pages[i] == -1)
+ pr_warn("Bad block table not found for chip %d\n", i);
+ else
+ pr_info("Bad block table found at page %d, version 0x%02X\n",
+ td->pages[i], td->version[i]);
+ }
+ return 0;
+}
+
+/**
+ * search_read_bbts - [GENERIC] scan the device for bad block table(s)
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Search and read the bad block table(s).
+ */
+static void search_read_bbts(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td,
+ struct nand_bbt_descr *md)
+{
+ /* Search the primary table */
+ search_bbt(this, buf, td);
+
+ /* Search the mirror table */
+ if (md)
+ search_bbt(this, buf, md);
+}
+
+/**
+ * get_bbt_block - Get the first valid eraseblock suitable to store a BBT
+ * @this: the NAND device
+ * @td: the BBT description
+ * @md: the mirror BBT descriptor
+ * @chip: the CHIP selector
+ *
+ * This function returns a positive block number pointing to a valid eraseblock
+ * suitable to store a BBT (i.e. in the range reserved for the BBT), or -ENOSPC
+ * if all blocks are already used or marked bad. If td->pages[chip] was already
+ * pointing to a valid block we re-use it, otherwise we search for the next
+ * valid one.
+ */
+static int get_bbt_block(struct nand_chip *this, struct nand_bbt_descr *td,
+ struct nand_bbt_descr *md, int chip)
+{
+ u64 targetsize = nanddev_target_size(&this->base);
+ int startblock, dir, page, numblocks, i;
+
+ /*
+ * There was already a version of the table, reuse the page. This
+ * applies for absolute placement too, as we have the page number in
+ * td->pages.
+ */
+ if (td->pages[chip] != -1)
+ return td->pages[chip] >>
+ (this->bbt_erase_shift - this->page_shift);
+
+ numblocks = (int)(targetsize >> this->bbt_erase_shift);
+ if (!(td->options & NAND_BBT_PERCHIP))
+ numblocks *= nanddev_ntargets(&this->base);
+
+ /*
+ * Automatic placement of the bad block table. Search direction
+ * top -> down?
+ */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = numblocks * (chip + 1) - 1;
+ dir = -1;
+ } else {
+ startblock = chip * numblocks;
+ dir = 1;
+ }
+
+ for (i = 0; i < td->maxblocks; i++) {
+ int block = startblock + dir * i;
+
+ /* Check if the block is bad */
+ switch (bbt_get_entry(this, block)) {
+ case BBT_BLOCK_WORN:
+ case BBT_BLOCK_FACTORY_BAD:
+ continue;
+ }
+
+ page = block << (this->bbt_erase_shift - this->page_shift);
+
+ /* Check if the block is used by the mirror table */
+ if (!md || md->pages[chip] != page)
+ return block;
+ }
+
+ return -ENOSPC;
+}
+
+/**
+ * mark_bbt_block_bad - Mark one of the blocks reserved for the BBT bad
+ * @this: the NAND device
+ * @td: the BBT description
+ * @chip: the CHIP selector
+ * @block: the BBT block to mark
+ *
+ * Blocks reserved for the BBT can become bad. This function is a helper to mark
+ * such blocks as bad. It takes care of updating the in-memory BBT, marking the
+ * block as bad using a bad block marker and invalidating the associated
+ * td->pages[] entry.
+ */
+static void mark_bbt_block_bad(struct nand_chip *this,
+ struct nand_bbt_descr *td,
+ int chip, int block)
+{
+ loff_t to;
+ int res;
+
+ bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+ to = (loff_t)block << this->bbt_erase_shift;
+ res = nand_markbad_bbm(this, to);
+ if (res)
+ pr_warn("nand_bbt: error %d while marking block %d bad\n",
+ res, block);
+
+ td->pages[chip] = -1;
+}
+
+/**
+ * write_bbt - [GENERIC] (Re)write the bad block table
+ * @this: NAND chip object
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
+ *
+ * (Re)write the bad block table.
+ */
+static int write_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md,
+ int chipsel)
+{
+ u64 targetsize = nanddev_target_size(&this->base);
+ struct mtd_info *mtd = nand_to_mtd(this);
+ struct erase_info einfo;
+ int i, res, chip = 0;
+ int bits, page, offs, numblocks, sft, sftmsk;
+ int nrchips, pageoffs, ooboffs;
+ uint8_t msk[4];
+ uint8_t rcode = td->reserved_block_code;
+ size_t retlen, len = 0;
+ loff_t to;
+ struct mtd_oob_ops ops = { };
+
+ ops.ooblen = mtd->oobsize;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ if (!rcode)
+ rcode = 0xff;
+ /* Write bad block table per chip rather than per device? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ numblocks = (int)(targetsize >> this->bbt_erase_shift);
+ /* Full device write or specific chip? */
+ if (chipsel == -1) {
+ nrchips = nanddev_ntargets(&this->base);
+ } else {
+ nrchips = chipsel + 1;
+ chip = chipsel;
+ }
+ } else {
+ numblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ nrchips = 1;
+ }
+
+ /* Loop through the chips */
+ while (chip < nrchips) {
+ int block;
+
+ block = get_bbt_block(this, td, md, chip);
+ if (block < 0) {
+ pr_err("No space left to write bad block table\n");
+ res = block;
+ goto outerr;
+ }
+
+ /*
+ * get_bbt_block() returns a block number, shift the value to
+ * get a page number.
+ */
+ page = block << (this->bbt_erase_shift - this->page_shift);
+
+ /* Set up shift count and masks for the flash table */
+ bits = td->options & NAND_BBT_NRBITS_MSK;
+ msk[2] = ~rcode;
+ switch (bits) {
+ case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x01;
+ break;
+ case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x03;
+ break;
+ case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C;
+ msk[3] = 0x0f;
+ break;
+ case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F;
+ msk[3] = 0xff;
+ break;
+ default: return -EINVAL;
+ }
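+
+ /*
+ * How the masks work for the common 2-bit case: the buffer is
+ * preset to 0xff and bits are only ever cleared below, so
+ * clearing the bits in msk[dat] leaves a 2-bit cell holding the
+ * complement: good (msk 0x00) stays 11b, worn (0x01) becomes
+ * 10b and factory bad (0x03) becomes 00b. Reserved blocks use
+ * ~rcode, so with the default rcode of 0xff they are written
+ * back as 11b, i.e. not stored in the flash table.
+ */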
+
+ to = ((loff_t)page) << this->page_shift;
+
+ /* Must we save the block contents? */
+ if (td->options & NAND_BBT_SAVECONTENT) {
+ /* Make it block aligned */
+ to &= ~(((loff_t)1 << this->bbt_erase_shift) - 1);
+ len = 1 << this->bbt_erase_shift;
+ res = mtd_read(mtd, to, len, &retlen, buf);
+ if (res < 0) {
+ if (retlen != len) {
+ pr_info("nand_bbt: error reading block for writing the bad block table\n");
+ return res;
+ }
+ pr_warn("nand_bbt: ECC error while reading block for writing bad block table\n");
+ }
+ /* Read oob data */
+ ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
+ ops.oobbuf = &buf[len];
+ res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
+ if (res < 0 || ops.oobretlen != ops.ooblen)
+ goto outerr;
+
+ /* Calc the byte offset in the buffer */
+ pageoffs = page - (int)(to >> this->page_shift);
+ offs = pageoffs << this->page_shift;
+ /* Preset the bbt area with 0xff */
+ memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
+ ooboffs = len + (pageoffs * mtd->oobsize);
+
+ } else if (td->options & NAND_BBT_NO_OOB) {
+ ooboffs = 0;
+ offs = td->len;
+ /* The version byte */
+ if (td->options & NAND_BBT_VERSION)
+ offs++;
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ len += offs;
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len);
+ /* Pattern is located at the beginning of the first page */
+ memcpy(buf, td->pattern, td->len);
+ } else {
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len +
+ (len >> this->page_shift) * mtd->oobsize);
+ offs = 0;
+ ooboffs = len;
+ /* Pattern is located in oob area of first page */
+ memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
+ }
+
+ if (td->options & NAND_BBT_VERSION)
+ buf[ooboffs + td->veroffs] = td->version[chip];
+
+ /* Walk through the memory table */
+ for (i = 0; i < numblocks; i++) {
+ uint8_t dat;
+ int sftcnt = (i << (3 - sft)) & sftmsk;
+ dat = bbt_get_entry(this, chip * numblocks + i);
+ /* Do not store the reserved bbt blocks! */
+ buf[offs + (i >> sft)] &= ~(msk[dat] << sftcnt);
+ }
+
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.addr = to;
+ einfo.len = 1 << this->bbt_erase_shift;
+ res = nand_erase_nand(this, &einfo, 1);
+ if (res < 0) {
+ pr_warn("nand_bbt: error while erasing BBT block %d\n",
+ res);
+ mark_bbt_block_bad(this, td, chip, block);
+ continue;
+ }
+
+ res = scan_write_bbt(this, to, len, buf,
+ td->options & NAND_BBT_NO_OOB ?
+ NULL : &buf[len]);
+ if (res < 0) {
+ pr_warn("nand_bbt: error while writing BBT block %d\n",
+ res);
+ mark_bbt_block_bad(this, td, chip, block);
+ continue;
+ }
+
+ pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
+ (unsigned long long)to, td->version[chip]);
+
+ /* Mark it as used */
+ td->pages[chip++] = page;
+ }
+ return 0;
+
+ outerr:
+ pr_warn("nand_bbt: error while writing bad block table %d\n", res);
+ return res;
+}
+
+/**
+ * nand_memory_bbt - [GENERIC] create a memory based bad block table
+ * @this: NAND chip object
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function creates a memory based bbt by scanning the device for
+ * manufacturer / software marked good / bad blocks.
+ */
+static inline int nand_memory_bbt(struct nand_chip *this,
+ struct nand_bbt_descr *bd)
+{
+ u8 *pagebuf = nand_get_data_buf(this);
+
+ return create_bbt(this, pagebuf, bd, -1);
+}
+
+/**
+ * check_create - [GENERIC] create and write bbt(s) if necessary
+ * @this: the NAND device
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks the results of the previous call to read_bbt and creates
+ * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
+ * for the chip/device. Update is necessary if one of the tables is missing or
+ * the version number of one table is less than the other's.
+ */
+static int check_create(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *bd)
+{
+ int i, chips, writeops, create, chipsel, res, res2;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+ struct nand_bbt_descr *rd, *rd2;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP)
+ chips = nanddev_ntargets(&this->base);
+ else
+ chips = 1;
+
+ for (i = 0; i < chips; i++) {
+ writeops = 0;
+ create = 0;
+ rd = NULL;
+ rd2 = NULL;
+ res = res2 = 0;
+ /* Per chip or per device? */
+ chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
+ /* Mirrored table available? */
+ if (md) {
+ if (td->pages[i] == -1 && md->pages[i] == -1) {
+ create = 1;
+ writeops = 0x03;
+ } else if (td->pages[i] == -1) {
+ rd = md;
+ writeops = 0x01;
+ } else if (md->pages[i] == -1) {
+ rd = td;
+ writeops = 0x02;
+ } else if (td->version[i] == md->version[i]) {
+ rd = td;
+ if (!(td->options & NAND_BBT_VERSION))
+ rd2 = md;
+ } else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
+ rd = td;
+ writeops = 0x02;
+ } else {
+ rd = md;
+ writeops = 0x01;
+ }
+ } else {
+ if (td->pages[i] == -1) {
+ create = 1;
+ writeops = 0x01;
+ } else {
+ rd = td;
+ }
+ }
+
+ if (create) {
+ /* Create the bad block table by scanning the device? */
+ if (!(td->options & NAND_BBT_CREATE))
+ continue;
+
+ /* Create the table in memory by scanning the chip(s) */
+ if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
+ create_bbt(this, buf, bd, chipsel);
+
+ td->version[i] = 1;
+ if (md)
+ md->version[i] = 1;
+ }
+
+ /* Read back first? */
+ if (rd) {
+ res = read_abs_bbt(this, buf, rd, chipsel);
+ if (mtd_is_eccerr(res)) {
+ /* Mark table as invalid */
+ rd->pages[i] = -1;
+ rd->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+ /* If they weren't versioned, read both */
+ if (rd2) {
+ res2 = read_abs_bbt(this, buf, rd2, chipsel);
+ if (mtd_is_eccerr(res2)) {
+ /* Mark table as invalid */
+ rd2->pages[i] = -1;
+ rd2->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+
+ /* Scrub the flash table(s)? */
+ if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
+ writeops = 0x03;
+
+ /* Update version numbers before writing */
+ if (md) {
+ td->version[i] = max(td->version[i], md->version[i]);
+ md->version[i] = td->version[i];
+ }
+
+ /* Write the bad block table to the device? */
+ if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+ res = write_bbt(this, buf, td, md, chipsel);
+ if (res < 0)
+ return res;
+ }
+
+ /* Write the mirror bad block table to the device? */
+ if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(this, buf, md, td, chipsel);
+ if (res < 0)
+ return res;
+ }
+ }
+ return 0;
+}
+
+/**
+ * nand_update_bbt - update bad block table(s)
+ * @this: the NAND device
+ * @offs: the offset of the newly marked block
+ *
+ * The function updates the bad block table(s).
+ */
+static int nand_update_bbt(struct nand_chip *this, loff_t offs)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int len, res = 0;
+ int chip, chipsel;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ if (!this->bbt || !td)
+ return -EINVAL;
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chip = (int)(offs >> this->chip_shift);
+ chipsel = chip;
+ } else {
+ chip = 0;
+ chipsel = -1;
+ }
+
+ td->version[chip]++;
+ if (md)
+ md->version[chip]++;
+
+ /* Write the bad block table to the device? */
+ if (td->options & NAND_BBT_WRITE) {
+ res = write_bbt(this, buf, td, md, chipsel);
+ if (res < 0)
+ goto out;
+ }
+ /* Write the mirror bad block table to the device? */
+ if (md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(this, buf, md, td, chipsel);
+ }
+
+ out:
+ kfree(buf);
+ return res;
+}
+
+/**
+ * mark_bbt_region - [GENERIC] mark the bad block table regions
+ * @this: the NAND device
+ * @td: bad block table descriptor
+ *
+ * The bad block table regions are marked as "bad" to prevent accidental
+ * erasures / writes. The regions are identified by the mark 0x02.
+ */
+static void mark_bbt_region(struct nand_chip *this, struct nand_bbt_descr *td)
+{
+ u64 targetsize = nanddev_target_size(&this->base);
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int i, j, chips, block, nrblocks, update;
+ uint8_t oldval;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = nanddev_ntargets(&this->base);
+ nrblocks = (int)(targetsize >> this->bbt_erase_shift);
+ } else {
+ chips = 1;
+ nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ }
+
+ for (i = 0; i < chips; i++) {
+ if ((td->options & NAND_BBT_ABSPAGE) ||
+ !(td->options & NAND_BBT_WRITE)) {
+ if (td->pages[i] == -1)
+ continue;
+ block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ if ((oldval != BBT_BLOCK_RESERVED) &&
+ td->reserved_block_code)
+ nand_update_bbt(this, (loff_t)block <<
+ this->bbt_erase_shift);
+ continue;
+ }
+ update = 0;
+ if (td->options & NAND_BBT_LASTBLOCK)
+ block = ((i + 1) * nrblocks) - td->maxblocks;
+ else
+ block = i * nrblocks;
+ for (j = 0; j < td->maxblocks; j++) {
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ if (oldval != BBT_BLOCK_RESERVED)
+ update = 1;
+ block++;
+ }
+ /*
+ * If we want reserved blocks to be recorded to flash, and some
+ * new ones have been marked, then we need to update the stored
+ * bbts. This should only happen once.
+ */
+ if (update && td->reserved_block_code)
+ nand_update_bbt(this, (loff_t)(block - 1) <<
+ this->bbt_erase_shift);
+ }
+}
+
+/**
+ * verify_bbt_descr - verify the bad block description
+ * @this: the NAND device
+ * @bd: the table to verify
+ *
+ * This function performs a few sanity checks on the bad block description
+ * table.
+ */
+static void verify_bbt_descr(struct nand_chip *this, struct nand_bbt_descr *bd)
+{
+ u64 targetsize = nanddev_target_size(&this->base);
+ struct mtd_info *mtd = nand_to_mtd(this);
+ u32 pattern_len;
+ u32 bits;
+ u32 table_size;
+
+ if (!bd)
+ return;
+
+ pattern_len = bd->len;
+ bits = bd->options & NAND_BBT_NRBITS_MSK;
+
+ BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
+ !(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!bits);
+
+ if (bd->options & NAND_BBT_VERSION)
+ pattern_len++;
+
+ if (bd->options & NAND_BBT_NO_OOB) {
+ BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
+ BUG_ON(bd->offs);
+ if (bd->options & NAND_BBT_VERSION)
+ BUG_ON(bd->veroffs != bd->len);
+ BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
+ }
+
+ if (bd->options & NAND_BBT_PERCHIP)
+ table_size = targetsize >> this->bbt_erase_shift;
+ else
+ table_size = mtd->size >> this->bbt_erase_shift;
+ table_size >>= 3;
+ table_size *= bits;
+ if (bd->options & NAND_BBT_NO_OOB)
+ table_size += pattern_len;
+ BUG_ON(table_size > (1 << this->bbt_erase_shift));
+}
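+
+/*
+ * Size check example: a 2-bit-per-block table for a 1 GiB target with 128 KiB
+ * eraseblocks covers 8192 blocks, i.e. table_size = 8192 / 8 * 2 = 2048
+ * bytes, well within the one-eraseblock limit enforced above.
+ */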
+
+/**
+ * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
+ * @this: the NAND device
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks whether bad block table(s) is/are already available. If
+ * not, it scans the device for manufacturer-marked good / bad blocks and
+ * writes the bad block table(s) to the selected place.
+ *
+ * The bad block table memory is allocated here. It must be freed by calling
+ * the nand_free_bbt function.
+ */
+static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int len, res;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ len = (mtd->size >> (this->bbt_erase_shift + 2)) ? : 1;
+ /*
+ * Allocate memory (2 bits per block) and clear the in-memory bad block
+ * table.
+ */
+ this->bbt = kzalloc(len, GFP_KERNEL);
+ if (!this->bbt)
+ return -ENOMEM;
+
+ /*
+ * If no primary table descriptor is given, scan the device to build a
+ * memory based bad block table.
+ */
+ if (!td) {
+ res = nand_memory_bbt(this, bd);
+ if (res) {
+ pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
+ goto err_free_bbt;
+ }
+ return 0;
+ }
+ verify_bbt_descr(this, td);
+ verify_bbt_descr(this, md);
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = vmalloc(len);
+ if (!buf) {
+ res = -ENOMEM;
+ goto err_free_bbt;
+ }
+
+ /* Is the bbt at a given page? */
+ if (td->options & NAND_BBT_ABSPAGE) {
+ read_abs_bbts(this, buf, td, md);
+ } else {
+ /* Search the bad block table using a pattern in oob */
+ search_read_bbts(this, buf, td, md);
+ }
+
+ res = check_create(this, buf, bd);
+ if (res)
+ goto err_free_buf;
+
+ /* Prevent the bbt regions from erasing / writing */
+ mark_bbt_region(this, td);
+ if (md)
+ mark_bbt_region(this, md);
+
+ vfree(buf);
+ return 0;
+
+err_free_buf:
+ vfree(buf);
+err_free_bbt:
+ kfree(this->bbt);
+ this->bbt = NULL;
+ return res;
+}
+
+/*
+ * Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+/* Generic flash bbt descriptors */
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
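+/*
+ * Default layout of a flash-based BBT block: the "Bbt0"/"1tbB" marker
+ * pattern sits at OOB offset 8 of the first page, immediately followed by
+ * the version byte at offset 12.
+ */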
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = mirror_pattern
+};
+
+static struct nand_bbt_descr bbt_main_no_oob_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_no_oob_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = mirror_pattern
+};
+
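+/*
+ * The factory BBM scan always uses the OOB area, so NAND_BBT_NO_OOB must be
+ * masked out of the options inherited by the scan descriptor.
+ */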
+#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
+/**
+ * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
+ * @this: NAND chip to create descriptor for
+ *
+ * This function allocates and initializes a nand_bbt_descr for BBM detection
+ * based on the properties of @this. The new descriptor is stored in
+ * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
+ * passed to this function.
+ */
+static int nand_create_badblock_pattern(struct nand_chip *this)
+{
+ struct nand_bbt_descr *bd;
+ if (this->badblock_pattern) {
+ pr_warn("Bad block pattern already allocated; not replacing\n");
+ return -EINVAL;
+ }
+ bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd)
+ return -ENOMEM;
+ bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
+ bd->offs = this->badblockpos;
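+ /* On 16-bit wide chips the factory bad block marker spans two bytes. */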
+ bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
+ bd->pattern = scan_ff_pattern;
+ bd->options |= NAND_BBT_DYNAMICSTRUCT;
+ this->badblock_pattern = bd;
+ return 0;
+}
+
+/**
+ * nand_create_bbt - [NAND Interface] Select a default bad block table for the device
+ * @this: NAND chip object
+ *
+ * This function selects the default bad block table support for the device and
+ * calls the nand_scan_bbt function.
+ */
+int nand_create_bbt(struct nand_chip *this)
+{
+ int ret;
+
+ /* Is a flash based bad block table requested? */
+ if (this->bbt_options & NAND_BBT_USE_FLASH) {
+ /* Use the default pattern descriptors */
+ if (!this->bbt_td) {
+ if (this->bbt_options & NAND_BBT_NO_OOB) {
+ this->bbt_td = &bbt_main_no_oob_descr;
+ this->bbt_md = &bbt_mirror_no_oob_descr;
+ } else {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ }
+ }
+ } else {
+ this->bbt_td = NULL;
+ this->bbt_md = NULL;
+ }
+
+ if (!this->badblock_pattern) {
+ ret = nand_create_badblock_pattern(this);
+ if (ret)
+ return ret;
+ }
+
+ return nand_scan_bbt(this, this->badblock_pattern);
+}
+EXPORT_SYMBOL(nand_create_bbt);
+
+/**
+ * nand_isreserved_bbt - [NAND Interface] Check if a block is reserved
+ * @this: NAND chip object
+ * @offs: offset in the device
+ */
+int nand_isreserved_bbt(struct nand_chip *this, loff_t offs)
+{
+ int block;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+ return bbt_get_entry(this, block) == BBT_BLOCK_RESERVED;
+}
+
+/**
+ * nand_isbad_bbt - [NAND Interface] Check if a block is bad
+ * @this: NAND chip object
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ */
+int nand_isbad_bbt(struct nand_chip *this, loff_t offs, int allowbbt)
+{
+ int block, res;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+ res = bbt_get_entry(this, block);
+
+ pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
+ (unsigned int)offs, block, res);
+
+ if (mtd_check_expert_analysis_mode())
+ return 0;
+
+ switch (res) {
+ case BBT_BLOCK_GOOD:
+ return 0;
+ case BBT_BLOCK_WORN:
+ return 1;
+ case BBT_BLOCK_RESERVED:
+ return allowbbt ? 0 : 1;
+ }
+ return 1;
+}
+
+/**
+ * nand_markbad_bbt - [NAND Interface] Mark a block bad in the BBT
+ * @this: NAND chip object
+ * @offs: offset of the bad block
+ */
+int nand_markbad_bbt(struct nand_chip *this, loff_t offs)
+{
+ int block, ret = 0;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+
+ /* Mark bad block in memory */
+ bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+ /* Update flash-based bad block table */
+ if (this->bbt_options & NAND_BBT_USE_FLASH)
+ ret = nand_update_bbt(this, offs);
+
+ return ret;
+}
diff --git a/drivers/mtd/nand/raw/nand_esmt.c b/drivers/mtd/nand/raw/nand_esmt.c
new file mode 100644
index 0000000000..4412c407ae
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_esmt.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Toradex AG
+ *
+ * Author: Marcel Ziswiler <marcel.ziswiler@toradex.com>
+ */
+
+#include <linux/mtd/rawnand.h>
+#include "internals.h"
+
+static void esmt_nand_decode_id(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements = {};
+
+ nand_decode_ext_id(chip);
+
+ /* Extract ECC requirements from 5th id byte. */
+ if (chip->id.len >= 5 && nand_is_slc(chip)) {
+ requirements.step_size = 512;
+ switch (chip->id.data[4] & 0x3) {
+ case 0x0:
+ requirements.strength = 4;
+ break;
+ case 0x1:
+ requirements.strength = 2;
+ break;
+ case 0x2:
+ requirements.strength = 1;
+ break;
+ default:
+ WARN(1, "Could not get ECC info");
+ requirements.step_size = 0;
+ break;
+ }
+ }
+
+ nanddev_set_ecc_requirements(base, &requirements);
+}
+
+static int esmt_nand_init(struct nand_chip *chip)
+{
+ if (nand_is_slc(chip))
+ /*
+ * It is known that some ESMT SLC NANDs have been shipped
+ * with the factory bad block markers in the first or last page
+ * of the block, instead of the first or second page. To be on
+ * the safe side, let's check all three locations.
+ */
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE |
+ NAND_BBM_LASTPAGE;
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops esmt_nand_manuf_ops = {
+ .detect = esmt_nand_decode_id,
+ .init = esmt_nand_init,
+};
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
new file mode 100644
index 0000000000..39076735a3
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -0,0 +1,748 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ */
+
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include "internals.h"
+
+#define NAND_HYNIX_CMD_SET_PARAMS 0x36
+#define NAND_HYNIX_CMD_APPLY_PARAMS 0x16
+
+#define NAND_HYNIX_1XNM_RR_REPEAT 8
+
+/**
+ * struct hynix_read_retry - read-retry data
+ * @nregs: number of registers to set when applying a new read-retry mode
+ * @regs: register offsets (NAND chip dependent)
+ * @values: array of values to set in registers. The array size is equal to
+ * (nregs * nmodes)
+ */
+struct hynix_read_retry {
+ int nregs;
+ const u8 *regs;
+ u8 values[];
+};
+
+/**
+ * struct hynix_nand - private Hynix NAND struct
+ * @read_retry: read-retry information
+ */
+struct hynix_nand {
+ const struct hynix_read_retry *read_retry;
+};
+
+/**
+ * struct hynix_read_retry_otp - structure describing how to retrieve the
+ * read-retry OTP area
+ * @nregs: number of Hynix private registers to set before reading the OTP
+ * area
+ * @regs: registers that should be configured
+ * @values: values that should be set in regs
+ * @page: the address to pass to the READ_PAGE command. Depends on the NAND
+ * chip
+ * @size: size of the read-retry OTP section
+ */
+struct hynix_read_retry_otp {
+ int nregs;
+ const u8 *regs;
+ const u8 *values;
+ int page;
+ int size;
+};
+
+static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
+{
+ u8 jedecid[5] = { };
+ int ret;
+
+ ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
+ if (ret)
+ return false;
+
+ return !strncmp("JEDEC", jedecid, sizeof(jedecid));
+}
+
+static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
+{
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(cmd, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, cmd, -1, -1);
+
+ return 0;
+}
+
+static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
+{
+ u16 column = ((u16)addr << 8) | addr;
+
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_ADDR(1, &addr, 0),
+ NAND_OP_8BIT_DATA_OUT(1, &val, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_NONE, column, -1);
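+ /*
+ * Legacy path: the register address travels through the column cycles.
+ * It is duplicated in both bytes of 'column' so that the right value is
+ * emitted no matter how many column cycles the cmdfunc issues.
+ */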
+ chip->legacy.write_byte(chip, val);
+
+ return 0;
+}
+
+static int hynix_nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
+{
+ struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
+ const u8 *values;
+ int i, ret;
+
+ values = hynix->read_retry->values +
+ (retry_mode * hynix->read_retry->nregs);
+
+ /* Enter 'Set Hynix Parameters' mode */
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the NAND in the requested read-retry mode.
+ * This is done by setting pre-defined values in internal NAND
+ * registers.
+ *
+ * The set of registers is NAND specific, and the values are either
+ * predefined or extracted from an OTP area on the NAND (values are
+ * probably tweaked at production in this case).
+ */
+ for (i = 0; i < hynix->read_retry->nregs; i++) {
+ ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
+ values[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Apply the new settings. */
+ return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+}
+
+/**
+ * hynix_get_majority - get the value that is occurring the most in a given
+ * set of values
+ * @in: the array of values to test
+ * @repeat: the size of the in array
+ * @out: pointer used to store the output value
+ *
+ * This function implements the 'majority check' logic that is supposed to
+ * overcome the unreliability of MLC NANDs when reading the OTP area storing
+ * the read-retry parameters.
+ *
+ * It's based on a pretty simple assumption: if we repeat the same value
+ * several times and then take the one that is occurring the most, we should
+ * find the correct value.
+ * Let's hope this dummy algorithm prevents us from losing the read-retry
+ * parameters.
+ */
+static int hynix_get_majority(const u8 *in, int repeat, u8 *out)
+{
+ int i, j, half = repeat / 2;
+
+ /*
+ * We only test the first half of the in array because we must ensure
+ * that the value is at least occurring repeat / 2 times.
+ *
+ * This loop is suboptimal since we may count the occurrences of the
+ * same value several times, but we are doing that on small sets, which
+ * makes it acceptable.
+ */
+ for (i = 0; i < half; i++) {
+ int cnt = 0;
+ u8 val = in[i];
+
+ /* Count all values that are matching the one at index i. */
+ for (j = i + 1; j < repeat; j++) {
+ if (in[j] == val)
+ cnt++;
+ }
+
+ /* We found a value occurring more than repeat / 2 times. */
+ if (cnt > half) {
+ *out = val;
+ return 0;
+ }
+ }
+
+ return -EIO;
+}
+
+static int hynix_read_rr_otp(struct nand_chip *chip,
+ const struct hynix_read_retry_otp *info,
+ void *buf)
+{
+ int i, ret;
+
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < info->nregs; i++) {
+ ret = hynix_nand_reg_write_op(chip, info->regs[i],
+ info->values[i]);
+ if (ret)
+ return ret;
+ }
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
+
+ /* Sequence to enter OTP mode? */
+ ret = hynix_nand_cmd_op(chip, 0x17);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x4);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x19);
+ if (ret)
+ return ret;
+
+ /* Now read the page */
+ ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
+ if (ret)
+ return ret;
+
+ /* Put everything back to normal */
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_reg_write_op(chip, 0x38, 0);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
+
+ return nand_read_page_op(chip, 0, 0, NULL, 0);
+}
+
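+/*
+ * Layout of the read-retry OTP area: 8 copies of the mode count at offset 0,
+ * 8 copies of the register count at offset 8, then 8 repetitions of the
+ * value sets, each stored once in plain form and once inverted.
+ */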
+#define NAND_HYNIX_1XNM_RR_COUNT_OFFS 0
+#define NAND_HYNIX_1XNM_RR_REG_COUNT_OFFS 8
+#define NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, inv) \
+ (16 + ((((x) * 2) + ((inv) ? 1 : 0)) * (setsize)))
+
+static int hynix_mlc_1xnm_rr_value(const u8 *buf, int nmodes, int nregs,
+ int mode, int reg, bool inv, u8 *val)
+{
+ u8 tmp[NAND_HYNIX_1XNM_RR_REPEAT];
+ int val_offs = (mode * nregs) + reg;
+ int set_size = nmodes * nregs;
+ int i, ret;
+
+ for (i = 0; i < NAND_HYNIX_1XNM_RR_REPEAT; i++) {
+ int set_offs = NAND_HYNIX_1XNM_RR_SET_OFFS(i, set_size, inv);
+
+ tmp[i] = buf[val_offs + set_offs];
+ }
+
+ ret = hynix_get_majority(tmp, NAND_HYNIX_1XNM_RR_REPEAT, val);
+ if (ret)
+ return ret;
+
+ if (inv)
+ *val = ~*val;
+
+ return 0;
+}
+
+static u8 hynix_1xnm_mlc_read_retry_regs[] = {
+ 0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf
+};
+
+static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip,
+ const struct hynix_read_retry_otp *info)
+{
+ struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
+ struct hynix_read_retry *rr = NULL;
+ int ret, i, j;
+ u8 nregs, nmodes;
+ u8 *buf;
+
+ buf = kmalloc(info->size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hynix_read_rr_otp(chip, info, buf);
+ if (ret)
+ goto out;
+
+ ret = hynix_get_majority(buf, NAND_HYNIX_1XNM_RR_REPEAT,
+ &nmodes);
+ if (ret)
+ goto out;
+
+ ret = hynix_get_majority(buf + NAND_HYNIX_1XNM_RR_REPEAT,
+ NAND_HYNIX_1XNM_RR_REPEAT,
+ &nregs);
+ if (ret)
+ goto out;
+
+ rr = kzalloc(sizeof(*rr) + (nregs * nmodes), GFP_KERNEL);
+ if (!rr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < nmodes; i++) {
+ for (j = 0; j < nregs; j++) {
+ u8 *val = rr->values + (i * nregs) + j;
+
+ ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
+ false, val);
+ if (!ret)
+ continue;
+
+ ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
+ true, val);
+ if (ret)
+ goto out;
+ }
+ }
+
+ rr->nregs = nregs;
+ rr->regs = hynix_1xnm_mlc_read_retry_regs;
+ hynix->read_retry = rr;
+ chip->ops.setup_read_retry = hynix_nand_setup_read_retry;
+ chip->read_retries = nmodes;
+
+out:
+ kfree(buf);
+
+ if (ret)
+ kfree(rr);
+
+ return ret;
+}
+
+static const u8 hynix_mlc_1xnm_rr_otp_regs[] = { 0x38 };
+static const u8 hynix_mlc_1xnm_rr_otp_values[] = { 0x52 };
+
+static const struct hynix_read_retry_otp hynix_mlc_1xnm_rr_otps[] = {
+ {
+ .nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
+ .regs = hynix_mlc_1xnm_rr_otp_regs,
+ .values = hynix_mlc_1xnm_rr_otp_values,
+ .page = 0x21f,
+ .size = 784
+ },
+ {
+ .nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
+ .regs = hynix_mlc_1xnm_rr_otp_regs,
+ .values = hynix_mlc_1xnm_rr_otp_values,
+ .page = 0x200,
+ .size = 528,
+ },
+};
+
+static int hynix_nand_rr_init(struct nand_chip *chip)
+{
+ int i, ret = 0;
+ bool valid_jedecid;
+
+ valid_jedecid = hynix_nand_has_valid_jedecid(chip);
+
+ /*
+ * We only support read-retry for 1xnm NANDs, and those NANDs all
+ * expose a valid JEDEC ID.
+ */
+ if (valid_jedecid) {
+ u8 nand_tech = chip->id.data[5] >> 4;
+
+ /* 1xnm technology */
+ if (nand_tech == 4) {
+ for (i = 0; i < ARRAY_SIZE(hynix_mlc_1xnm_rr_otps);
+ i++) {
+ /*
+ * FIXME: Hynix recommend to copy the
+ * read-retry OTP area into a normal page.
+ */
+ ret = hynix_mlc_1xnm_rr_init(chip,
+ &hynix_mlc_1xnm_rr_otps[i]);
+ if (!ret)
+ break;
+ }
+ }
+ }
+
+ if (ret)
+ pr_warn("failed to initialize read-retry infrastructure");
+
+ return 0;
+}
+
+static void hynix_nand_extract_oobsize(struct nand_chip *chip,
+ bool valid_jedecid)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ u8 oobsize;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
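+ /*
+ * The OOB size code is split across ID byte 3: bits 2-3 form the low
+ * two bits and bit 6 provides the high bit.
+ */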
+ oobsize = ((chip->id.data[3] >> 2) & 0x3) |
+ ((chip->id.data[3] >> 4) & 0x4);
+
+ if (valid_jedecid) {
+ switch (oobsize) {
+ case 0:
+ memorg->oobsize = 2048;
+ break;
+ case 1:
+ memorg->oobsize = 1664;
+ break;
+ case 2:
+ memorg->oobsize = 1024;
+ break;
+ case 3:
+ memorg->oobsize = 640;
+ break;
+ default:
+ /*
+ * We should never reach this case, but if that
+ * happens, this probably means Hynix decided to use
+ * a different extended ID format, and we should find
+ * a way to support it.
+ */
+ WARN(1, "Invalid OOB size");
+ break;
+ }
+ } else {
+ switch (oobsize) {
+ case 0:
+ memorg->oobsize = 128;
+ break;
+ case 1:
+ memorg->oobsize = 224;
+ break;
+ case 2:
+ memorg->oobsize = 448;
+ break;
+ case 3:
+ memorg->oobsize = 64;
+ break;
+ case 4:
+ memorg->oobsize = 32;
+ break;
+ case 5:
+ memorg->oobsize = 16;
+ break;
+ case 6:
+ memorg->oobsize = 640;
+ break;
+ default:
+ /*
+ * We should never reach this case, but if that
+ * happens, this probably means Hynix decided to use
+ * a different extended ID format, and we should find
+ * a way to support it.
+ */
+ WARN(1, "Invalid OOB size");
+ break;
+ }
+
+ /*
+ * The datasheet of H27UCG8T2BTR mentions that the "Redundant
+ * Area Size" is encoded "per 8KB" (page size). This chip uses
+ * a page size of 16KiB. The datasheet mentions an OOB size of
+ * 1,280 bytes, but the OOB size encoded in the ID bytes (using
+ * the existing logic above) is 640 bytes.
+ * Update the OOB size for this chip by taking the value
+ * determined above and scaling it to the actual page size (so
+ * the actual OOB size for this chip is: 640 * 16k / 8k).
+ */
+ if (chip->id.data[1] == 0xde)
+ memorg->oobsize *= memorg->pagesize / SZ_8K;
+ }
+
+ mtd->oobsize = memorg->oobsize;
+}
+
+static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
+ bool valid_jedecid)
+{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements = {};
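+ /* The ECC level is encoded in bits 4-6 of the 5th ID byte. */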
+ u8 ecc_level = (chip->id.data[4] >> 4) & 0x7;
+
+ if (valid_jedecid) {
+ /* Reference: H27UCG8T2E datasheet */
+ requirements.step_size = 1024;
+
+ switch (ecc_level) {
+ case 0:
+ requirements.step_size = 0;
+ requirements.strength = 0;
+ break;
+ case 1:
+ requirements.strength = 4;
+ break;
+ case 2:
+ requirements.strength = 24;
+ break;
+ case 3:
+ requirements.strength = 32;
+ break;
+ case 4:
+ requirements.strength = 40;
+ break;
+ case 5:
+ requirements.strength = 50;
+ break;
+ case 6:
+ requirements.strength = 60;
+ break;
+ default:
+ /*
+ * We should never reach this case, but if that
+ * happens, this probably means Hynix decided to use
+ * a different extended ID format, and we should find
+ * a way to support it.
+ */
+ WARN(1, "Invalid ECC requirements");
+ }
+ } else {
+ /*
+ * The ECC requirements field meaning depends on the
+ * NAND technology.
+ */
+ u8 nand_tech = chip->id.data[5] & 0x7;
+
+ if (nand_tech < 3) {
+ /* > 26nm, reference: H27UBG8T2A datasheet */
+ if (ecc_level < 5) {
+ requirements.step_size = 512;
+ requirements.strength = 1 << ecc_level;
+ } else if (ecc_level < 7) {
+ if (ecc_level == 5)
+ requirements.step_size = 2048;
+ else
+ requirements.step_size = 1024;
+ requirements.strength = 24;
+ } else {
+ /*
+ * We should never reach this case, but if that
+ * happens, this probably means Hynix decided
+ * to use a different extended ID format, and
+ * we should find a way to support it.
+ */
+ WARN(1, "Invalid ECC requirements");
+ }
+ } else {
+ /* <= 26nm, reference: H27UBG8T2B datasheet */
+ if (!ecc_level) {
+ requirements.step_size = 0;
+ requirements.strength = 0;
+ } else if (ecc_level < 5) {
+ requirements.step_size = 512;
+ requirements.strength = 1 << (ecc_level - 1);
+ } else {
+ requirements.step_size = 1024;
+ requirements.strength = 24 +
+ (8 * (ecc_level - 5));
+ }
+ }
+ }
+
+ nanddev_set_ecc_requirements(base, &requirements);
+}
+
+static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
+ bool valid_jedecid)
+{
+ u8 nand_tech;
+
+ /* We need scrambling on all TLC NANDs */
+ if (nanddev_bits_per_cell(&chip->base) > 2)
+ chip->options |= NAND_NEED_SCRAMBLING;
+
+ /* And on MLC NANDs with sub-3xnm process */
+ if (valid_jedecid) {
+ nand_tech = chip->id.data[5] >> 4;
+
+ /* < 3xnm */
+ if (nand_tech > 0)
+ chip->options |= NAND_NEED_SCRAMBLING;
+ } else {
+ nand_tech = chip->id.data[5] & 0x7;
+
+ /* < 32nm */
+ if (nand_tech > 2)
+ chip->options |= NAND_NEED_SCRAMBLING;
+ }
+}
+
+static void hynix_nand_decode_id(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ bool valid_jedecid;
+ u8 tmp;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /*
+ * Exclude all SLC NANDs from this advanced detection scheme.
+ * According to the ranges defined in several datasheets, it might
+ * appear that even SLC NANDs could fall in this extended ID scheme.
+ * If that is the case, rework the test to let SLC NANDs go through the
+ * detection process.
+ */
+ if (chip->id.len < 6 || nand_is_slc(chip)) {
+ nand_decode_ext_id(chip);
+ return;
+ }
+
+ /* Extract pagesize */
+ memorg->pagesize = 2048 << (chip->id.data[3] & 0x03);
+ mtd->writesize = memorg->pagesize;
+
+ tmp = (chip->id.data[3] >> 4) & 0x3;
+ /*
+ * When bit 7 is set we start counting at 1MiB, otherwise we start
+ * counting at 128KiB and shift this value left by the content of
+ * ID[3][4:5].
+ * The only exception is when ID[3][4:5] == 3 and ID[3][7] == 0, in
+ * this case the erasesize is set to 768KiB.
+ */
+ if (chip->id.data[3] & 0x80) {
+ memorg->pages_per_eraseblock = (SZ_1M << tmp) /
+ memorg->pagesize;
+ mtd->erasesize = SZ_1M << tmp;
+ } else if (tmp == 3) {
+ memorg->pages_per_eraseblock = (SZ_512K + SZ_256K) /
+ memorg->pagesize;
+ mtd->erasesize = SZ_512K + SZ_256K;
+ } else {
+ memorg->pages_per_eraseblock = (SZ_128K << tmp) /
+ memorg->pagesize;
+ mtd->erasesize = SZ_128K << tmp;
+ }
+
+ /*
+ * Modern Toggle DDR NANDs have a valid JEDEC ID even though they do
+ * not expose a valid JEDEC parameter table.
+ * These NANDs use a different NAND ID scheme.
+ */
+ valid_jedecid = hynix_nand_has_valid_jedecid(chip);
+
+ hynix_nand_extract_oobsize(chip, valid_jedecid);
+ hynix_nand_extract_ecc_requirements(chip, valid_jedecid);
+ hynix_nand_extract_scrambling_requirements(chip, valid_jedecid);
+}
+
+static void hynix_nand_cleanup(struct nand_chip *chip)
+{
+ struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
+
+ if (!hynix)
+ return;
+
+ kfree(hynix->read_retry);
+ kfree(hynix);
+ nand_set_manufacturer_data(chip, NULL);
+}
+
+static int
+h27ucg8t2atrbc_choose_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface)
+{
+ onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4);
+
+ return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
+static int h27ucg8t2etrbc_init(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ chip->options |= NAND_NEED_SCRAMBLING;
+ mtd_set_pairing_scheme(mtd, &dist3_pairing_scheme);
+
+ return 0;
+}
+
+static int hynix_nand_init(struct nand_chip *chip)
+{
+ struct hynix_nand *hynix;
+ int ret;
+
+ if (!nand_is_slc(chip))
+ chip->options |= NAND_BBM_LASTPAGE;
+ else
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
+ hynix = kzalloc(sizeof(*hynix), GFP_KERNEL);
+ if (!hynix)
+ return -ENOMEM;
+
+ nand_set_manufacturer_data(chip, hynix);
+
+ if (!strncmp("H27UCG8T2ATR-BC", chip->parameters.model,
+ sizeof("H27UCG8T2ATR-BC") - 1))
+ chip->ops.choose_interface_config =
+ h27ucg8t2atrbc_choose_interface_config;
+
+ if (!strncmp("H27UCG8T2ETR-BC", chip->parameters.model,
+ sizeof("H27UCG8T2ETR-BC") - 1))
+ h27ucg8t2etrbc_init(chip);
+
+ ret = hynix_nand_rr_init(chip);
+ if (ret)
+ hynix_nand_cleanup(chip);
+
+ return ret;
+}
+
+static void hynix_fixup_onfi_param_page(struct nand_chip *chip,
+ struct nand_onfi_params *p)
+{
+ /*
+ * Certain chips might report a 0 in the sdr_timing_mode field
+ * (bytes 129-130). This has been seen on H27U4G8F2GDA-BI.
+ * According to ONFI specification, bit 0 of this field "shall be 1".
+ * Forcibly set this bit.
+ */
+ p->sdr_timing_modes |= cpu_to_le16(BIT(0));
+}
+
+const struct nand_manufacturer_ops hynix_nand_manuf_ops = {
+ .detect = hynix_nand_decode_id,
+ .init = hynix_nand_init,
+ .cleanup = hynix_nand_cleanup,
+ .fixup_onfi_param_page = hynix_fixup_onfi_param_page,
+};
diff --git a/drivers/mtd/nand/raw/nand_ids.c b/drivers/mtd/nand/raw/nand_ids.c
new file mode 100644
index 0000000000..650351c62a
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_ids.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
+ */
+
+#include <linux/sizes.h>
+
+#include "internals.h"
+
+#define LP_OPTIONS 0
+#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
+
+#define SP_OPTIONS NAND_NEED_READRDY
+#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
+
+/*
+ * The chip ID list:
+ * name, device ID, page size, chip size in MiB, eraseblock size, options
+ *
+ * If page size and eraseblock size are 0, the sizes are taken from the
+ * extended chip ID.
+ */
+struct nand_flash_dev nand_flash_ids[] = {
+ /*
+ * Some incompatible NAND chips share device IDs and so must be
+ * listed by full ID. We list them first so that we can easily identify
+ * the most specific match.
+ */
+ {"TC58NVG0S3E 1G 3.3V 8-bit",
+ { .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} },
+ SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512), },
+ {"TC58NVG2S0F 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
+ {"TC58NVG2S0H 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x16, 0x08, 0x00} },
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 256, NAND_ECC_INFO(8, SZ_512) },
+ {"TC58NVG3S0F 8G 3.3V 8-bit",
+ { .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
+ SZ_4K, SZ_1K, SZ_256K, 0, 8, 232, NAND_ECC_INFO(4, SZ_512) },
+ {"TC58NVG5D2 32G 3.3V 8-bit",
+ { .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
+ SZ_8K, SZ_4K, SZ_1M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+ {"TC58NVG6D2 64G 3.3V 8-bit",
+ { .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
+ SZ_8K, SZ_8K, SZ_2M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+ {"SDTNQGAMA 64G 3.3V 8-bit",
+ { .id = {0x45, 0xde, 0x94, 0x93, 0x76, 0x57} },
+ SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
+ {"SDTNRGAMA 64G 3.3V 8-bit",
+ { .id = {0x45, 0xde, 0x94, 0x93, 0x76, 0x50} },
+ SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
+ {"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
+ { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
+ SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
+ NAND_ECC_INFO(40, SZ_1K) },
+ {"H27UCG8T2ETR-BC 64G 3.3V 8-bit",
+ { .id = {0xad, 0xde, 0x14, 0xa7, 0x42, 0x4a} },
+ SZ_16K, SZ_8K, SZ_4M, NAND_NEED_SCRAMBLING, 6, 1664,
+ NAND_ECC_INFO(40, SZ_1K) },
+ {"TH58NVG2S3HBAI4 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x91, 0x15, 0x76} },
+ SZ_2K, SZ_512, SZ_128K, 0, 5, 128, NAND_ECC_INFO(8, SZ_512) },
+ {"TH58NVG3S0HBAI4 8G 3.3V 8-bit",
+ { .id = {0x98, 0xd3, 0x91, 0x26, 0x76} },
+ SZ_4K, SZ_1K, SZ_256K, 0, 5, 256, NAND_ECC_INFO(8, SZ_512)},
+
+ LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS),
+
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit", 0x33, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit", 0x73, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit", 0x35, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit", 0x75, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit", 0x36, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit", 0x76, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x78, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x39, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit", 0x79, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS),
+
+ /*
+ * These are the new chips with large page size. Their page size and
+ * eraseblock size are determined from the extended ID bytes.
+ */
+
+ /* 512 Megabit */
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xD0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0, 64, LP_OPTIONS16),
+
+ /* 1 Gigabit */
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit", 0xA1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xF1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xD1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16),
+
+ /* 2 Gigabit */
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit", 0xAA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit", 0xDA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16),
+
+ /* 4 Gigabit */
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit", 0xAC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit", 0xDC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16),
+
+ /* 8 Gigabit */
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit", 0xA3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit", 0xD3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16),
+
+ /* 16 Gigabit */
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit", 0xA5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit", 0xD5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16),
+
+ /* 32 Gigabit */
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit", 0xA7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit", 0xD7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16),
+
+ /* 64 Gigabit */
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit", 0xAE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit", 0xDE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16),
+
+ /* 128 Gigabit */
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit", 0x1A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit", 0x3A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16),
+
+ /* 256 Gigabit */
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit", 0x1C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit", 0x3C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16),
+
+ /* 512 Gigabit */
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit", 0x1E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit", 0x3E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16),
+
+ {NULL}
+};
+
+/* Manufacturer IDs */
+static const struct nand_manufacturer_desc nand_manufacturer_descs[] = {
+ {NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops},
+ {NAND_MFR_ATO, "ATO"},
+ {NAND_MFR_EON, "Eon"},
+ {NAND_MFR_ESMT, "ESMT", &esmt_nand_manuf_ops},
+ {NAND_MFR_FUJITSU, "Fujitsu"},
+ {NAND_MFR_HYNIX, "Hynix", &hynix_nand_manuf_ops},
+ {NAND_MFR_INTEL, "Intel"},
+ {NAND_MFR_MACRONIX, "Macronix", &macronix_nand_manuf_ops},
+ {NAND_MFR_MICRON, "Micron", &micron_nand_manuf_ops},
+ {NAND_MFR_NATIONAL, "National"},
+ {NAND_MFR_RENESAS, "Renesas"},
+ {NAND_MFR_SAMSUNG, "Samsung", &samsung_nand_manuf_ops},
+ {NAND_MFR_SANDISK, "SanDisk", &sandisk_nand_manuf_ops},
+ {NAND_MFR_STMICRO, "ST Micro"},
+ {NAND_MFR_TOSHIBA, "Toshiba", &toshiba_nand_manuf_ops},
+ {NAND_MFR_WINBOND, "Winbond"},
+};
+
+/**
+ * nand_get_manufacturer_desc - Get manufacturer information from the
+ * manufacturer ID
+ * @id: manufacturer ID
+ *
+ * Returns a nand_manufacturer_desc object if the manufacturer is defined
+ * in the NAND manufacturers database, NULL otherwise.
+ */
+const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nand_manufacturer_descs); i++)
+ if (nand_manufacturer_descs[i].id == id)
+ return &nand_manufacturer_descs[i];
+
+ return NULL;
+}
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
new file mode 100644
index 0000000000..b3cc8f3605
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * This file contains all JEDEC helpers.
+ */
+
+#include <linux/slab.h>
+
+#include "internals.h"
+
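+/*
+ * The parameter page is stored in multiple consecutive copies; try up to
+ * three of them before giving up on the CRC check.
+ */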
+#define JEDEC_PARAM_PAGES 3
+
+/*
+ * Check if the NAND chip is JEDEC compliant; returns 1 if it is, 0 otherwise.
+ */
+int nand_jedec_detect(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ struct nand_jedec_params *p;
+ struct jedec_ecc_info *ecc;
+ bool use_datain = false;
+ int jedec_version = 0;
+ char id[5];
+ int i, val, ret;
+ u16 crc;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* Try JEDEC for unknown chip or LP */
+ ret = nand_readid_op(chip, 0x40, id, sizeof(id));
+ if (ret || strncmp(id, "JEDEC", sizeof(id)))
+ return 0;
+
+ /* JEDEC chip: allocate a buffer to hold its parameter page */
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ if (!nand_has_exec_op(chip) || chip->controller->supported_op.data_only_read)
+ use_datain = true;
+
+ for (i = 0; i < JEDEC_PARAM_PAGES; i++) {
+ if (!i)
+ ret = nand_read_param_page_op(chip, 0x40, p,
+ sizeof(*p));
+ else if (use_datain)
+ ret = nand_read_data_op(chip, p, sizeof(*p), true,
+ false);
+ else
+ ret = nand_change_read_column_op(chip, sizeof(*p) * i,
+ p, sizeof(*p), true);
+ if (ret) {
+ ret = 0;
+ goto free_jedec_param_page;
+ }
+
+ crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 510);
+ if (crc == le16_to_cpu(p->crc))
+ break;
+ }
+
+ if (i == JEDEC_PARAM_PAGES) {
+ pr_err("Could not find valid JEDEC parameter page; aborting\n");
+ goto free_jedec_param_page;
+ }
+
+ /* Check version */
+ val = le16_to_cpu(p->revision);
+ if (val & (1 << 2))
+ jedec_version = 10;
+ else if (val & (1 << 1))
+ jedec_version = 1; /* vendor specific version */
+
+ if (!jedec_version) {
+ pr_info("unsupported JEDEC version: %d\n", val);
+ goto free_jedec_param_page;
+ }
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
+ if (!chip->parameters.model) {
+ ret = -ENOMEM;
+ goto free_jedec_param_page;
+ }
+
+ if (p->opt_cmd[0] & JEDEC_OPT_CMD_READ_CACHE)
+ chip->parameters.supports_read_cache = true;
+
+ memorg->pagesize = le32_to_cpu(p->byte_per_page);
+ mtd->writesize = memorg->pagesize;
+
+ /* See the comment for nand_flash_detect_onfi(). */
+ memorg->pages_per_eraseblock =
+ 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize = memorg->pages_per_eraseblock * memorg->pagesize;
+
+ memorg->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ mtd->oobsize = memorg->oobsize;
+
+ memorg->luns_per_target = p->lun_count;
+ memorg->planes_per_lun = 1 << p->multi_plane_addr;
+
+ /* See the comment for nand_flash_detect_onfi(). */
+ memorg->eraseblocks_per_lun =
+ 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ memorg->bits_per_cell = p->bits_per_cell;
+
+ if (le16_to_cpu(p->features) & JEDEC_FEATURE_16_BIT_BUS)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ /* ECC info */
+ ecc = &p->ecc_info[0];
+
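+ /* codeword_size is the log2 of the ECC step size, so 9 means 512 bytes. */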
+ if (ecc->codeword_size >= 9) {
+ struct nand_ecc_props requirements = {
+ .strength = ecc->ecc_bits,
+ .step_size = 1 << ecc->codeword_size,
+ };
+
+ nanddev_set_ecc_requirements(base, &requirements);
+ } else {
+ pr_warn("Invalid codeword size\n");
+ }
+
+ ret = 1;
+
+free_jedec_param_page:
+ kfree(p);
+ return ret;
+}
diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c
new file mode 100644
index 0000000000..743792edf9
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_legacy.c
@@ -0,0 +1,644 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * This file contains all legacy helpers/code that should be removed
+ * at some point.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+
+#include "internals.h"
+
+/**
+ * nand_read_byte - [DEFAULT] read one byte from the chip
+ * @chip: NAND chip object
+ *
+ * Default read function for 8bit buswidth
+ */
+static uint8_t nand_read_byte(struct nand_chip *chip)
+{
+ return readb(chip->legacy.IO_ADDR_R);
+}
+
+/**
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
+ * @chip: NAND chip object
+ *
+ * Default read function for 16bit buswidth with endianness conversion.
+ */
+static uint8_t nand_read_byte16(struct nand_chip *chip)
+{
+ return (uint8_t) cpu_to_le16(readw(chip->legacy.IO_ADDR_R));
+}
+
+/**
+ * nand_select_chip - [DEFAULT] control CE line
+ * @chip: NAND chip object
+ * @chipnr: chipnumber to select, -1 for deselect
+ *
+ * Default select function for 1 chip devices.
+ */
+static void nand_select_chip(struct nand_chip *chip, int chipnr)
+{
+ switch (chipnr) {
+ case -1:
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ 0 | NAND_CTRL_CHANGE);
+ break;
+ case 0:
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/**
+ * nand_write_byte - [DEFAULT] write single byte to chip
+ * @chip: NAND chip object
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0]
+ */
+static void nand_write_byte(struct nand_chip *chip, uint8_t byte)
+{
+ chip->legacy.write_buf(chip, &byte, 1);
+}
+
+/**
+ * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
+ * @chip: NAND chip object
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
+ */
+static void nand_write_byte16(struct nand_chip *chip, uint8_t byte)
+{
+ uint16_t word = byte;
+
+ /*
+ * It's not entirely clear what should happen to I/O[15:8] when writing
+ * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
+ *
+ * When the host supports a 16-bit bus width, only data is
+ * transferred at the 16-bit width. All address and command line
+ * transfers shall use only the lower 8-bits of the data bus. During
+ * command transfers, the host may place any value on the upper
+ * 8-bits of the data bus. During address transfers, the host shall
+ * set the upper 8-bits of the data bus to 00h.
+ *
+ * One user of the write_byte callback is nand_set_features. The
+ * four parameters are specified to be written to I/O[7:0], but this is
+ * neither an address nor a command transfer. Let's assume a 0 on the
+ * upper I/O lines is OK.
+ */
+ chip->legacy.write_buf(chip, (uint8_t *)&word, 2);
+}
+
+/**
+ * nand_write_buf - [DEFAULT] write buffer to chip
+ * @chip: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 8bit buswidth.
+ */
+static void nand_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
+{
+ iowrite8_rep(chip->legacy.IO_ADDR_W, buf, len);
+}
+
+/**
+ * nand_read_buf - [DEFAULT] read chip data into buffer
+ * @chip: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 8bit buswidth.
+ */
+static void nand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ ioread8_rep(chip->legacy.IO_ADDR_R, buf, len);
+}
+
+/**
+ * nand_write_buf16 - [DEFAULT] write buffer to chip
+ * @chip: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 16bit buswidth.
+ */
+static void nand_write_buf16(struct nand_chip *chip, const uint8_t *buf,
+ int len)
+{
+ u16 *p = (u16 *) buf;
+
+ iowrite16_rep(chip->legacy.IO_ADDR_W, p, len >> 1);
+}
+
+/**
+ * nand_read_buf16 - [DEFAULT] read chip data into buffer
+ * @chip: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 16bit buswidth.
+ */
+static void nand_read_buf16(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ u16 *p = (u16 *) buf;
+
+ ioread16_rep(chip->legacy.IO_ADDR_R, p, len >> 1);
+}
+
+/**
+ * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
+ * @chip: NAND chip object
+ * @timeo: Timeout
+ *
+ * Helper function for nand_wait_ready(), used when we need to wait in
+ * interrupt context.
+ */
+static void panic_nand_wait_ready(struct nand_chip *chip, unsigned long timeo)
+{
+ int i;
+
+ /* Wait for the device to get ready */
+ for (i = 0; i < timeo; i++) {
+ if (chip->legacy.dev_ready(chip))
+ break;
+ touch_softlockup_watchdog();
+ mdelay(1);
+ }
+}
+
+/**
+ * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
+ * @chip: NAND chip object
+ *
+ * Wait for the ready pin after a command, and warn if a timeout occurs.
+ */
+void nand_wait_ready(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned long timeo = 400;
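+ /*
+ * 400 doubles as the panic-path loop count (1ms per iteration) and as
+ * the timeout in ms below, i.e. roughly 400ms either way.
+ */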
+
+ if (mtd->oops_panic_write)
+ return panic_nand_wait_ready(chip, timeo);
+
+ /* Wait until command is processed or timeout occurs */
+ timeo = jiffies + msecs_to_jiffies(timeo);
+ do {
+ if (chip->legacy.dev_ready(chip))
+ return;
+ cond_resched();
+ } while (time_before(jiffies, timeo));
+
+ if (!chip->legacy.dev_ready(chip))
+ pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
+}
+EXPORT_SYMBOL_GPL(nand_wait_ready);
+
+/**
+ * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
+ * @chip: NAND chip object
+ * @timeo: Timeout in ms
+ *
+ * Wait for status ready (i.e. command done) or timeout.
+ */
+static void nand_wait_status_ready(struct nand_chip *chip, unsigned long timeo)
+{
+ int ret;
+
+ timeo = jiffies + msecs_to_jiffies(timeo);
+ do {
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status), true,
+ false);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
+ break;
+ touch_softlockup_watchdog();
+ } while (time_before(jiffies, timeo));
+}
+
+/**
+ * nand_command - [DEFAULT] Send command to NAND device
+ * @chip: NAND chip object
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This function is used for small page devices
+ * (512 Bytes per page).
+ */
+static void nand_command(struct nand_chip *chip, unsigned int command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
+
+ /* Write out the command to the device */
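+ /*
+ * On small-page devices SEQIN carries no target-area information of
+ * its own: the area to program (first 256 bytes, second 256 bytes or
+ * OOB) is selected by the read command issued right before it.
+ */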
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ chip->legacy.cmd_ctrl(chip, readcmd, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ if (command != NAND_CMD_NONE)
+ chip->legacy.cmd_ctrl(chip, command, ctrl);
+
+ /* Address cycle, when necessary */
+ ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ chip->legacy.cmd_ctrl(chip, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ if (page_addr != -1) {
+ chip->legacy.cmd_ctrl(chip, page_addr, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ chip->legacy.cmd_ctrl(chip, page_addr >> 8, ctrl);
+ if (chip->options & NAND_ROW_ADDR_3)
+ chip->legacy.cmd_ctrl(chip, page_addr >> 16, ctrl);
+ }
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * Program and erase have their own busy handlers; status and
+ * sequential-in need no delay.
+ */
+ switch (command) {
+
+ case NAND_CMD_NONE:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_READID:
+ case NAND_CMD_SET_FEATURES:
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->legacy.dev_ready)
+ break;
+ udelay(chip->legacy.chip_delay);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_STATUS,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+ nand_wait_status_ready(chip, 250);
+ return;
+
+ /* This applies to read commands */
+ case NAND_CMD_READ0:
+ /*
+ * READ0 is sometimes used to exit GET STATUS mode. When this
+ * is the case no address cycles are requested, and we can use
+ * this information to detect that we should not wait for the
+ * device to be ready.
+ */
+ if (column == -1 && page_addr == -1)
+ return;
+ fallthrough;
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!chip->legacy.dev_ready) {
+ udelay(chip->legacy.chip_delay);
+ return;
+ }
+ }
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
+ ndelay(100);
+
+ nand_wait_ready(chip);
+}
+
+static void nand_ccs_delay(struct nand_chip *chip)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+
+ /*
+ * The controller already takes care of waiting for tCCS when the RNDIN
+ * or RNDOUT command is sent, return directly.
+ */
+ if (!(chip->options & NAND_WAIT_TCCS))
+ return;
+
+ /*
+ * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
+ * (which should be safe for all NANDs).
+ */
+ if (!IS_ERR(sdr) && nand_controller_can_setup_interface(chip))
+ ndelay(sdr->tCCS_min / 1000);
+ else
+ ndelay(500);
+}
+
+/**
+ * nand_command_lp - [DEFAULT] Send command to NAND large page device
+ * @chip: NAND chip object
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This is the version for the new large page
+ * devices. We don't have the separate regions as we have in the small page
+ * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
+ */
+static void nand_command_lp(struct nand_chip *chip, unsigned int command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Emulate NAND_CMD_READOOB */
+ if (command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ /* Command latch cycle */
+ if (command != NAND_CMD_NONE)
+ chip->legacy.cmd_ctrl(chip, command,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+
+ if (column != -1 || page_addr != -1) {
+ int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ chip->legacy.cmd_ctrl(chip, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+
+ /* Only output a single addr cycle for 8bits opcodes. */
+ if (!nand_opcode_8bits(command))
+ chip->legacy.cmd_ctrl(chip, column >> 8, ctrl);
+ }
+ if (page_addr != -1) {
+ chip->legacy.cmd_ctrl(chip, page_addr, ctrl);
+ chip->legacy.cmd_ctrl(chip, page_addr >> 8,
+ NAND_NCE | NAND_ALE);
+ if (chip->options & NAND_ROW_ADDR_3)
+ chip->legacy.cmd_ctrl(chip, page_addr >> 16,
+ NAND_NCE | NAND_ALE);
+ }
+ }
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * Program and erase have their own busy handlers; status and
+ * sequential-in need no delay.
+ */
+ switch (command) {
+
+ case NAND_CMD_NONE:
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_READID:
+ case NAND_CMD_SET_FEATURES:
+ return;
+
+ case NAND_CMD_RNDIN:
+ nand_ccs_delay(chip);
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->legacy.dev_ready)
+ break;
+ udelay(chip->legacy.chip_delay);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_STATUS,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+ nand_wait_status_ready(chip, 250);
+ return;
+
+ case NAND_CMD_RNDOUT:
+ /* No ready / busy check necessary */
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_RNDOUTSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ nand_ccs_delay(chip);
+ return;
+
+ case NAND_CMD_READ0:
+ /*
+ * READ0 is sometimes used to exit GET STATUS mode. When this
+ * is the case no address cycles are requested, and we can use
+ * this information to detect that READSTART should not be
+ * issued.
+ */
+ if (column == -1 && page_addr == -1)
+ return;
+
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_READSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ fallthrough; /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay.
+ */
+ if (!chip->legacy.dev_ready) {
+ udelay(chip->legacy.chip_delay);
+ return;
+ }
+ }
+
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
+ ndelay(100);
+
+ nand_wait_ready(chip);
+}
+
+/**
+ * nand_get_set_features_notsupp - set/get features stub returning -ENOTSUPP
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four bytes array.
+ *
+ * Should be used by NAND controller drivers that do not support the SET/GET
+ * FEATURES operations.
+ */
+int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(nand_get_set_features_notsupp);
+
+/**
+ * nand_wait - [DEFAULT] wait until the command is done
+ * @chip: NAND chip structure
+ *
+ * Wait for command done. This applies to erase and program only.
+ */
+static int nand_wait(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned long timeo = 400;
+ u8 status;
+ int ret;
+
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in any
+ * case on any machine.
+ */
+ ndelay(100);
+
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
+
+ if (mtd->oops_panic_write) {
+ panic_nand_wait(chip, timeo);
+ } else {
+ timeo = jiffies + msecs_to_jiffies(timeo);
+ do {
+ if (chip->legacy.dev_ready) {
+ if (chip->legacy.dev_ready(chip))
+ break;
+ } else {
+ ret = nand_read_data_op(chip, &status,
+ sizeof(status), true,
+ false);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_READY)
+ break;
+ }
+ cond_resched();
+ } while (time_before(jiffies, timeo));
+ }
+
+ ret = nand_read_data_op(chip, &status, sizeof(status), true, false);
+ if (ret)
+ return ret;
+
+ /* This can happen in case of timeout or buggy dev_ready */
+ WARN_ON(!(status & NAND_STATUS_READY));
+ return status;
+}
+
+void nand_legacy_set_defaults(struct nand_chip *chip)
+{
+ unsigned int busw = chip->options & NAND_BUSWIDTH_16;
+
+ if (nand_has_exec_op(chip))
+ return;
+
+ /* check for proper chip_delay setup, set 20us if not */
+ if (!chip->legacy.chip_delay)
+ chip->legacy.chip_delay = 20;
+
+ /* check if a user-supplied command function was given */
+ if (!chip->legacy.cmdfunc)
+ chip->legacy.cmdfunc = nand_command;
+
+ /* check if a user-supplied wait function was given */
+ if (!chip->legacy.waitfunc)
+ chip->legacy.waitfunc = nand_wait;
+
+ if (!chip->legacy.select_chip)
+ chip->legacy.select_chip = nand_select_chip;
+
+ /* If called twice, pointers that depend on busw may need to be reset */
+ if (!chip->legacy.read_byte || chip->legacy.read_byte == nand_read_byte)
+ chip->legacy.read_byte = busw ? nand_read_byte16 : nand_read_byte;
+ if (!chip->legacy.write_buf || chip->legacy.write_buf == nand_write_buf)
+ chip->legacy.write_buf = busw ? nand_write_buf16 : nand_write_buf;
+ if (!chip->legacy.write_byte || chip->legacy.write_byte == nand_write_byte)
+ chip->legacy.write_byte = busw ? nand_write_byte16 : nand_write_byte;
+ if (!chip->legacy.read_buf || chip->legacy.read_buf == nand_read_buf)
+ chip->legacy.read_buf = busw ? nand_read_buf16 : nand_read_buf;
+}
+
+void nand_legacy_adjust_cmdfunc(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Do not replace user supplied command function! */
+ if (mtd->writesize > 512 && chip->legacy.cmdfunc == nand_command)
+ chip->legacy.cmdfunc = nand_command_lp;
+}
+
+int nand_legacy_check_hooks(struct nand_chip *chip)
+{
+ /*
+ * ->legacy.cmdfunc() is legacy and will only be used if ->exec_op() is
+ * not populated.
+ */
+ if (nand_has_exec_op(chip))
+ return 0;
+
+ /*
+ * Default functions assigned for ->legacy.cmdfunc() and
+ * ->legacy.select_chip() both expect ->legacy.cmd_ctrl() to be
+ * populated.
+ */
+ if ((!chip->legacy.cmdfunc || !chip->legacy.select_chip) &&
+ !chip->legacy.cmd_ctrl) {
+ pr_err("->legacy.cmd_ctrl() should be provided\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
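+
+/*
+ * Minimal sketch (hypothetical example, not part of this file) of what the
+ * check above expects from a legacy driver: if ->exec_op() is not provided,
+ * at least ->legacy.cmd_ctrl() must be, so that the default nand_command()
+ * and nand_select_chip() helpers can drive the bus:
+ *
+ *	static void mydrv_cmd_ctrl(struct nand_chip *chip, int cmd,
+ *				   unsigned int ctrl)
+ *	{
+ *		if (cmd == NAND_CMD_NONE)
+ *			return;
+ *
+ *		if (ctrl & NAND_CLE)
+ *			writeb(cmd, mydrv_io_base + MYDRV_CLE);
+ *		else if (ctrl & NAND_ALE)
+ *			writeb(cmd, mydrv_io_base + MYDRV_ALE);
+ *	}
+ *
+ *	chip->legacy.cmd_ctrl = mydrv_cmd_ctrl;
+ *
+ * mydrv_io_base, MYDRV_CLE and MYDRV_ALE are made-up names standing in for
+ * a real controller's register mapping.
+ */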
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
new file mode 100644
index 0000000000..e229de32ff
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "internals.h"
+
+#define MACRONIX_READ_RETRY_BIT BIT(0)
+#define MACRONIX_NUM_READ_RETRY_MODES 6
+
+#define ONFI_FEATURE_ADDR_MXIC_PROTECTION 0xA0
+#define MXIC_BLOCK_PROTECTION_ALL_LOCK 0x38
+#define MXIC_BLOCK_PROTECTION_ALL_UNLOCK 0x0
+
+#define ONFI_FEATURE_ADDR_MXIC_RANDOMIZER 0xB0
+#define MACRONIX_RANDOMIZER_BIT BIT(1)
+#define MACRONIX_RANDOMIZER_ENPGM BIT(0)
+#define MACRONIX_RANDOMIZER_RANDEN BIT(1)
+#define MACRONIX_RANDOMIZER_RANDOPT BIT(2)
+#define MACRONIX_RANDOMIZER_MODE_ENTER \
+ (MACRONIX_RANDOMIZER_ENPGM | \
+ MACRONIX_RANDOMIZER_RANDEN | \
+ MACRONIX_RANDOMIZER_RANDOPT)
+#define MACRONIX_RANDOMIZER_MODE_EXIT \
+ (MACRONIX_RANDOMIZER_RANDEN | \
+ MACRONIX_RANDOMIZER_RANDOPT)
+
+#define MXIC_CMD_POWER_DOWN 0xB9
+
+#define ONFI_FEATURE_ADDR_30LFXG18AC_OTP 0x90
+#define MACRONIX_30LFXG18AC_OTP_START_PAGE 2
+#define MACRONIX_30LFXG18AC_OTP_PAGES 30
+#define MACRONIX_30LFXG18AC_OTP_PAGE_SIZE 2112
+#define MACRONIX_30LFXG18AC_OTP_SIZE_BYTES \
+ (MACRONIX_30LFXG18AC_OTP_PAGES * \
+ MACRONIX_30LFXG18AC_OTP_PAGE_SIZE)
+
+#define MACRONIX_30LFXG18AC_OTP_EN BIT(0)
+
+struct nand_onfi_vendor_macronix {
+ u8 reserved;
+ u8 reliability_func;
+} __packed;
+
+static int macronix_nand_setup_read_retry(struct nand_chip *chip, int mode)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+
+ if (!chip->parameters.supports_set_get_features ||
+ !test_bit(ONFI_FEATURE_ADDR_READ_RETRY,
+ chip->parameters.set_feature_list))
+ return -ENOTSUPP;
+
+ feature[0] = mode;
+ return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
+}
+
+static int macronix_nand_randomizer_check_enable(struct nand_chip *chip)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ if (feature[0])
+ return feature[0];
+
+ feature[0] = MACRONIX_RANDOMIZER_MODE_ENTER;
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ /* RANDEN and RANDOPT OTP bits are programmed */
+ feature[0] = 0x0;
+ ret = nand_prog_page_op(chip, 0, 0, feature, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ feature[0] &= MACRONIX_RANDOMIZER_MODE_EXIT;
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void macronix_nand_onfi_init(struct nand_chip *chip)
+{
+ struct nand_parameters *p = &chip->parameters;
+ struct nand_onfi_vendor_macronix *mxic;
+ struct device_node *dn = nand_get_flash_node(chip);
+ int rand_otp;
+ int ret;
+
+ if (!p->onfi)
+ return;
+
+ rand_otp = of_property_read_bool(dn, "mxic,enable-randomizer-otp");
+
+ mxic = (struct nand_onfi_vendor_macronix *)p->onfi->vendor;
+	/* Subpage write is prohibited in randomizer operation */
+ if (rand_otp && chip->options & NAND_NO_SUBPAGE_WRITE &&
+ mxic->reliability_func & MACRONIX_RANDOMIZER_BIT) {
+ if (p->supports_set_get_features) {
+ bitmap_set(p->set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
+ bitmap_set(p->get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
+ ret = macronix_nand_randomizer_check_enable(chip);
+ if (ret < 0) {
+ bitmap_clear(p->set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ 1);
+ bitmap_clear(p->get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ 1);
+ pr_info("Macronix NAND randomizer failed\n");
+ } else {
+ pr_info("Macronix NAND randomizer enabled\n");
+ }
+ }
+ }
+
+ if ((mxic->reliability_func & MACRONIX_READ_RETRY_BIT) == 0)
+ return;
+
+ chip->read_retries = MACRONIX_NUM_READ_RETRY_MODES;
+ chip->ops.setup_read_retry = macronix_nand_setup_read_retry;
+
+ if (p->supports_set_get_features) {
+ bitmap_set(p->set_feature_list,
+ ONFI_FEATURE_ADDR_READ_RETRY, 1);
+ bitmap_set(p->get_feature_list,
+ ONFI_FEATURE_ADDR_READ_RETRY, 1);
+ }
+}
+
+/*
+ * The Macronix AC series does not support using SET/GET_FEATURES to change
+ * the timings, contrary to what is declared in the parameter page. Unflag
+ * this feature to avoid unnecessary downturns.
+ */
+static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
+{
+ int i;
+ static const char * const broken_get_timings[] = {
+ "MX30LF1G18AC",
+ "MX30LF1G28AC",
+ "MX30LF2G18AC",
+ "MX30LF2G28AC",
+ "MX30LF4G18AC",
+ "MX30LF4G28AC",
+ "MX60LF8G18AC",
+ "MX30UF1G18AC",
+ "MX30UF1G16AC",
+ "MX30UF2G18AC",
+ "MX30UF2G16AC",
+ "MX30UF4G18AC",
+ "MX30UF4G16AC",
+ "MX30UF4G28AC",
+ };
+
+ if (!chip->parameters.supports_set_get_features)
+ return;
+
+ i = match_string(broken_get_timings, ARRAY_SIZE(broken_get_timings),
+ chip->parameters.model);
+ if (i < 0)
+ return;
+
+ bitmap_clear(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+ bitmap_clear(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+}
+
+/*
+ * Macronix NAND supports Block Protection through the Protection (PT) pin,
+ * which is active high at power-on and protects the entire chip even when
+ * #WP is disabled. The protected area can be partitioned according to the
+ * protection bits, i.e. upper 1/2 locked, upper 1/4 locked and so on.
+ */
+static int mxic_nand_lock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_LOCK;
+ nand_select_target(chip, 0);
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret)
+ pr_err("%s all blocks failed\n", __func__);
+
+ return ret;
+}
+
+static int mxic_nand_unlock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
+ nand_select_target(chip, 0);
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret)
+ pr_err("%s all blocks failed\n", __func__);
+
+ return ret;
+}
+
+static void macronix_nand_block_protection_support(struct nand_chip *chip)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ bitmap_set(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
+ nand_select_target(chip, 0);
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret || feature[0] != MXIC_BLOCK_PROTECTION_ALL_LOCK) {
+ if (ret)
+ pr_err("Block protection check failed\n");
+
+ bitmap_clear(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+ return;
+ }
+
+ bitmap_set(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+
+ chip->ops.lock_area = mxic_nand_lock;
+ chip->ops.unlock_area = mxic_nand_unlock;
+}
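+
+/*
+ * Illustration (assumed call path, not part of this driver): once
+ * ->lock_area()/->unlock_area() are populated, the generic MTD entry
+ * points reach them through the raw NAND core, e.g.:
+ *
+ *	mtd_lock(mtd, 0, mtd->size);	// ends up in mxic_nand_lock()
+ *	mtd_unlock(mtd, 0, mtd->size);	// ends up in mxic_nand_unlock()
+ */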
+
+static int nand_power_down_op(struct nand_chip *chip)
+{
+ int ret;
+
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(MXIC_CMD_POWER_DOWN, 0),
+ };
+
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ } else {
+ chip->legacy.cmdfunc(chip, MXIC_CMD_POWER_DOWN, -1, -1);
+ }
+
+ return 0;
+}
+
+static int mxic_nand_suspend(struct nand_chip *chip)
+{
+ int ret;
+
+ nand_select_target(chip, 0);
+ ret = nand_power_down_op(chip);
+ if (ret < 0)
+ pr_err("Suspending MXIC NAND chip failed (%d)\n", ret);
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static void mxic_nand_resume(struct nand_chip *chip)
+{
+ /*
+	 * Toggle the #CS pin to resume the NAND device, regardless of the
+	 * state of the other pins (CLE, #WE, #RE). A NAND controller can
+	 * assert/de-assert #CS by sending any byte over the NAND bus, e.g. a
+	 * power-down or reset command, without checking the R/B# status.
+ */
+ nand_select_target(chip, 0);
+ nand_power_down_op(chip);
+	/* The minimum recovery time tRDP is 35 us */
+ usleep_range(35, 100);
+ nand_deselect_target(chip);
+}
+
+static void macronix_nand_deep_power_down_support(struct nand_chip *chip)
+{
+ int i;
+ static const char * const deep_power_down_dev[] = {
+ "MX30UF1G28AD",
+ "MX30UF2G28AD",
+ "MX30UF4G28AD",
+ };
+
+ i = match_string(deep_power_down_dev, ARRAY_SIZE(deep_power_down_dev),
+ chip->parameters.model);
+ if (i < 0)
+ return;
+
+ chip->ops.suspend = mxic_nand_suspend;
+ chip->ops.resume = mxic_nand_resume;
+}
+
+static int macronix_30lfxg18ac_get_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen,
+ struct otp_info *buf)
+{
+ if (len < sizeof(*buf))
+ return -EINVAL;
+
+	/* Always report that OTP is unlocked, because this type of
+	 * flash chip provides no way to check whether OTP is locked:
+	 * the subfeature parameter is implemented as a volatile
+	 * register. Technically the OTP region could be locked and
+	 * become read-only, but as there is no way to check it, we do
+	 * not allow locking it (the '_lock_user_prot_reg' callback
+	 * always returns -EOPNOTSUPP) and thus report that OTP is
+	 * unlocked.
+	 */
+ buf->locked = 0;
+ buf->start = 0;
+ buf->length = MACRONIX_30LFXG18AC_OTP_SIZE_BYTES;
+
+ *retlen = sizeof(*buf);
+
+ return 0;
+}
+
+static int macronix_30lfxg18ac_otp_enable(struct nand_chip *nand)
+{
+ u8 feature_buf[ONFI_SUBFEATURE_PARAM_LEN] = { 0 };
+
+ feature_buf[0] = MACRONIX_30LFXG18AC_OTP_EN;
+ return nand_set_features(nand, ONFI_FEATURE_ADDR_30LFXG18AC_OTP,
+ feature_buf);
+}
+
+static int macronix_30lfxg18ac_otp_disable(struct nand_chip *nand)
+{
+ u8 feature_buf[ONFI_SUBFEATURE_PARAM_LEN] = { 0 };
+
+ return nand_set_features(nand, ONFI_FEATURE_ADDR_30LFXG18AC_OTP,
+ feature_buf);
+}
+
+static int __macronix_30lfxg18ac_rw_otp(struct mtd_info *mtd,
+ loff_t offs_in_flash,
+ size_t len, size_t *retlen,
+ u_char *buf, bool write)
+{
+ struct nand_chip *nand;
+ size_t bytes_handled;
+ off_t offs_in_page;
+ u64 page;
+ int ret;
+
+ nand = mtd_to_nand(mtd);
+ nand_select_target(nand, 0);
+
+ ret = macronix_30lfxg18ac_otp_enable(nand);
+ if (ret)
+ goto out_otp;
+
+ page = offs_in_flash;
+	/* do_div() leaves the quotient in 'page' and returns the remainder. */
+ offs_in_page = do_div(page, MACRONIX_30LFXG18AC_OTP_PAGE_SIZE);
+ bytes_handled = 0;
+
+ while (bytes_handled < len &&
+ page < MACRONIX_30LFXG18AC_OTP_PAGES) {
+ size_t bytes_to_handle;
+ u64 phys_page = page + MACRONIX_30LFXG18AC_OTP_START_PAGE;
+
+ bytes_to_handle = min_t(size_t, len - bytes_handled,
+ MACRONIX_30LFXG18AC_OTP_PAGE_SIZE -
+ offs_in_page);
+
+ if (write)
+ ret = nand_prog_page_op(nand, phys_page, offs_in_page,
+ &buf[bytes_handled], bytes_to_handle);
+ else
+ ret = nand_read_page_op(nand, phys_page, offs_in_page,
+ &buf[bytes_handled], bytes_to_handle);
+ if (ret)
+ goto out_otp;
+
+ bytes_handled += bytes_to_handle;
+ offs_in_page = 0;
+ page++;
+ }
+
+ *retlen = bytes_handled;
+
+out_otp:
+ if (ret)
+ dev_err(&mtd->dev, "failed to perform OTP IO: %i\n", ret);
+
+ ret = macronix_30lfxg18ac_otp_disable(nand);
+ if (ret)
+ dev_err(&mtd->dev, "failed to leave OTP mode after %s\n",
+ write ? "write" : "read");
+
+ nand_deselect_target(nand);
+
+ return ret;
+}
+
+static int macronix_30lfxg18ac_write_otp(struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *rlen,
+ const u_char *buf)
+{
+ return __macronix_30lfxg18ac_rw_otp(mtd, to, len, rlen, (u_char *)buf,
+ true);
+}
+
+static int macronix_30lfxg18ac_read_otp(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *rlen,
+ u_char *buf)
+{
+ return __macronix_30lfxg18ac_rw_otp(mtd, from, len, rlen, buf, false);
+}
+
+static int macronix_30lfxg18ac_lock_otp(struct mtd_info *mtd, loff_t from,
+ size_t len)
+{
+ /* See comment in 'macronix_30lfxg18ac_get_otp_info()'. */
+ return -EOPNOTSUPP;
+}
+
+static void macronix_nand_setup_otp(struct nand_chip *chip)
+{
+ static const char * const supported_otp_models[] = {
+ "MX30LF1G18AC",
+ "MX30LF2G18AC",
+ "MX30LF4G18AC",
+ };
+ struct mtd_info *mtd;
+
+ if (match_string(supported_otp_models,
+ ARRAY_SIZE(supported_otp_models),
+ chip->parameters.model) < 0)
+ return;
+
+ if (!chip->parameters.supports_set_get_features)
+ return;
+
+ bitmap_set(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_30LFXG18AC_OTP, 1);
+ bitmap_set(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_30LFXG18AC_OTP, 1);
+
+ mtd = nand_to_mtd(chip);
+ mtd->_get_user_prot_info = macronix_30lfxg18ac_get_otp_info;
+ mtd->_read_user_prot_reg = macronix_30lfxg18ac_read_otp;
+ mtd->_write_user_prot_reg = macronix_30lfxg18ac_write_otp;
+ mtd->_lock_user_prot_reg = macronix_30lfxg18ac_lock_otp;
+}
+
+static int macronix_nand_init(struct nand_chip *chip)
+{
+ if (nand_is_slc(chip))
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
+ macronix_nand_fix_broken_get_timings(chip);
+ macronix_nand_onfi_init(chip);
+ macronix_nand_block_protection_support(chip);
+ macronix_nand_deep_power_down_support(chip);
+ macronix_nand_setup_otp(chip);
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops macronix_nand_manuf_ops = {
+ .init = macronix_nand_init,
+};
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
new file mode 100644
index 0000000000..c019288190
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ */
+
+#include <linux/slab.h>
+
+#include "internals.h"
+
+/*
+ * Special Micron status bit 3 indicates that the block has been
+ * corrected by on-die ECC and should be rewritten.
+ */
+#define NAND_ECC_STATUS_WRITE_RECOMMENDED BIT(3)
+
+/*
+ * On chips with 8-bit ECC, an additional bit can be used to distinguish
+ * cases where errors were corrected without needing a rewrite.
+ *
+ * Bit 4 Bit 3 Bit 0 Description
+ * ----- ----- ----- -----------
+ * 0 0 0 No Errors
+ * 0 0 1 Multiple uncorrected errors
+ * 0 1 0 4 - 6 errors corrected, recommend rewrite
+ * 0 1 1 Reserved
+ * 1 0 0 1 - 3 errors corrected
+ * 1 0 1 Reserved
+ * 1 1 0 7 - 8 errors corrected, recommend rewrite
+ */
+#define NAND_ECC_STATUS_MASK (BIT(4) | BIT(3) | BIT(0))
+#define NAND_ECC_STATUS_UNCORRECTABLE BIT(0)
+#define NAND_ECC_STATUS_4_6_CORRECTED BIT(3)
+#define NAND_ECC_STATUS_1_3_CORRECTED BIT(4)
+#define NAND_ECC_STATUS_7_8_CORRECTED (BIT(4) | BIT(3))
+
+struct nand_onfi_vendor_micron {
+ u8 two_plane_read;
+ u8 read_cache;
+ u8 read_unique_id;
+ u8 dq_imped;
+ u8 dq_imped_num_settings;
+ u8 dq_imped_feat_addr;
+ u8 rb_pulldown_strength;
+ u8 rb_pulldown_strength_feat_addr;
+ u8 rb_pulldown_strength_num_settings;
+ u8 otp_mode;
+ u8 otp_page_start;
+ u8 otp_data_prot_addr;
+ u8 otp_num_pages;
+ u8 otp_feat_addr;
+ u8 read_retry_options;
+ u8 reserved[72];
+ u8 param_revision;
+} __packed;
+
+struct micron_on_die_ecc {
+ bool forced;
+ bool enabled;
+ void *rawbuf;
+};
+
+struct micron_nand {
+ struct micron_on_die_ecc ecc;
+};
+
+static int micron_nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
+
+ return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
+}
+
+/*
+ * Configure chip properties from Micron vendor-specific ONFI table
+ */
+static int micron_nand_onfi_init(struct nand_chip *chip)
+{
+ struct nand_parameters *p = &chip->parameters;
+
+ if (p->onfi) {
+ struct nand_onfi_vendor_micron *micron = (void *)p->onfi->vendor;
+
+ chip->read_retries = micron->read_retry_options;
+ chip->ops.setup_read_retry = micron_nand_setup_read_retry;
+ }
+
+ if (p->supports_set_get_features) {
+ set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list);
+ set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list);
+ set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list);
+ set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list);
+ }
+
+ return 0;
+}
+
+static int micron_nand_on_die_4_ooblayout_ecc(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section >= 4)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 8;
+ oobregion->length = 8;
+
+ return 0;
+}
+
+static int micron_nand_on_die_4_ooblayout_free(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section >= 4)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 2;
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops micron_nand_on_die_4_ooblayout_ops = {
+ .ecc = micron_nand_on_die_4_ooblayout_ecc,
+ .free = micron_nand_on_die_4_ooblayout_free,
+};
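+
+/*
+ * Layout example (illustration only): on a 2048+64 byte page with 4-bit
+ * on-die ECC, each of the four 16-byte OOB sections holds 6 free bytes at
+ * offset (16*n)+2 and 8 ECC bytes at offset (16*n)+8, so the ECC regions
+ * reported through mtd_ooblayout_ecc() are {8,8}, {24,8}, {40,8}, {56,8}.
+ */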
+
+static int micron_nand_on_die_8_ooblayout_ecc(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = mtd->oobsize - chip->ecc.total;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int micron_nand_on_die_8_ooblayout_free(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2;
+ oobregion->length = mtd->oobsize - chip->ecc.total - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops micron_nand_on_die_8_ooblayout_ops = {
+ .ecc = micron_nand_on_die_8_ooblayout_ecc,
+ .free = micron_nand_on_die_8_ooblayout_free,
+};
+
+static int micron_nand_on_die_ecc_setup(struct nand_chip *chip, bool enable)
+{
+ struct micron_nand *micron = nand_get_manufacturer_data(chip);
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0, };
+ int ret;
+
+ if (micron->ecc.forced)
+ return 0;
+
+ if (micron->ecc.enabled == enable)
+ return 0;
+
+ if (enable)
+ feature[0] |= ONFI_FEATURE_ON_DIE_ECC_EN;
+
+ ret = nand_set_features(chip, ONFI_FEATURE_ON_DIE_ECC, feature);
+ if (!ret)
+ micron->ecc.enabled = enable;
+
+ return ret;
+}
+
+static int micron_nand_on_die_ecc_status_4(struct nand_chip *chip, u8 status,
+ void *buf, int page,
+ int oob_required)
+{
+ struct micron_nand *micron = nand_get_manufacturer_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int step, max_bitflips = 0;
+ bool use_datain = false;
+ int ret;
+
+ if (!(status & NAND_ECC_STATUS_WRITE_RECOMMENDED)) {
+ if (status & NAND_STATUS_FAIL)
+ mtd->ecc_stats.failed++;
+
+ return 0;
+ }
+
+ /*
+ * The internal ECC doesn't tell us the number of bitflips that have
+ * been corrected, but tells us if it recommends to rewrite the block.
+ * If it's the case, we need to read the page in raw mode and compare
+ * its content to the corrected version to extract the actual number of
+ * bitflips.
+ * But before we do that, we must make sure we have all OOB bytes read
+ * in non-raw mode, even if the user did not request those bytes.
+ */
+ if (!oob_required) {
+ /*
+ * We first check which operation is supported by the controller
+ * before running it. This trick makes it possible to support
+		 * all controllers, even the most constrained ones, with
+		 * almost no performance hit.
+ *
+ * TODO: could be enhanced to avoid repeating the same check
+ * over and over in the fast path.
+ */
+ if (!nand_has_exec_op(chip) ||
+ !nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
+ true))
+ use_datain = true;
+
+ if (use_datain)
+ ret = nand_read_data_op(chip, chip->oob_poi,
+ mtd->oobsize, false, false);
+ else
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize, false);
+ if (ret)
+ return ret;
+ }
+
+ micron_nand_on_die_ecc_setup(chip, false);
+
+ ret = nand_read_page_op(chip, page, 0, micron->ecc.rawbuf,
+ mtd->writesize + mtd->oobsize);
+ if (ret)
+ return ret;
+
+ for (step = 0; step < chip->ecc.steps; step++) {
+ unsigned int offs, i, nbitflips = 0;
+ u8 *rawbuf, *corrbuf;
+
+ offs = step * chip->ecc.size;
+ rawbuf = micron->ecc.rawbuf + offs;
+ corrbuf = buf + offs;
+
+ for (i = 0; i < chip->ecc.size; i++)
+ nbitflips += hweight8(corrbuf[i] ^ rawbuf[i]);
+
+ offs = (step * 16) + 4;
+ rawbuf = micron->ecc.rawbuf + mtd->writesize + offs;
+ corrbuf = chip->oob_poi + offs;
+
+ for (i = 0; i < chip->ecc.bytes + 4; i++)
+ nbitflips += hweight8(corrbuf[i] ^ rawbuf[i]);
+
+ if (WARN_ON(nbitflips > chip->ecc.strength))
+ return -EINVAL;
+
+ max_bitflips = max(nbitflips, max_bitflips);
+ mtd->ecc_stats.corrected += nbitflips;
+ }
+
+ return max_bitflips;
+}
+
+static int micron_nand_on_die_ecc_status_8(struct nand_chip *chip, u8 status)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /*
+ * With 8/512 we have more information but still don't know precisely
+ * how many bit-flips were seen.
+ */
+ switch (status & NAND_ECC_STATUS_MASK) {
+ case NAND_ECC_STATUS_UNCORRECTABLE:
+ mtd->ecc_stats.failed++;
+ return 0;
+ case NAND_ECC_STATUS_1_3_CORRECTED:
+ mtd->ecc_stats.corrected += 3;
+ return 3;
+ case NAND_ECC_STATUS_4_6_CORRECTED:
+ mtd->ecc_stats.corrected += 6;
+ /* rewrite recommended */
+ return 6;
+ case NAND_ECC_STATUS_7_8_CORRECTED:
+ mtd->ecc_stats.corrected += 8;
+ /* rewrite recommended */
+ return 8;
+ default:
+ return 0;
+ }
+}
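+
+/*
+ * Worked example (illustration only): a status byte of 0x18 has both bit 4
+ * and bit 3 set, which the mask resolves to NAND_ECC_STATUS_7_8_CORRECTED:
+ *
+ *	micron_nand_on_die_ecc_status_8(chip, 0x18);	// returns 8
+ *
+ * so the function accounts for 8 corrected bitflips and lets the caller
+ * compare that against the bitflip threshold to schedule a rewrite.
+ */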
+
+static int
+micron_nand_read_page_on_die_ecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ bool use_datain = false;
+ u8 status;
+ int ret, max_bitflips = 0;
+
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ goto out;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ goto out;
+
+ /*
+ * We first check which operation is supported by the controller before
+ * running it. This trick makes it possible to support all controllers,
+	 * even the most constrained ones, with almost no performance hit.
+ *
+ * TODO: could be enhanced to avoid repeating the same check over and
+ * over in the fast path.
+ */
+ if (!nand_has_exec_op(chip) ||
+ !nand_read_data_op(chip, buf, mtd->writesize, false, true))
+ use_datain = true;
+
+ if (use_datain) {
+ ret = nand_exit_status_op(chip);
+ if (ret)
+ goto out;
+
+ ret = nand_read_data_op(chip, buf, mtd->writesize, false,
+ false);
+ if (!ret && oob_required)
+ ret = nand_read_data_op(chip, chip->oob_poi,
+ mtd->oobsize, false, false);
+ } else {
+ ret = nand_change_read_column_op(chip, 0, buf, mtd->writesize,
+ false);
+ if (!ret && oob_required)
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize, false);
+ }
+
+ if (chip->ecc.strength == 4)
+ max_bitflips = micron_nand_on_die_ecc_status_4(chip, status,
+ buf, page,
+ oob_required);
+ else
+ max_bitflips = micron_nand_on_die_ecc_status_8(chip, status);
+
+out:
+ micron_nand_on_die_ecc_setup(chip, false);
+
+ return ret ? ret : max_bitflips;
+}
+
+static int
+micron_nand_write_page_on_die_ecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+
+ ret = nand_write_page_raw(chip, buf, oob_required, page);
+ micron_nand_on_die_ecc_setup(chip, false);
+
+ return ret;
+}
+
+enum {
+ /* The NAND flash doesn't support on-die ECC */
+ MICRON_ON_DIE_UNSUPPORTED,
+
+ /*
+ * The NAND flash supports on-die ECC and it can be
+ * enabled/disabled by a set features command.
+ */
+ MICRON_ON_DIE_SUPPORTED,
+
+ /*
+ * The NAND flash supports on-die ECC, and it cannot be
+ * disabled.
+ */
+ MICRON_ON_DIE_MANDATORY,
+};
+
+#define MICRON_ID_INTERNAL_ECC_MASK GENMASK(1, 0)
+#define MICRON_ID_ECC_ENABLED BIT(7)
+
+/*
+ * Try to detect if the NAND supports on-die ECC. To do this, we enable
+ * the feature, and read back if it has been enabled as expected. We
+ * also check if it can be disabled, because some Micron NANDs do not
+ * allow disabling the on-die ECC and we don't support such NANDs for
+ * now.
+ *
+ * This function also has the side effect of disabling on-die ECC if
+ * it had been left enabled by the firmware/bootloader.
+ */
+static int micron_supports_on_die_ecc(struct nand_chip *chip)
+{
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ u8 id[5];
+ int ret;
+
+ if (!chip->parameters.onfi)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ if (nanddev_bits_per_cell(&chip->base) != 1)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ /*
+ * We only support on-die ECC of 4/512 or 8/512
+ */
+ if (requirements->strength != 4 && requirements->strength != 8)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ /* 0x2 means on-die ECC is available. */
+ if (chip->id.len != 5 ||
+ (chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ /*
+ * It seems that there are devices which do not support ECC officially.
+	 * At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices support
+	 * enabling the ECC feature but do not reflect that in the READ_ID
+	 * table. So we have to make sure we disable the ECC feature directly
+	 * after the READ_ID command. Later we can evaluate the
+ * ECC_ENABLE support.
+ */
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ ret = micron_nand_on_die_ecc_setup(chip, false);
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ if (!(id[4] & MICRON_ID_ECC_ENABLED))
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ if (id[4] & MICRON_ID_ECC_ENABLED)
+ return MICRON_ON_DIE_MANDATORY;
+
+ return MICRON_ON_DIE_SUPPORTED;
+}
+
+static int micron_nand_init(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(base);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct micron_nand *micron;
+ int ondie;
+ int ret;
+
+ micron = kzalloc(sizeof(*micron), GFP_KERNEL);
+ if (!micron)
+ return -ENOMEM;
+
+ nand_set_manufacturer_data(chip, micron);
+
+ ret = micron_nand_onfi_init(chip);
+ if (ret)
+ goto err_free_manuf_data;
+
+ chip->options |= NAND_BBM_FIRSTPAGE;
+
+ if (mtd->writesize == 2048)
+ chip->options |= NAND_BBM_SECONDPAGE;
+
+ ondie = micron_supports_on_die_ecc(chip);
+
+ if (ondie == MICRON_ON_DIE_MANDATORY &&
+ chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_DIE) {
+ pr_err("On-die ECC forcefully enabled, not supported\n");
+ ret = -EINVAL;
+ goto err_free_manuf_data;
+ }
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE) {
+ if (ondie == MICRON_ON_DIE_UNSUPPORTED) {
+ pr_err("On-die ECC selected but not supported\n");
+ ret = -EINVAL;
+ goto err_free_manuf_data;
+ }
+
+ if (ondie == MICRON_ON_DIE_MANDATORY) {
+ micron->ecc.forced = true;
+ micron->ecc.enabled = true;
+ }
+
+ /*
+		 * In case of 4-bit on-die ECC, we need a buffer to store a
+ * page dumped in raw mode so that we can compare its content
+ * to the same page after ECC correction happened and extract
+ * the real number of bitflips from this comparison.
+		 * That's not needed for 8-bit ECC, because the status exposes
+ * a better approximation of the number of bitflips in a page.
+ */
+ if (requirements->strength == 4) {
+ micron->ecc.rawbuf = kmalloc(mtd->writesize +
+ mtd->oobsize,
+ GFP_KERNEL);
+ if (!micron->ecc.rawbuf) {
+ ret = -ENOMEM;
+ goto err_free_manuf_data;
+ }
+ }
+
+ if (requirements->strength == 4)
+ mtd_set_ooblayout(mtd,
+ &micron_nand_on_die_4_ooblayout_ops);
+ else
+ mtd_set_ooblayout(mtd,
+ &micron_nand_on_die_8_ooblayout_ops);
+
+ chip->ecc.bytes = requirements->strength * 2;
+ chip->ecc.size = 512;
+ chip->ecc.strength = requirements->strength;
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
+ chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
+ chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
+
+ if (ondie == MICRON_ON_DIE_MANDATORY) {
+ chip->ecc.read_page_raw = nand_read_page_raw_notsupp;
+ chip->ecc.write_page_raw = nand_write_page_raw_notsupp;
+ } else {
+ if (!chip->ecc.read_page_raw)
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ if (!chip->ecc.write_page_raw)
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ }
+ }
+
+ return 0;
+
+err_free_manuf_data:
+ kfree(micron->ecc.rawbuf);
+ kfree(micron);
+
+ return ret;
+}
+
+static void micron_nand_cleanup(struct nand_chip *chip)
+{
+ struct micron_nand *micron = nand_get_manufacturer_data(chip);
+
+ kfree(micron->ecc.rawbuf);
+ kfree(micron);
+}
+
+static void micron_fixup_onfi_param_page(struct nand_chip *chip,
+ struct nand_onfi_params *p)
+{
+ /*
+ * MT29F1G08ABAFAWP-ITE:F and possibly others report 00 00 for the
+ * revision number field of the ONFI parameter page. Assume ONFI
+ * version 1.0 if the revision number is 00 00.
+ */
+ if (le16_to_cpu(p->revision) == 0)
+ p->revision = cpu_to_le16(ONFI_VERSION_1_0);
+}
+
+const struct nand_manufacturer_ops micron_nand_manuf_ops = {
+ .init = micron_nand_init,
+ .cleanup = micron_nand_cleanup,
+ .fixup_onfi_param_page = micron_fixup_onfi_param_page,
+};
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
new file mode 100644
index 0000000000..861975e44b
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * This file contains all ONFI helpers.
+ */
+
+#include <linux/slab.h>
+
+#include "internals.h"
+
+#define ONFI_PARAM_PAGES 3
+
+u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
+{
+	int i;
+
+	while (len--) {
+ crc ^= *p++ << 8;
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
+ }
+
+ return crc;
+}
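+
+/*
+ * Usage sketch (illustration only): this is the CRC-16 with polynomial
+ * 0x8005 that the ONFI specification mandates for the parameter page. A
+ * page is valid when the CRC over its first 254 bytes matches the
+ * little-endian CRC stored in its last two bytes:
+ *
+ *	u16 crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254);
+ *	bool valid = (crc == le16_to_cpu(p->crc));
+ */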
+
+/* Parse the Extended Parameter Page. */
+static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
+ struct nand_onfi_params *p)
+{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements;
+ struct onfi_ext_param_page *ep;
+ struct onfi_ext_section *s;
+ struct onfi_ext_ecc_info *ecc;
+ uint8_t *cursor;
+ int ret;
+ int len;
+ int i;
+
+ len = le16_to_cpu(p->ext_param_page_length) * 16;
+ ep = kmalloc(len, GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ /*
+ * Use the Change Read Column command to skip the ONFI param pages and
+ * ensure we read at the right location.
+ */
+ ret = nand_change_read_column_op(chip,
+ sizeof(*p) * p->num_of_param_pages,
+ ep, len, true);
+ if (ret)
+ goto ext_out;
+
+ ret = -EINVAL;
+ if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
+ != le16_to_cpu(ep->crc))) {
+		pr_debug("CRC check failed.\n");
+ goto ext_out;
+ }
+
+ /*
+ * Check the signature.
+	 * We do not strictly follow the ONFI spec here; this may change
+	 * in the future.
+ */
+ if (strncmp(ep->sig, "EPPS", 4)) {
+ pr_debug("The signature is invalid.\n");
+ goto ext_out;
+ }
+
+ /* find the ECC section. */
+ cursor = (uint8_t *)(ep + 1);
+ for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
+ s = ep->sections + i;
+ if (s->type == ONFI_SECTION_TYPE_2)
+ break;
+ cursor += s->length * 16;
+ }
+ if (i == ONFI_EXT_SECTION_MAX) {
+		pr_debug("Could not find the ECC section.\n");
+ goto ext_out;
+ }
+
+ /* get the info we want. */
+ ecc = (struct onfi_ext_ecc_info *)cursor;
+
+ if (!ecc->codeword_size) {
+ pr_debug("Invalid codeword size\n");
+ goto ext_out;
+ }
+
+ requirements.strength = ecc->ecc_bits;
+ requirements.step_size = 1 << ecc->codeword_size;
+ nanddev_set_ecc_requirements(base, &requirements);
+
+ ret = 0;
+
+ext_out:
+ kfree(ep);
+ return ret;
+}
+
+/*
+ * Recover data with bit-wise majority
+ */
+static void nand_bit_wise_majority(const void **srcbufs,
+ unsigned int nsrcbufs,
+ void *dstbuf,
+ unsigned int bufsize)
+{
+ int i, j, k;
+
+ for (i = 0; i < bufsize; i++) {
+ u8 val = 0;
+
+ for (j = 0; j < 8; j++) {
+ unsigned int cnt = 0;
+
+ for (k = 0; k < nsrcbufs; k++) {
+ const u8 *srcbuf = srcbufs[k];
+
+ if (srcbuf[i] & BIT(j))
+ cnt++;
+ }
+
+ if (cnt > nsrcbufs / 2)
+ val |= BIT(j);
+ }
+
+ ((u8 *)dstbuf)[i] = val;
+ }
+}
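+
+/*
+ * Worked example (illustration only): with nsrcbufs = 3 and one corrupted
+ * copy, the per-bit majority vote recovers the original byte:
+ *
+ *	srcbufs[0][i] = 0xA5, srcbufs[1][i] = 0xA5, srcbufs[2][i] = 0xB5
+ *	=> dstbuf[i] = 0xA5, since a bit must be set in more than half of
+ *	   the copies (here at least 2 of 3) to be set in the result.
+ */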
+
+/*
+ * Check if the NAND chip is ONFI compliant; returns 1 if it is, 0 otherwise.
+ */
+int nand_onfi_detect(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ struct nand_onfi_params *p = NULL, *pbuf;
+ struct onfi_params *onfi;
+ bool use_datain = false;
+ int onfi_version = 0;
+ char id[4];
+ int i, ret, val;
+ u16 crc;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* Try ONFI for unknown chip or LP */
+ ret = nand_readid_op(chip, 0x20, id, sizeof(id));
+ if (ret || strncmp(id, "ONFI", 4))
+ return 0;
+
+ /* ONFI chip: allocate a buffer to hold its parameter page */
+ pbuf = kzalloc((sizeof(*pbuf) * ONFI_PARAM_PAGES), GFP_KERNEL);
+ if (!pbuf)
+ return -ENOMEM;
+
+ if (!nand_has_exec_op(chip) || chip->controller->supported_op.data_only_read)
+ use_datain = true;
+
+ for (i = 0; i < ONFI_PARAM_PAGES; i++) {
+ if (!i)
+ ret = nand_read_param_page_op(chip, 0, &pbuf[i],
+ sizeof(*pbuf));
+ else if (use_datain)
+ ret = nand_read_data_op(chip, &pbuf[i], sizeof(*pbuf),
+ true, false);
+ else
+ ret = nand_change_read_column_op(chip, sizeof(*pbuf) * i,
+ &pbuf[i], sizeof(*pbuf),
+ true);
+ if (ret) {
+ ret = 0;
+ goto free_onfi_param_page;
+ }
+
+ crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)&pbuf[i], 254);
+ if (crc == le16_to_cpu(pbuf[i].crc)) {
+ p = &pbuf[i];
+ break;
+ }
+ }
+
+ if (i == ONFI_PARAM_PAGES) {
+ const void *srcbufs[ONFI_PARAM_PAGES];
+ unsigned int j;
+
+ for (j = 0; j < ONFI_PARAM_PAGES; j++)
+ srcbufs[j] = pbuf + j;
+
+ pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n");
+ nand_bit_wise_majority(srcbufs, ONFI_PARAM_PAGES, pbuf,
+ sizeof(*pbuf));
+
+ crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)pbuf, 254);
+ if (crc != le16_to_cpu(pbuf->crc)) {
+ pr_err("ONFI parameter recovery failed, aborting\n");
+ goto free_onfi_param_page;
+ }
+ p = pbuf;
+ }
+
+ if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
+ chip->manufacturer.desc->ops->fixup_onfi_param_page)
+ chip->manufacturer.desc->ops->fixup_onfi_param_page(chip, p);
+
+ /* Check version */
+ val = le16_to_cpu(p->revision);
+ if (val & ONFI_VERSION_2_3)
+ onfi_version = 23;
+ else if (val & ONFI_VERSION_2_2)
+ onfi_version = 22;
+ else if (val & ONFI_VERSION_2_1)
+ onfi_version = 21;
+ else if (val & ONFI_VERSION_2_0)
+ onfi_version = 20;
+ else if (val & ONFI_VERSION_1_0)
+ onfi_version = 10;
+
+ if (!onfi_version) {
+ pr_info("unsupported ONFI version: %d\n", val);
+ goto free_onfi_param_page;
+ }
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
+ if (!chip->parameters.model) {
+ ret = -ENOMEM;
+ goto free_onfi_param_page;
+ }
+
+ memorg->pagesize = le32_to_cpu(p->byte_per_page);
+ mtd->writesize = memorg->pagesize;
+
+ /*
+ * pages_per_block and blocks_per_lun may not be a power-of-2 size
+ * (don't ask me who thought of this...). MTD assumes that these
+ * dimensions will be power-of-2, so just truncate the remaining area.
+ */
+ memorg->pages_per_eraseblock =
+ 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize = memorg->pages_per_eraseblock * memorg->pagesize;
+
+ memorg->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ mtd->oobsize = memorg->oobsize;
+
+ memorg->luns_per_target = p->lun_count;
+ memorg->planes_per_lun = 1 << p->interleaved_bits;
+
+ /* See erasesize comment */
+ memorg->eraseblocks_per_lun =
+ 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ memorg->max_bad_eraseblocks_per_lun = le32_to_cpu(p->blocks_per_lun);
+ memorg->bits_per_cell = p->bits_per_cell;
+
+ if (le16_to_cpu(p->features) & ONFI_FEATURE_16_BIT_BUS)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ if (p->ecc_bits != 0xff) {
+ struct nand_ecc_props requirements = {
+ .strength = p->ecc_bits,
+ .step_size = 512,
+ };
+
+ nanddev_set_ecc_requirements(base, &requirements);
+ } else if (onfi_version >= 21 &&
+ (le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
+
+ /*
+		 * nand_flash_detect_ext_param_page() uses the
+		 * Change Read Column command, which may not be supported
+		 * by chip->legacy.cmdfunc. So try to update
+		 * chip->legacy.cmdfunc now. We do not replace a user-supplied
+		 * command function.
+ */
+ nand_legacy_adjust_cmdfunc(chip);
+
+ /* The Extended Parameter Page is supported since ONFI 2.1. */
+ if (nand_flash_detect_ext_param_page(chip, p))
+ pr_warn("Failed to detect ONFI extended param page\n");
+ } else {
+ pr_warn("Could not retrieve ONFI ECC requirements\n");
+ }
+
+ /* Save some parameters from the parameter page for future use */
+ if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_SET_GET_FEATURES) {
+ chip->parameters.supports_set_get_features = true;
+ bitmap_set(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+ bitmap_set(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+ }
+
+ if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_READ_CACHE)
+ chip->parameters.supports_read_cache = true;
+
+ onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
+ if (!onfi) {
+ ret = -ENOMEM;
+ goto free_model;
+ }
+
+ onfi->version = onfi_version;
+ onfi->tPROG = le16_to_cpu(p->t_prog);
+ onfi->tBERS = le16_to_cpu(p->t_bers);
+ onfi->tR = le16_to_cpu(p->t_r);
+ onfi->tCCS = le16_to_cpu(p->t_ccs);
+ onfi->fast_tCAD = le16_to_cpu(p->nvddr_nvddr2_features) & BIT(0);
+ onfi->sdr_timing_modes = le16_to_cpu(p->sdr_timing_modes);
+ if (le16_to_cpu(p->features) & ONFI_FEATURE_NV_DDR)
+ onfi->nvddr_timing_modes = le16_to_cpu(p->nvddr_timing_modes);
+ onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
+ memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
+ chip->parameters.onfi = onfi;
+
+ /* Identification done, free the full ONFI parameter page and exit */
+ kfree(pbuf);
+
+ return 1;
+
+free_model:
+ kfree(chip->parameters.model);
+free_onfi_param_page:
+ kfree(pbuf);
+
+ return ret;
+}
diff --git a/drivers/mtd/nand/raw/nand_samsung.c b/drivers/mtd/nand/raw/nand_samsung.c
new file mode 100644
index 0000000000..0be6b75638
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_samsung.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ */
+
+#include "internals.h"
+
+static void samsung_nand_decode_id(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) */
+ if (chip->id.len == 6 && !nand_is_slc(chip) &&
+ chip->id.data[5] != 0x00) {
+ u8 extid = chip->id.data[3];
+
+ /* Get pagesize */
+ memorg->pagesize = 2048 << (extid & 0x03);
+ mtd->writesize = memorg->pagesize;
+
+ extid >>= 2;
+
+ /* Get oobsize */
+ switch (((extid >> 2) & 0x4) | (extid & 0x3)) {
+ case 1:
+ memorg->oobsize = 128;
+ break;
+ case 2:
+ memorg->oobsize = 218;
+ break;
+ case 3:
+ memorg->oobsize = 400;
+ break;
+ case 4:
+ memorg->oobsize = 436;
+ break;
+ case 5:
+ memorg->oobsize = 512;
+ break;
+ case 6:
+ memorg->oobsize = 640;
+ break;
+ default:
+ /*
+ * We should never reach this case, but if that
+ * happens, this probably means Samsung decided to use
+ * a different extended ID format, and we should find
+ * a way to support it.
+ */
+ WARN(1, "Invalid OOB size value");
+ break;
+ }
+
+ mtd->oobsize = memorg->oobsize;
+
+ /* Get blocksize */
+ extid >>= 2;
+		memorg->pages_per_eraseblock = ((128 * 1024) <<
+						(((extid >> 1) & 0x04) |
+						 (extid & 0x03))) /
+					       memorg->pagesize;
+ mtd->erasesize = (128 * 1024) <<
+ (((extid >> 1) & 0x04) | (extid & 0x03));
+
+		/* Extract ECC requirements from the 5th ID byte */
+ extid = (chip->id.data[4] >> 4) & 0x07;
+ if (extid < 5) {
+ requirements.step_size = 512;
+ requirements.strength = 1 << extid;
+ } else {
+ requirements.step_size = 1024;
+ switch (extid) {
+ case 5:
+ requirements.strength = 24;
+ break;
+ case 6:
+ requirements.strength = 40;
+ break;
+ case 7:
+ requirements.strength = 60;
+ break;
+ default:
+ WARN(1, "Could not decode ECC info");
+ requirements.step_size = 0;
+ }
+ }
+ } else {
+ nand_decode_ext_id(chip);
+
+ if (nand_is_slc(chip)) {
+ switch (chip->id.data[1]) {
+ /* K9F4G08U0D-S[I|C]B0(T00) */
+ case 0xDC:
+ requirements.step_size = 512;
+ requirements.strength = 1;
+ break;
+
+ /* K9F1G08U0E 21nm chips do not support subpage write */
+ case 0xF1:
+ if (chip->id.len > 4 &&
+ (chip->id.data[4] & GENMASK(1, 0)) == 0x1)
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ nanddev_set_ecc_requirements(base, &requirements);
+}
+
+static int samsung_nand_init(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (mtd->writesize > 512)
+ chip->options |= NAND_SAMSUNG_LP_OPTIONS;
+
+ if (!nand_is_slc(chip))
+ chip->options |= NAND_BBM_LASTPAGE;
+ else
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops samsung_nand_manuf_ops = {
+ .detect = samsung_nand_decode_id,
+ .init = samsung_nand_init,
+};
diff --git a/drivers/mtd/nand/raw/nand_sandisk.c b/drivers/mtd/nand/raw/nand_sandisk.c
new file mode 100644
index 0000000000..7c66e4187d
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_sandisk.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "internals.h"
+
+static int
+sdtnqgama_choose_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface)
+{
+ onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 0);
+
+ return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
+static int sandisk_nand_init(struct nand_chip *chip)
+{
+ if (!strncmp("SDTNQGAMA", chip->parameters.model,
+ sizeof("SDTNQGAMA") - 1))
+ chip->ops.choose_interface_config =
+ &sdtnqgama_choose_interface_config;
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops sandisk_nand_manuf_ops = {
+ .init = sandisk_nand_init,
+};
diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c
new file mode 100644
index 0000000000..7b41afc372
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_timings.c
@@ -0,0 +1,737 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Free Electrons
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ */
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include "internals.h"
+
+#define ONFI_DYN_TIMING_MAX U16_MAX
+
+/*
+ * For non-ONFI chips we use the highest possible value for tPROG and tBERS.
+ * tR and tCCS will take the default values specified in the ONFI
+ * specification for timing mode 0: 200us and 500ns, respectively.
+ *
+ * These four values are tweaked to be more accurate in the case of ONFI chips.
+ */
+static const struct nand_interface_config onfi_sdr_timings[] = {
+ /* Mode 0 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 0,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 20000,
+ .tALS_min = 50000,
+ .tAR_min = 25000,
+ .tCEA_max = 100000,
+ .tCEH_min = 20000,
+ .tCH_min = 20000,
+ .tCHZ_max = 100000,
+ .tCLH_min = 20000,
+ .tCLR_min = 20000,
+ .tCLS_min = 50000,
+ .tCOH_min = 0,
+ .tCS_min = 70000,
+ .tDH_min = 20000,
+ .tDS_min = 40000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 10000,
+ .tITC_max = 1000000,
+ .tRC_min = 100000,
+ .tREA_max = 40000,
+ .tREH_min = 30000,
+ .tRHOH_min = 0,
+ .tRHW_min = 200000,
+ .tRHZ_max = 200000,
+ .tRLOH_min = 0,
+ .tRP_min = 50000,
+ .tRR_min = 40000,
+ .tRST_max = 250000000000ULL,
+ .tWB_max = 200000,
+ .tWC_min = 100000,
+ .tWH_min = 30000,
+ .tWHR_min = 120000,
+ .tWP_min = 50000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 1 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 1,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 10000,
+ .tALS_min = 25000,
+ .tAR_min = 10000,
+ .tCEA_max = 45000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 10000,
+ .tCLR_min = 10000,
+ .tCLS_min = 25000,
+ .tCOH_min = 15000,
+ .tCS_min = 35000,
+ .tDH_min = 10000,
+ .tDS_min = 20000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 50000,
+ .tREA_max = 30000,
+ .tREH_min = 15000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRP_min = 25000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 45000,
+ .tWH_min = 15000,
+ .tWHR_min = 80000,
+ .tWP_min = 25000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 2 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 2,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 10000,
+ .tALS_min = 15000,
+ .tAR_min = 10000,
+ .tCEA_max = 30000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 10000,
+ .tCLR_min = 10000,
+ .tCLS_min = 15000,
+ .tCOH_min = 15000,
+ .tCS_min = 25000,
+ .tDH_min = 5000,
+ .tDS_min = 15000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 35000,
+ .tREA_max = 25000,
+ .tREH_min = 15000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tRP_min = 17000,
+ .tWC_min = 35000,
+ .tWH_min = 15000,
+ .tWHR_min = 80000,
+ .tWP_min = 17000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 3 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 3,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 25000,
+ .tDH_min = 5000,
+ .tDS_min = 10000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 30000,
+ .tREA_max = 20000,
+ .tREH_min = 10000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRP_min = 15000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 30000,
+ .tWH_min = 10000,
+ .tWHR_min = 80000,
+ .tWP_min = 15000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 4 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 4,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 30000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 20000,
+ .tDH_min = 5000,
+ .tDS_min = 10000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 25000,
+ .tREA_max = 20000,
+ .tREH_min = 10000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 5000,
+ .tRP_min = 12000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 25000,
+ .tWH_min = 10000,
+ .tWHR_min = 80000,
+ .tWP_min = 12000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 5 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 5,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 30000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 15000,
+ .tDH_min = 5000,
+ .tDS_min = 7000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 20000,
+ .tREA_max = 16000,
+ .tREH_min = 7000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 5000,
+ .tRP_min = 10000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 20000,
+ .tWH_min = 7000,
+ .tWHR_min = 80000,
+ .tWP_min = 10000,
+ .tWW_min = 100000,
+ },
+ },
+};
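+
+/*
+ * Note on units (illustrative sketch): all timings above are expressed in
+ * picoseconds (e.g. mode 0's tRST_max of 250ms is 250000000000ULL ps), so
+ * a controller driver converting a constraint into nanoseconds for one of
+ * its registers only needs a rounded-up division:
+ *
+ *	const struct nand_sdr_timings *sdr = &onfi_sdr_timings[0].timings.sdr;
+ *	u32 twb_ns = DIV_ROUND_UP(sdr->tWB_max, 1000);
+ */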
+
+static const struct nand_interface_config onfi_nvddr_timings[] = {
+ /* Mode 0 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 0,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 10000,
+ .tCALH_min = 10000,
+ .tCALS_min = 10000,
+ .tCAS_min = 10000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCK_min = 50000,
+ .tCS_min = 35000,
+ .tDH_min = 5000,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 5000,
+ .tDS_min = 5000,
+ .tDSC_min = 50000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 6000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 1 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 1,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 5000,
+ .tCALH_min = 5000,
+ .tCALS_min = 5000,
+ .tCAS_min = 5000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCK_min = 30000,
+ .tCS_min = 25000,
+ .tDH_min = 2500,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 2500,
+ .tDS_min = 3000,
+ .tDSC_min = 30000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 3000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 2 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 2,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 4000,
+ .tCALH_min = 4000,
+ .tCALS_min = 4000,
+ .tCAS_min = 4000,
+ .tCEH_min = 20000,
+ .tCH_min = 4000,
+ .tCK_min = 20000,
+ .tCS_min = 15000,
+ .tDH_min = 1700,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 1700,
+ .tDS_min = 2000,
+ .tDSC_min = 20000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 2000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 3 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 3,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 3000,
+ .tCALH_min = 3000,
+ .tCALS_min = 3000,
+ .tCAS_min = 3000,
+ .tCEH_min = 20000,
+ .tCH_min = 3000,
+ .tCK_min = 15000,
+ .tCS_min = 15000,
+ .tDH_min = 1300,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 1300,
+ .tDS_min = 1500,
+ .tDSC_min = 15000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 1500,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 4 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 4,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 2500,
+ .tCALH_min = 2500,
+ .tCALS_min = 2500,
+ .tCAS_min = 2500,
+ .tCEH_min = 20000,
+ .tCH_min = 2500,
+ .tCK_min = 12000,
+ .tCS_min = 15000,
+ .tDH_min = 1100,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 1000,
+ .tDS_min = 1100,
+ .tDSC_min = 12000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 1200,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 5 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 5,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 2000,
+ .tCALH_min = 2000,
+ .tCALS_min = 2000,
+ .tCAS_min = 2000,
+ .tCEH_min = 20000,
+ .tCH_min = 2000,
+ .tCK_min = 10000,
+ .tCS_min = 15000,
+ .tDH_min = 900,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 850,
+ .tDS_min = 900,
+ .tDSC_min = 10000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 1000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+};
+
+/* All NAND chips share the same reset data interface: SDR mode 0 */
+const struct nand_interface_config *nand_get_reset_interface_config(void)
+{
+ return &onfi_sdr_timings[0];
+}
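+
+/*
+ * Illustration (assumed usage, not part of this file): before a chip has
+ * been identified, core code can program the controller with this reset
+ * interface so that detection runs at timings every chip must support:
+ *
+ *	const struct nand_interface_config *conf =
+ *		nand_get_reset_interface_config();
+ *	// conf->type == NAND_SDR_IFACE, conf->timings.mode == 0
+ */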
+
+/**
+ * onfi_find_closest_sdr_mode - Derive the closest ONFI SDR timing mode given a
+ * set of timings
+ * @spec_timings: the timings to challenge
+ */
+unsigned int
+onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings)
+{
+ const struct nand_sdr_timings *onfi_timings;
+ int mode;
+
+ for (mode = ARRAY_SIZE(onfi_sdr_timings) - 1; mode > 0; mode--) {
+ onfi_timings = &onfi_sdr_timings[mode].timings.sdr;
+
+ if (spec_timings->tCCS_min <= onfi_timings->tCCS_min &&
+ spec_timings->tADL_min <= onfi_timings->tADL_min &&
+ spec_timings->tALH_min <= onfi_timings->tALH_min &&
+ spec_timings->tALS_min <= onfi_timings->tALS_min &&
+ spec_timings->tAR_min <= onfi_timings->tAR_min &&
+ spec_timings->tCEH_min <= onfi_timings->tCEH_min &&
+ spec_timings->tCH_min <= onfi_timings->tCH_min &&
+ spec_timings->tCLH_min <= onfi_timings->tCLH_min &&
+ spec_timings->tCLR_min <= onfi_timings->tCLR_min &&
+ spec_timings->tCLS_min <= onfi_timings->tCLS_min &&
+ spec_timings->tCOH_min <= onfi_timings->tCOH_min &&
+ spec_timings->tCS_min <= onfi_timings->tCS_min &&
+ spec_timings->tDH_min <= onfi_timings->tDH_min &&
+ spec_timings->tDS_min <= onfi_timings->tDS_min &&
+ spec_timings->tIR_min <= onfi_timings->tIR_min &&
+ spec_timings->tRC_min <= onfi_timings->tRC_min &&
+ spec_timings->tREH_min <= onfi_timings->tREH_min &&
+ spec_timings->tRHOH_min <= onfi_timings->tRHOH_min &&
+ spec_timings->tRHW_min <= onfi_timings->tRHW_min &&
+ spec_timings->tRLOH_min <= onfi_timings->tRLOH_min &&
+ spec_timings->tRP_min <= onfi_timings->tRP_min &&
+ spec_timings->tRR_min <= onfi_timings->tRR_min &&
+ spec_timings->tWC_min <= onfi_timings->tWC_min &&
+ spec_timings->tWH_min <= onfi_timings->tWH_min &&
+ spec_timings->tWHR_min <= onfi_timings->tWHR_min &&
+ spec_timings->tWP_min <= onfi_timings->tWP_min &&
+ spec_timings->tWW_min <= onfi_timings->tWW_min)
+ return mode;
+ }
+
+ return 0;
+}
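+
+/*
+ * Example (editorial sketch, not part of this patch): a vendor driver
+ * holding datasheet SDR timings can ask for the closest standard mode.
+ * The two values shown are hypothetical; a real caller fills every
+ * *_min field.
+ *
+ *	static const struct nand_sdr_timings my_timings = {
+ *		.tCCS_min = 500000,	// 500 ns, expressed in ps
+ *		.tADL_min = 400000,	// 400 ns
+ *		// ... remaining *_min fields from the datasheet
+ *	};
+ *	unsigned int mode = onfi_find_closest_sdr_mode(&my_timings);
+ *
+ * 'mode' is the fastest ONFI SDR mode whose standard minimums are all
+ * at least as large as the chip's requirements, or 0 as a fallback.
+ */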
+
+/**
+ * onfi_find_closest_nvddr_mode - Derive the closest ONFI NVDDR timing mode
+ * given a set of timings
+ * @spec_timings: the timings to challenge
+ */
+unsigned int
+onfi_find_closest_nvddr_mode(const struct nand_nvddr_timings *spec_timings)
+{
+ const struct nand_nvddr_timings *onfi_timings;
+ int mode;
+
+ for (mode = ARRAY_SIZE(onfi_nvddr_timings) - 1; mode > 0; mode--) {
+ onfi_timings = &onfi_nvddr_timings[mode].timings.nvddr;
+
+ if (spec_timings->tCCS_min <= onfi_timings->tCCS_min &&
+ spec_timings->tAC_min <= onfi_timings->tAC_min &&
+ spec_timings->tADL_min <= onfi_timings->tADL_min &&
+ spec_timings->tCAD_min <= onfi_timings->tCAD_min &&
+ spec_timings->tCAH_min <= onfi_timings->tCAH_min &&
+ spec_timings->tCALH_min <= onfi_timings->tCALH_min &&
+ spec_timings->tCALS_min <= onfi_timings->tCALS_min &&
+ spec_timings->tCAS_min <= onfi_timings->tCAS_min &&
+ spec_timings->tCEH_min <= onfi_timings->tCEH_min &&
+ spec_timings->tCH_min <= onfi_timings->tCH_min &&
+ spec_timings->tCK_min <= onfi_timings->tCK_min &&
+ spec_timings->tCS_min <= onfi_timings->tCS_min &&
+ spec_timings->tDH_min <= onfi_timings->tDH_min &&
+ spec_timings->tDQSCK_min <= onfi_timings->tDQSCK_min &&
+ spec_timings->tDQSD_min <= onfi_timings->tDQSD_min &&
+ spec_timings->tDS_min <= onfi_timings->tDS_min &&
+ spec_timings->tDSC_min <= onfi_timings->tDSC_min &&
+ spec_timings->tRHW_min <= onfi_timings->tRHW_min &&
+ spec_timings->tRR_min <= onfi_timings->tRR_min &&
+ spec_timings->tWHR_min <= onfi_timings->tWHR_min &&
+ spec_timings->tWRCK_min <= onfi_timings->tWRCK_min &&
+ spec_timings->tWW_min <= onfi_timings->tWW_min)
+ return mode;
+ }
+
+ return 0;
+}
+
+/**
+ * onfi_fill_sdr_interface_config - Initialize an SDR interface config from a
+ * given ONFI mode
+ * @chip: The NAND chip
+ * @iface: The interface configuration to fill
+ * @timing_mode: The ONFI timing mode
+ */
+static void onfi_fill_sdr_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ unsigned int timing_mode)
+{
+ struct onfi_params *onfi = chip->parameters.onfi;
+
+ if (WARN_ON(timing_mode >= ARRAY_SIZE(onfi_sdr_timings)))
+ return;
+
+ *iface = onfi_sdr_timings[timing_mode];
+
+ /*
+ * Initialize timings that cannot be deduced from timing mode:
+ * tPROG, tBERS, tR and tCCS.
+	 * This information is part of the ONFI parameter page.
+ */
+ if (onfi) {
+ struct nand_sdr_timings *timings = &iface->timings.sdr;
+
+ /* microseconds -> picoseconds */
+ timings->tPROG_max = 1000000ULL * onfi->tPROG;
+ timings->tBERS_max = 1000000ULL * onfi->tBERS;
+ timings->tR_max = 1000000ULL * onfi->tR;
+
+ /* nanoseconds -> picoseconds */
+ timings->tCCS_min = 1000UL * onfi->tCCS;
+ }
+}
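+
+/*
+ * Worked example (editorial note): with a hypothetical ONFI parameter
+ * page reporting tPROG = 700 us and tCCS = 500 ns, the conversions
+ * above yield tPROG_max = 1000000ULL * 700 = 700000000 ps (700 us)
+ * and tCCS_min = 1000UL * 500 = 500000 ps (500 ns). All interface
+ * timings are kept in picoseconds internally.
+ */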
+
+/**
+ * onfi_fill_nvddr_interface_config - Initialize an NVDDR interface config from a
+ * given ONFI mode
+ * @chip: The NAND chip
+ * @iface: The interface configuration to fill
+ * @timing_mode: The ONFI timing mode
+ */
+static void onfi_fill_nvddr_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ unsigned int timing_mode)
+{
+ struct onfi_params *onfi = chip->parameters.onfi;
+
+ if (WARN_ON(timing_mode >= ARRAY_SIZE(onfi_nvddr_timings)))
+ return;
+
+ *iface = onfi_nvddr_timings[timing_mode];
+
+ /*
+ * Initialize timings that cannot be deduced from timing mode:
+ * tPROG, tBERS, tR, tCCS and tCAD.
+	 * This information is part of the ONFI parameter page.
+ */
+ if (onfi) {
+ struct nand_nvddr_timings *timings = &iface->timings.nvddr;
+
+ /* microseconds -> picoseconds */
+ timings->tPROG_max = 1000000ULL * onfi->tPROG;
+ timings->tBERS_max = 1000000ULL * onfi->tBERS;
+ timings->tR_max = 1000000ULL * onfi->tR;
+
+ /* nanoseconds -> picoseconds */
+ timings->tCCS_min = 1000UL * onfi->tCCS;
+
+ if (onfi->fast_tCAD)
+ timings->tCAD_min = 25000;
+ }
+}
+
+/**
+ * onfi_fill_interface_config - Initialize an interface config from a given
+ * ONFI mode
+ * @chip: The NAND chip
+ * @iface: The interface configuration to fill
+ * @type: The interface type
+ * @timing_mode: The ONFI timing mode
+ */
+void onfi_fill_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ enum nand_interface_type type,
+ unsigned int timing_mode)
+{
+ if (type == NAND_SDR_IFACE)
+ return onfi_fill_sdr_interface_config(chip, iface, timing_mode);
+ else
+ return onfi_fill_nvddr_interface_config(chip, iface, timing_mode);
+}
diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c
new file mode 100644
index 0000000000..d3d34d7192
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_toshiba.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ */
+
+#include "internals.h"
+
+/* Bit for detecting BENAND */
+#define TOSHIBA_NAND_ID4_IS_BENAND BIT(7)
+
+/* Recommended to rewrite for BENAND */
+#define TOSHIBA_NAND_STATUS_REWRITE_RECOMMENDED BIT(3)
+
+/* ECC Status Read Command for BENAND */
+#define TOSHIBA_NAND_CMD_ECC_STATUS_READ 0x7A
+
+/* ECC Status Mask for BENAND */
+#define TOSHIBA_NAND_ECC_STATUS_MASK 0x0F
+
+/* Uncorrectable Error for BENAND */
+#define TOSHIBA_NAND_ECC_STATUS_UNCORR 0x0F
+
+/* Max ECC Steps for BENAND */
+#define TOSHIBA_NAND_MAX_ECC_STEPS 8
+
+static int toshiba_nand_benand_read_eccstatus_op(struct nand_chip *chip,
+ u8 *buf)
+{
+ u8 *ecc_status = buf;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(TOSHIBA_NAND_CMD_ECC_STATUS_READ,
+ PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(chip->ecc.steps, ecc_status, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ return -ENOTSUPP;
+}
+
+static int toshiba_nand_benand_eccstatus(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+ unsigned int max_bitflips = 0;
+ u8 status, ecc_status[TOSHIBA_NAND_MAX_ECC_STEPS];
+
+ /* Check Status */
+ ret = toshiba_nand_benand_read_eccstatus_op(chip, ecc_status);
+ if (!ret) {
+ unsigned int i, bitflips = 0;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ bitflips = ecc_status[i] & TOSHIBA_NAND_ECC_STATUS_MASK;
+ if (bitflips == TOSHIBA_NAND_ECC_STATUS_UNCORR) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += bitflips;
+ max_bitflips = max(max_bitflips, bitflips);
+ }
+ }
+
+ return max_bitflips;
+ }
+
+ /*
+ * Fallback to regular status check if
+ * toshiba_nand_benand_read_eccstatus_op() failed.
+ */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL) {
+ /* uncorrected */
+ mtd->ecc_stats.failed++;
+ } else if (status & TOSHIBA_NAND_STATUS_REWRITE_RECOMMENDED) {
+ /* corrected */
+ max_bitflips = mtd->bitflip_threshold;
+ mtd->ecc_stats.corrected += max_bitflips;
+ }
+
+ return max_bitflips;
+}
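+
+/*
+ * Worked example (editorial note): for a hypothetical 4-step page, an
+ * ECC status answer of { 0x02, 0x00, 0x0F, 0x01 } decodes as 2, 0 and
+ * 1 corrected bitflips on steps 0, 1 and 3, while step 2 reports
+ * TOSHIBA_NAND_ECC_STATUS_UNCORR: ecc_stats.failed is incremented
+ * once, ecc_stats.corrected grows by 3, and the function returns 2.
+ */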
+
+static int
+toshiba_nand_read_page_benand(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = nand_read_page_raw(chip, buf, oob_required, page);
+ if (ret)
+ return ret;
+
+ return toshiba_nand_benand_eccstatus(chip);
+}
+
+static int
+toshiba_nand_read_subpage_benand(struct nand_chip *chip, uint32_t data_offs,
+ uint32_t readlen, uint8_t *bufpoi, int page)
+{
+ int ret;
+
+ ret = nand_read_page_op(chip, page, data_offs,
+ bufpoi + data_offs, readlen);
+ if (ret)
+ return ret;
+
+ return toshiba_nand_benand_eccstatus(chip);
+}
+
+static void toshiba_nand_benand_init(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+	/*
+	 * On BENAND, the entire OOB region can be used by the MTD user.
+	 * The ECC bytes computed on-die are stored in a separate,
+	 * isolated area that is not accessible to users.
+	 * This is why chip->ecc.bytes = 0.
+	 */
+ chip->ecc.bytes = 0;
+ chip->ecc.size = 512;
+ chip->ecc.strength = 8;
+ chip->ecc.read_page = toshiba_nand_read_page_benand;
+ chip->ecc.read_subpage = toshiba_nand_read_subpage_benand;
+ chip->ecc.write_page = nand_write_page_raw;
+ chip->ecc.read_page_raw = nand_read_page_raw_notsupp;
+ chip->ecc.write_page_raw = nand_write_page_raw_notsupp;
+
+ chip->options |= NAND_SUBPAGE_READ;
+
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+}
+
+static void toshiba_nand_decode_id(struct nand_chip *chip)
+{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ nand_decode_ext_id(chip);
+
+ /*
+	 * Toshiba 24nm raw SLC (i.e., not BENAND) has 32B OOB per
+ * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
+ * follows:
+ * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
+ * 110b -> 24nm
+ * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
+ */
+ if (chip->id.len >= 6 && nand_is_slc(chip) &&
+ (chip->id.data[5] & 0x7) == 0x6 /* 24nm */ &&
+ !(chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND) /* !BENAND */) {
+ memorg->oobsize = 32 * memorg->pagesize >> 9;
+ mtd->oobsize = memorg->oobsize;
+ }
+
+	/*
+	 * Extract the ECC requirements from the 6th ID byte.
+	 * For Toshiba SLC, the ECC requirements are as follows:
+	 * - 43nm: 1 bit ECC per 512 bytes is required.
+	 * - 32nm: 4 bit ECC per 512 bytes is required.
+	 * - 24nm: 8 bit ECC per 512 bytes is required.
+	 */
+ if (chip->id.len >= 6 && nand_is_slc(chip)) {
+ requirements.step_size = 512;
+ switch (chip->id.data[5] & 0x7) {
+ case 0x4:
+ requirements.strength = 1;
+ break;
+ case 0x5:
+ requirements.strength = 4;
+ break;
+ case 0x6:
+ requirements.strength = 8;
+ break;
+ default:
+ WARN(1, "Could not get ECC info");
+ requirements.step_size = 0;
+ break;
+ }
+ }
+
+ nanddev_set_ecc_requirements(base, &requirements);
+}
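+
+/*
+ * Example (editorial note): a hypothetical 24nm raw SLC part with
+ * ID byte 5 bit 7 clear (not BENAND) and (id.data[5] & 0x7) == 0x6
+ * gets 32B of OOB per 512B of page (a 2048B page thus reports 128B
+ * OOB) and an ECC requirement of 8 bits per 512-byte step.
+ */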
+
+static int
+tc58teg5dclta00_choose_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface)
+{
+ onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 5);
+
+ return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
+static int
+tc58nvg0s3e_choose_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface)
+{
+ onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 2);
+
+ return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
+static int
+th58nvg2s3hbai4_choose_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface)
+{
+ struct nand_sdr_timings *sdr = &iface->timings.sdr;
+
+ /* Start with timings from the closest timing mode, mode 4. */
+ onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4);
+
+ /* Patch timings that differ from mode 4. */
+ sdr->tALS_min = 12000;
+ sdr->tCHZ_max = 20000;
+ sdr->tCLS_min = 12000;
+ sdr->tCOH_min = 0;
+ sdr->tDS_min = 12000;
+ sdr->tRHOH_min = 25000;
+ sdr->tRHW_min = 30000;
+ sdr->tRHZ_max = 60000;
+ sdr->tWHR_min = 60000;
+
+ /* Patch timings not part of onfi timing mode. */
+ sdr->tPROG_max = 700000000;
+ sdr->tBERS_max = 5000000000;
+
+ return nand_choose_best_sdr_timings(chip, iface, sdr);
+}
+
+static int tc58teg5dclta00_init(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ chip->ops.choose_interface_config =
+ &tc58teg5dclta00_choose_interface_config;
+ chip->options |= NAND_NEED_SCRAMBLING;
+ mtd_set_pairing_scheme(mtd, &dist3_pairing_scheme);
+
+ return 0;
+}
+
+static int tc58nvg0s3e_init(struct nand_chip *chip)
+{
+ chip->ops.choose_interface_config =
+ &tc58nvg0s3e_choose_interface_config;
+
+ return 0;
+}
+
+static int th58nvg2s3hbai4_init(struct nand_chip *chip)
+{
+ chip->ops.choose_interface_config =
+ &th58nvg2s3hbai4_choose_interface_config;
+
+ return 0;
+}
+
+static int toshiba_nand_init(struct nand_chip *chip)
+{
+ if (nand_is_slc(chip))
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
+ /* Check that chip is BENAND and ECC mode is on-die */
+ if (nand_is_slc(chip) &&
+ chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
+ chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND)
+ toshiba_nand_benand_init(chip);
+
+ if (!strcmp("TC58TEG5DCLTA00", chip->parameters.model))
+ tc58teg5dclta00_init(chip);
+ if (!strncmp("TC58NVG0S3E", chip->parameters.model,
+ sizeof("TC58NVG0S3E") - 1))
+ tc58nvg0s3e_init(chip);
+ if ((!strncmp("TH58NVG2S3HBAI4", chip->parameters.model,
+ sizeof("TH58NVG2S3HBAI4") - 1)) ||
+ (!strncmp("TH58NVG3S0HBAI4", chip->parameters.model,
+ sizeof("TH58NVG3S0HBAI4") - 1)))
+ th58nvg2s3hbai4_init(chip);
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops toshiba_nand_manuf_ops = {
+ .detect = toshiba_nand_decode_id,
+ .init = toshiba_nand_init,
+};
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
new file mode 100644
index 0000000000..179b28459b
--- /dev/null
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -0,0 +1,2460 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NAND flash simulator.
+ *
+ * Author: Artem B. Bityuckiy <dedekind@oktetlabs.ru>, <dedekind@infradead.org>
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ *
+ * Note: NS means "NAND Simulator".
+ * Note: Input means input TO flash chip, output means output FROM chip.
+ */
+
+#define pr_fmt(fmt) "[nandsim]" fmt
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+/* Default simulator parameters values */
+#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
+ !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
+ !defined(CONFIG_NANDSIM_THIRD_ID_BYTE) || \
+ !defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
+#define CONFIG_NANDSIM_FIRST_ID_BYTE 0x98
+#define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
+#define CONFIG_NANDSIM_THIRD_ID_BYTE 0xFF /* No byte */
+#define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF /* No byte */
+#endif
+
+#ifndef CONFIG_NANDSIM_ACCESS_DELAY
+#define CONFIG_NANDSIM_ACCESS_DELAY 25
+#endif
+#ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
+#define CONFIG_NANDSIM_PROGRAMM_DELAY 200
+#endif
+#ifndef CONFIG_NANDSIM_ERASE_DELAY
+#define CONFIG_NANDSIM_ERASE_DELAY 2
+#endif
+#ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
+#define CONFIG_NANDSIM_OUTPUT_CYCLE 40
+#endif
+#ifndef CONFIG_NANDSIM_INPUT_CYCLE
+#define CONFIG_NANDSIM_INPUT_CYCLE 50
+#endif
+#ifndef CONFIG_NANDSIM_BUS_WIDTH
+#define CONFIG_NANDSIM_BUS_WIDTH 8
+#endif
+#ifndef CONFIG_NANDSIM_DO_DELAYS
+#define CONFIG_NANDSIM_DO_DELAYS 0
+#endif
+#ifndef CONFIG_NANDSIM_LOG
+#define CONFIG_NANDSIM_LOG 0
+#endif
+#ifndef CONFIG_NANDSIM_DBG
+#define CONFIG_NANDSIM_DBG 0
+#endif
+#ifndef CONFIG_NANDSIM_MAX_PARTS
+#define CONFIG_NANDSIM_MAX_PARTS 32
+#endif
+
+static uint access_delay = CONFIG_NANDSIM_ACCESS_DELAY;
+static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
+static uint erase_delay = CONFIG_NANDSIM_ERASE_DELAY;
+static uint output_cycle = CONFIG_NANDSIM_OUTPUT_CYCLE;
+static uint input_cycle = CONFIG_NANDSIM_INPUT_CYCLE;
+static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
+static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
+static uint log = CONFIG_NANDSIM_LOG;
+static uint dbg = CONFIG_NANDSIM_DBG;
+static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
+static unsigned int parts_num;
+static char *badblocks = NULL;
+static char *weakblocks = NULL;
+static char *weakpages = NULL;
+static unsigned int bitflips = 0;
+static char *gravepages = NULL;
+static unsigned int overridesize = 0;
+static char *cache_file = NULL;
+static unsigned int bbt;
+static unsigned int bch;
+static u_char id_bytes[8] = {
+ [0] = CONFIG_NANDSIM_FIRST_ID_BYTE,
+ [1] = CONFIG_NANDSIM_SECOND_ID_BYTE,
+ [2] = CONFIG_NANDSIM_THIRD_ID_BYTE,
+ [3] = CONFIG_NANDSIM_FOURTH_ID_BYTE,
+ [4 ... 7] = 0xFF,
+};
+
+module_param_array(id_bytes, byte, NULL, 0400);
+module_param_named(first_id_byte, id_bytes[0], byte, 0400);
+module_param_named(second_id_byte, id_bytes[1], byte, 0400);
+module_param_named(third_id_byte, id_bytes[2], byte, 0400);
+module_param_named(fourth_id_byte, id_bytes[3], byte, 0400);
+module_param(access_delay, uint, 0400);
+module_param(programm_delay, uint, 0400);
+module_param(erase_delay, uint, 0400);
+module_param(output_cycle, uint, 0400);
+module_param(input_cycle, uint, 0400);
+module_param(bus_width, uint, 0400);
+module_param(do_delays, uint, 0400);
+module_param(log, uint, 0400);
+module_param(dbg, uint, 0400);
+module_param_array(parts, ulong, &parts_num, 0400);
+module_param(badblocks, charp, 0400);
+module_param(weakblocks, charp, 0400);
+module_param(weakpages, charp, 0400);
+module_param(bitflips, uint, 0400);
+module_param(gravepages, charp, 0400);
+module_param(overridesize, uint, 0400);
+module_param(cache_file, charp, 0400);
+module_param(bbt, uint, 0400);
+module_param(bch, uint, 0400);
+
+MODULE_PARM_DESC(id_bytes, "The ID bytes returned by NAND Flash 'read ID' command");
+MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID) (obsolete)");
+MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID) (obsolete)");
+MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command (obsolete)");
+MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command (obsolete)");
+MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
+MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
+MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
+MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
+MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
+MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
+MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
+MODULE_PARM_DESC(log, "Perform logging if not zero");
+MODULE_PARM_DESC(dbg, "Output debug information if not zero");
+MODULE_PARM_DESC(parts, "Partition sizes (in erase blocks) separated by commas");
+/* Page and erase block positions for the following parameters are independent of any partitions */
+MODULE_PARM_DESC(badblocks, "Erase blocks that are initially marked bad, separated by commas");
+MODULE_PARM_DESC(weakblocks, "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
+ " separated by commas e.g. 113:2 means eb 113"
+ " can be erased only twice before failing");
+MODULE_PARM_DESC(weakpages, "Weak pages [: maximum writes (defaults to 3)]"
+ " separated by commas e.g. 1401:2 means page 1401"
+ " can be written only twice before failing");
+MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (zero by default)");
+MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
+ " separated by commas e.g. 1401:2 means page 1401"
+ " can be read only twice before failing");
+MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
+ "The size is specified in erase blocks and as the exponent of a power of two"
+ " e.g. 5 means a size of 32 erase blocks");
+MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
+MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
+MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
+ "be correctable in 512-byte blocks");
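+
+/*
+ * Example invocation (editorial note, parameter values hypothetical):
+ *
+ *	modprobe nandsim id_bytes=0x20,0xaa,0x00,0x15 badblocks=20 \
+ *		weakblocks=113:2 bitflips=3 do_delays=1
+ *
+ * simulates a chip whose 'read ID' answer is 20 aa 00 15, marks erase
+ * block 20 bad, lets block 113 survive only two erases, and injects
+ * up to 3 random bitflips per page while honoring the timing delays.
+ */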
+
+/* The largest possible page size */
+#define NS_LARGEST_PAGE_SIZE 4096
+
+/* Simulator's output macros (logging, debugging, warning, error) */
+#define NS_LOG(args...) \
+ do { if (log) pr_debug(" log: " args); } while(0)
+#define NS_DBG(args...) \
+ do { if (dbg) pr_debug(" debug: " args); } while(0)
+#define NS_WARN(args...) \
+ do { pr_warn(" warning: " args); } while(0)
+#define NS_ERR(args...) \
+ do { pr_err(" error: " args); } while(0)
+#define NS_INFO(args...) \
+ do { pr_info(" " args); } while(0)
+
+/* Busy-wait delay macros (microseconds, milliseconds) */
+#define NS_UDELAY(us) \
+ do { if (do_delays) udelay(us); } while(0)
+#define NS_MDELAY(ms) \
+	do { if (do_delays) mdelay(ms); } while(0)
+
+/* Is the nandsim structure initialized? */
+#define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)
+
+/* Good operation completion status */
+#define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))
+
+/* Operation failed completion status */
+#define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))
+
+/* Calculate the page offset in flash RAM image by (row, column) address */
+#define NS_RAW_OFFSET(ns) \
+ (((ns)->regs.row * (ns)->geom.pgszoob) + (ns)->regs.column)
+
+/* Calculate the OOB offset in flash RAM image by (row, column) address */
+#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
+
+/* Calculate the byte shift in the next page to access */
+#define NS_PAGE_BYTE_SHIFT(ns) ((ns)->regs.column + (ns)->regs.off)
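+
+/*
+ * Worked example (editorial note): with a hypothetical 2048+64 byte
+ * page (pgszoob = 2112), row = 3 and column = 5, NS_RAW_OFFSET()
+ * yields 3 * 2112 + 5 = 6341, and NS_RAW_OFFSET_OOB() points 2048
+ * bytes (pgsz) further, into that page's OOB area.
+ */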
+
+/* After a command is input, the simulator goes to one of the following states */
+#define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */
+#define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */
+#define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */
+#define STATE_CMD_PAGEPROG 0x00000004 /* start page program */
+#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
+#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
+#define STATE_CMD_STATUS 0x00000007 /* read status */
+#define STATE_CMD_SEQIN 0x00000009 /* sequential data input */
+#define STATE_CMD_READID 0x0000000A /* read ID */
+#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
+#define STATE_CMD_RESET 0x0000000C /* reset */
+#define STATE_CMD_RNDOUT 0x0000000D /* random output command */
+#define STATE_CMD_RNDOUTSTART 0x0000000E /* random output start command */
+#define STATE_CMD_MASK 0x0000000F /* command states mask */
+
+/* After an address is input, the simulator goes to one of these states */
+#define STATE_ADDR_PAGE 0x00000010 /* full (row, column) address is accepted */
+#define STATE_ADDR_SEC 0x00000020 /* sector address was accepted */
+#define STATE_ADDR_COLUMN 0x00000030 /* column address was accepted */
+#define STATE_ADDR_ZERO 0x00000040 /* one byte zero address was accepted */
+#define STATE_ADDR_MASK 0x00000070 /* address states mask */
+
+/* During data input/output the simulator is in these states */
+#define STATE_DATAIN 0x00000100 /* waiting for data input */
+#define STATE_DATAIN_MASK 0x00000100 /* data input states mask */
+
+#define STATE_DATAOUT 0x00001000 /* waiting for page data output */
+#define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */
+#define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */
+#define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */
+
+/* Previous operation is done, ready to accept new requests */
+#define STATE_READY 0x00000000
+
+/* This state is used to mark that the next state isn't known yet */
+#define STATE_UNKNOWN 0x10000000
+
+/* Simulator's actions bit masks */
+#define ACTION_CPY 0x00100000 /* copy page/OOB to the internal buffer */
+#define ACTION_PRGPAGE 0x00200000 /* program the internal buffer to flash */
+#define ACTION_SECERASE 0x00300000 /* erase sector */
+#define ACTION_ZEROOFF 0x00400000 /* don't add any offset to address */
+#define ACTION_HALFOFF 0x00500000 /* add to address half of page */
+#define ACTION_OOBOFF 0x00600000 /* add to address OOB offset */
+#define ACTION_MASK 0x00700000 /* action mask */
+
+#define NS_OPER_NUM 13 /* Number of operations supported by the simulator */
+#define NS_OPER_STATES 6 /* Maximum number of states in operation */
+
+#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
+#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
+#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
+#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
+#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
+#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
+#define OPT_SMALLPAGE (OPT_PAGE512) /* 512-byte page chips */
+
+/* Remove action bits from state */
+#define NS_STATE(x) ((x) & ~ACTION_MASK)
+
+/*
+ * Maximum number of previous states which need to be saved. Currently
+ * saving is only needed for the page program operation preceded by a
+ * read command (which is only valid for 512-byte pages).
+ */
+#define NS_MAX_PREVSTATES 1
+
+/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
+#define NS_MAX_HELD_PAGES 16
+
+/*
+ * A union to represent flash memory contents and flash buffer.
+ */
+union ns_mem {
+ u_char *byte; /* for byte access */
+ uint16_t *word; /* for 16-bit word access */
+};
+
+/*
+ * The structure which describes all the internal simulator data.
+ */
+struct nandsim {
+ struct nand_chip chip;
+ struct nand_controller base;
+ struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
+ unsigned int nbparts;
+
+ uint busw; /* flash chip bus width (8 or 16) */
+ u_char ids[8]; /* chip's ID bytes */
+ uint32_t options; /* chip's characteristic bits */
+ uint32_t state; /* current chip state */
+ uint32_t nxstate; /* next expected state */
+
+	uint32_t *op;		/* current operation, NULL if the operation isn't known yet */
+ uint32_t pstates[NS_MAX_PREVSTATES]; /* previous states */
+ uint16_t npstates; /* number of previous states saved */
+ uint16_t stateidx; /* current state index */
+
+ /* The simulated NAND flash pages array */
+ union ns_mem *pages;
+
+ /* Slab allocator for nand pages */
+ struct kmem_cache *nand_pages_slab;
+
+ /* Internal buffer of page + OOB size bytes */
+ union ns_mem buf;
+
+ /* NAND flash "geometry" */
+ struct {
+ uint64_t totsz; /* total flash size, bytes */
+ uint32_t secsz; /* flash sector (erase block) size, bytes */
+ uint pgsz; /* NAND flash page size, bytes */
+ uint oobsz; /* page OOB area size, bytes */
+ uint64_t totszoob; /* total flash size including OOB, bytes */
+		uint pgszoob;		/* page size including OOB, bytes */
+ uint secszoob; /* sector size including OOB, bytes */
+ uint pgnum; /* total number of pages */
+ uint pgsec; /* number of pages per sector */
+ uint secshift; /* bits number in sector size */
+ uint pgshift; /* bits number in page size */
+ uint pgaddrbytes; /* bytes per page address */
+ uint secaddrbytes; /* bytes per sector address */
+		uint idbytes;		/* the number of ID bytes this chip outputs */
+ } geom;
+
+ /* NAND flash internal registers */
+ struct {
+ unsigned command; /* the command register */
+ u_char status; /* the status register */
+ uint row; /* the page number */
+ uint column; /* the offset within page */
+ uint count; /* internal counter */
+ uint num; /* number of bytes which must be processed */
+ uint off; /* fixed page offset */
+ } regs;
+
+ /* NAND flash lines state */
+ struct {
+ int ce; /* chip Enable */
+ int cle; /* command Latch Enable */
+ int ale; /* address Latch Enable */
+ int wp; /* write Protect */
+ } lines;
+
+ /* Fields needed when using a cache file */
+ struct file *cfile; /* Open file */
+ unsigned long *pages_written; /* Which pages have been written */
+ void *file_buf;
+ struct page *held_pages[NS_MAX_HELD_PAGES];
+ int held_cnt;
+
+ /* debugfs entry */
+ struct dentry *dent;
+};
+
+/*
+ * Operations array. To perform any operation the simulator must pass
+ * through the corresponding chain of states.
+ */
+static struct nandsim_operations {
+ uint32_t reqopts; /* options which are required to perform the operation */
+ uint32_t states[NS_OPER_STATES]; /* operation's states */
+} ops[NS_OPER_NUM] = {
+ /* Read page + OOB from the beginning */
+ {OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Read page + OOB from the second half */
+ {OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Read OOB */
+ {OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Program page starting from the beginning */
+ {OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
+ STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Program page starting from the beginning */
+ {OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
+ STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Program page starting from the second half */
+ {OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
+ STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Program OOB */
+ {OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
+ STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Erase sector */
+ {OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
+ /* Read status */
+ {OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
+ /* Read ID */
+ {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
+ /* Large page devices read page */
+ {OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Large page devices random page read */
+ {OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+};
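+
+/*
+ * Example (editorial note): a 'read ID' sequence walks the chain
+ * STATE_CMD_READID -> STATE_ADDR_ZERO -> STATE_DATAOUT_ID ->
+ * STATE_READY: the 0x90 command selects the operation, a single zero
+ * address byte arms the ID output, the configured ID bytes are then
+ * clocked out, and the simulator returns to the ready state.
+ */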
+
+struct weak_block {
+ struct list_head list;
+ unsigned int erase_block_no;
+ unsigned int max_erases;
+ unsigned int erases_done;
+};
+
+static LIST_HEAD(weak_blocks);
+
+struct weak_page {
+ struct list_head list;
+ unsigned int page_no;
+ unsigned int max_writes;
+ unsigned int writes_done;
+};
+
+static LIST_HEAD(weak_pages);
+
+struct grave_page {
+ struct list_head list;
+ unsigned int page_no;
+ unsigned int max_reads;
+ unsigned int reads_done;
+};
+
+static LIST_HEAD(grave_pages);
+
+static unsigned long *erase_block_wear = NULL;
+static unsigned int wear_eb_count = 0;
+static unsigned long total_wear = 0;
+
+/* MTD structure for NAND controller */
+static struct mtd_info *nsmtd;
+
+static int ns_show(struct seq_file *m, void *private)
+{
+ unsigned long wmin = -1, wmax = 0, avg;
+ unsigned long deciles[10], decile_max[10], tot = 0;
+ unsigned int i;
+
+ /* Calc wear stats */
+ for (i = 0; i < wear_eb_count; ++i) {
+ unsigned long wear = erase_block_wear[i];
+ if (wear < wmin)
+ wmin = wear;
+ if (wear > wmax)
+ wmax = wear;
+ tot += wear;
+ }
+
+ for (i = 0; i < 9; ++i) {
+ deciles[i] = 0;
+ decile_max[i] = (wmax * (i + 1) + 5) / 10;
+ }
+ deciles[9] = 0;
+ decile_max[9] = wmax;
+ for (i = 0; i < wear_eb_count; ++i) {
+ int d;
+ unsigned long wear = erase_block_wear[i];
+ for (d = 0; d < 10; ++d)
+ if (wear <= decile_max[d]) {
+ deciles[d] += 1;
+ break;
+ }
+ }
+ avg = tot / wear_eb_count;
+
+ /* Output wear report */
+ seq_printf(m, "Total numbers of erases: %lu\n", tot);
+ seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count);
+ seq_printf(m, "Average number of erases: %lu\n", avg);
+ seq_printf(m, "Maximum number of erases: %lu\n", wmax);
+ seq_printf(m, "Minimum number of erases: %lu\n", wmin);
+ for (i = 0; i < 10; ++i) {
+ unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
+ if (from > decile_max[i])
+ continue;
+ seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
+ from,
+ decile_max[i],
+ deciles[i]);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ns);
+
+/**
+ * ns_debugfs_create - initialize debugfs
+ * @ns: nandsim device description object
+ *
+ * This function creates all debugfs files for nandsim device @ns. Returns zero in
+ * case of success and a negative error code in case of failure.
+ */
+static int ns_debugfs_create(struct nandsim *ns)
+{
+ struct dentry *root = nsmtd->dbg.dfs_dir;
+
+ /*
+ * Just skip debugfs initialization when the debugfs directory is
+ * missing.
+ */
+ if (IS_ERR_OR_NULL(root)) {
+ if (IS_ENABLED(CONFIG_DEBUG_FS) &&
+ !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
+ return 0;
+ }
+
+ ns->dent = debugfs_create_file("nandsim_wear_report", 0400, root, ns,
+ &ns_fops);
+ if (IS_ERR_OR_NULL(ns->dent)) {
+ NS_ERR("cannot create \"nandsim_wear_report\" debugfs entry\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void ns_debugfs_remove(struct nandsim *ns)
+{
+ debugfs_remove_recursive(ns->dent);
+}
+
+/*
+ * Allocate the array of page pointers, create a slab cache for the
+ * page contents, and initialize the array with NULL pointers.
+ *
+ * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
+ */
+static int __init ns_alloc_device(struct nandsim *ns)
+{
+ struct file *cfile;
+ int i, err;
+
+ if (cache_file) {
+ cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
+ if (IS_ERR(cfile))
+ return PTR_ERR(cfile);
+ if (!(cfile->f_mode & FMODE_CAN_READ)) {
+ NS_ERR("alloc_device: cache file not readable\n");
+ err = -EINVAL;
+ goto err_close_filp;
+ }
+ if (!(cfile->f_mode & FMODE_CAN_WRITE)) {
+ NS_ERR("alloc_device: cache file not writeable\n");
+ err = -EINVAL;
+ goto err_close_filp;
+ }
+ ns->pages_written =
+ vzalloc(array_size(sizeof(unsigned long),
+ BITS_TO_LONGS(ns->geom.pgnum)));
+ if (!ns->pages_written) {
+ NS_ERR("alloc_device: unable to allocate pages written array\n");
+ err = -ENOMEM;
+ goto err_close_filp;
+ }
+ ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
+ if (!ns->file_buf) {
+ NS_ERR("alloc_device: unable to allocate file buf\n");
+ err = -ENOMEM;
+ goto err_free_pw;
+ }
+ ns->cfile = cfile;
+
+ return 0;
+
+err_free_pw:
+ vfree(ns->pages_written);
+err_close_filp:
+ filp_close(cfile, NULL);
+
+ return err;
+ }
+
+ ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum));
+ if (!ns->pages) {
+ NS_ERR("alloc_device: unable to allocate page array\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < ns->geom.pgnum; i++) {
+ ns->pages[i].byte = NULL;
+ }
+ ns->nand_pages_slab = kmem_cache_create("nandsim",
+ ns->geom.pgszoob, 0, 0, NULL);
+ if (!ns->nand_pages_slab) {
+ NS_ERR("cache_create: unable to create kmem_cache\n");
+ err = -ENOMEM;
+ goto err_free_pg;
+ }
+
+ return 0;
+
+err_free_pg:
+ vfree(ns->pages);
+
+ return err;
+}
+
+/*
+ * Free any allocated pages, and free the array of page pointers.
+ */
+static void ns_free_device(struct nandsim *ns)
+{
+ int i;
+
+ if (ns->cfile) {
+ kfree(ns->file_buf);
+ vfree(ns->pages_written);
+ filp_close(ns->cfile, NULL);
+ return;
+ }
+
+ if (ns->pages) {
+ for (i = 0; i < ns->geom.pgnum; i++) {
+ if (ns->pages[i].byte)
+ kmem_cache_free(ns->nand_pages_slab,
+ ns->pages[i].byte);
+ }
+ kmem_cache_destroy(ns->nand_pages_slab);
+ vfree(ns->pages);
+ }
+}
+
+static char __init *ns_get_partition_name(int i)
+{
+ return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
+}
+
+/*
+ * Initialize the nandsim structure.
+ *
+ * RETURNS: 0 if success, -ERRNO if failure.
+ */
+static int __init ns_init(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nandsim *ns = nand_get_controller_data(chip);
+ int i, ret = 0;
+ uint64_t remains;
+ uint64_t next_offset;
+
+ if (NS_IS_INITIALIZED(ns)) {
+ NS_ERR("init_nandsim: nandsim is already initialized\n");
+ return -EIO;
+ }
+
+ /* Initialize the NAND flash parameters */
+ ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
+ ns->geom.totsz = mtd->size;
+ ns->geom.pgsz = mtd->writesize;
+ ns->geom.oobsz = mtd->oobsize;
+ ns->geom.secsz = mtd->erasesize;
+ ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
+ ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
+ ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
+ ns->geom.secshift = ffs(ns->geom.secsz) - 1;
+ ns->geom.pgshift = chip->page_shift;
+ ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz;
+ ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
+ ns->options = 0;
+
+ if (ns->geom.pgsz == 512) {
+ ns->options |= OPT_PAGE512;
+ if (ns->busw == 8)
+ ns->options |= OPT_PAGE512_8BIT;
+ } else if (ns->geom.pgsz == 2048) {
+ ns->options |= OPT_PAGE2048;
+ } else if (ns->geom.pgsz == 4096) {
+ ns->options |= OPT_PAGE4096;
+ } else {
+ NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
+ return -EIO;
+ }
+
+ if (ns->options & OPT_SMALLPAGE) {
+ if (ns->geom.totsz <= (32 << 20)) {
+ ns->geom.pgaddrbytes = 3;
+ ns->geom.secaddrbytes = 2;
+ } else {
+ ns->geom.pgaddrbytes = 4;
+ ns->geom.secaddrbytes = 3;
+ }
+ } else {
+ if (ns->geom.totsz <= (128 << 20)) {
+ ns->geom.pgaddrbytes = 4;
+ ns->geom.secaddrbytes = 2;
+ } else {
+ ns->geom.pgaddrbytes = 5;
+ ns->geom.secaddrbytes = 3;
+ }
+ }
+
+ /* Fill the partition_info structure */
+ if (parts_num > ARRAY_SIZE(ns->partitions)) {
+ NS_ERR("too many partitions.\n");
+ return -EINVAL;
+ }
+ remains = ns->geom.totsz;
+ next_offset = 0;
+ for (i = 0; i < parts_num; ++i) {
+ uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
+
+ if (!part_sz || part_sz > remains) {
+ NS_ERR("bad partition size.\n");
+ return -EINVAL;
+ }
+ ns->partitions[i].name = ns_get_partition_name(i);
+ if (!ns->partitions[i].name) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ ns->partitions[i].offset = next_offset;
+ ns->partitions[i].size = part_sz;
+ next_offset += ns->partitions[i].size;
+ remains -= ns->partitions[i].size;
+ }
+ ns->nbparts = parts_num;
+ if (remains) {
+ if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
+ NS_ERR("too many partitions.\n");
+ ret = -EINVAL;
+ goto free_partition_names;
+ }
+ ns->partitions[i].name = ns_get_partition_name(i);
+ if (!ns->partitions[i].name) {
+ NS_ERR("unable to allocate memory.\n");
+ ret = -ENOMEM;
+ goto free_partition_names;
+ }
+ ns->partitions[i].offset = next_offset;
+ ns->partitions[i].size = remains;
+ ns->nbparts += 1;
+ }
+
+ if (ns->busw == 16)
+ NS_WARN("16-bit flashes support wasn't tested\n");
+
+	NS_INFO("flash size: %llu MiB\n",
+		(unsigned long long)ns->geom.totsz >> 20);
+	NS_INFO("page size: %u bytes\n", ns->geom.pgsz);
+	NS_INFO("OOB area size: %u bytes\n", ns->geom.oobsz);
+	NS_INFO("sector size: %u KiB\n", ns->geom.secsz >> 10);
+	NS_INFO("pages number: %u\n", ns->geom.pgnum);
+	NS_INFO("pages per sector: %u\n", ns->geom.pgsec);
+	NS_INFO("bus width: %u\n", ns->busw);
+	NS_INFO("bits in sector size: %u\n", ns->geom.secshift);
+	NS_INFO("bits in page size: %u\n", ns->geom.pgshift);
+	NS_INFO("bits in OOB size: %u\n", ffs(ns->geom.oobsz) - 1);
+	NS_INFO("flash size with OOB: %llu KiB\n",
+		(unsigned long long)ns->geom.totszoob >> 10);
+	NS_INFO("page address bytes: %u\n", ns->geom.pgaddrbytes);
+	NS_INFO("sector address bytes: %u\n", ns->geom.secaddrbytes);
+	NS_INFO("options: %#x\n", ns->options);
+
+ ret = ns_alloc_device(ns);
+ if (ret)
+ goto free_partition_names;
+
+ /* Allocate / initialize the internal buffer */
+ ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
+ if (!ns->buf.byte) {
+ NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
+ ns->geom.pgszoob);
+ ret = -ENOMEM;
+ goto free_device;
+ }
+ memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
+
+ return 0;
+
+free_device:
+ ns_free_device(ns);
+free_partition_names:
+ for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
+ kfree(ns->partitions[i].name);
+
+ return ret;
+}
+
+/*
+ * Free the nandsim structure.
+ */
+static void ns_free(struct nandsim *ns)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
+ kfree(ns->partitions[i].name);
+
+ kfree(ns->buf.byte);
+ ns_free_device(ns);
+
+ return;
+}
+
+static int ns_parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
+{
+ char *w;
+ int zero_ok;
+ unsigned int erase_block_no;
+ loff_t offset;
+
+ if (!badblocks)
+ return 0;
+ w = badblocks;
+ do {
+ zero_ok = (*w == '0' ? 1 : 0);
+ erase_block_no = simple_strtoul(w, &w, 0);
+ if (!zero_ok && !erase_block_no) {
+ NS_ERR("invalid badblocks.\n");
+ return -EINVAL;
+ }
+ offset = (loff_t)erase_block_no * ns->geom.secsz;
+ if (mtd_block_markbad(mtd, offset)) {
+ NS_ERR("invalid badblocks.\n");
+ return -EINVAL;
+ }
+ if (*w == ',')
+ w += 1;
+ } while (*w);
+ return 0;
+}
+
+static int ns_parse_weakblocks(void)
+{
+ char *w;
+ int zero_ok;
+ unsigned int erase_block_no;
+ unsigned int max_erases;
+ struct weak_block *wb;
+
+ if (!weakblocks)
+ return 0;
+ w = weakblocks;
+ do {
+ zero_ok = (*w == '0' ? 1 : 0);
+ erase_block_no = simple_strtoul(w, &w, 0);
+ if (!zero_ok && !erase_block_no) {
+ NS_ERR("invalid weakblocks.\n");
+ return -EINVAL;
+ }
+ max_erases = 3;
+ if (*w == ':') {
+ w += 1;
+ max_erases = simple_strtoul(w, &w, 0);
+ }
+ if (*w == ',')
+ w += 1;
+ wb = kzalloc(sizeof(*wb), GFP_KERNEL);
+ if (!wb) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ wb->erase_block_no = erase_block_no;
+ wb->max_erases = max_erases;
+ list_add(&wb->list, &weak_blocks);
+ } while (*w);
+ return 0;
+}
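+
+/*
+ * Example (editorial note): weakblocks="113:2,200" creates two
+ * weak_block entries: erase block 113 starts failing after two
+ * erases, erase block 200 after the default three.
+ */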
+
+static int ns_erase_error(unsigned int erase_block_no)
+{
+ struct weak_block *wb;
+
+ list_for_each_entry(wb, &weak_blocks, list)
+ if (wb->erase_block_no == erase_block_no) {
+ if (wb->erases_done >= wb->max_erases)
+ return 1;
+ wb->erases_done += 1;
+ return 0;
+ }
+ return 0;
+}
+
+static int ns_parse_weakpages(void)
+{
+ char *w;
+ int zero_ok;
+ unsigned int page_no;
+ unsigned int max_writes;
+ struct weak_page *wp;
+
+ if (!weakpages)
+ return 0;
+ w = weakpages;
+ do {
+ zero_ok = (*w == '0' ? 1 : 0);
+ page_no = simple_strtoul(w, &w, 0);
+ if (!zero_ok && !page_no) {
+ NS_ERR("invalid weakpages.\n");
+ return -EINVAL;
+ }
+ max_writes = 3;
+ if (*w == ':') {
+ w += 1;
+ max_writes = simple_strtoul(w, &w, 0);
+ }
+ if (*w == ',')
+ w += 1;
+ wp = kzalloc(sizeof(*wp), GFP_KERNEL);
+ if (!wp) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ wp->page_no = page_no;
+ wp->max_writes = max_writes;
+ list_add(&wp->list, &weak_pages);
+ } while (*w);
+ return 0;
+}
+
+static int ns_write_error(unsigned int page_no)
+{
+ struct weak_page *wp;
+
+ list_for_each_entry(wp, &weak_pages, list)
+ if (wp->page_no == page_no) {
+ if (wp->writes_done >= wp->max_writes)
+ return 1;
+ wp->writes_done += 1;
+ return 0;
+ }
+ return 0;
+}
+
+static int ns_parse_gravepages(void)
+{
+ char *g;
+ int zero_ok;
+ unsigned int page_no;
+ unsigned int max_reads;
+ struct grave_page *gp;
+
+ if (!gravepages)
+ return 0;
+ g = gravepages;
+ do {
+ zero_ok = (*g == '0' ? 1 : 0);
+ page_no = simple_strtoul(g, &g, 0);
+ if (!zero_ok && !page_no) {
+			NS_ERR("invalid gravepages.\n");
+ return -EINVAL;
+ }
+ max_reads = 3;
+ if (*g == ':') {
+ g += 1;
+ max_reads = simple_strtoul(g, &g, 0);
+ }
+ if (*g == ',')
+ g += 1;
+ gp = kzalloc(sizeof(*gp), GFP_KERNEL);
+ if (!gp) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ gp->page_no = page_no;
+ gp->max_reads = max_reads;
+ list_add(&gp->list, &grave_pages);
+ } while (*g);
+ return 0;
+}
+
+static int ns_read_error(unsigned int page_no)
+{
+ struct grave_page *gp;
+
+ list_for_each_entry(gp, &grave_pages, list)
+ if (gp->page_no == page_no) {
+ if (gp->reads_done >= gp->max_reads)
+ return 1;
+ gp->reads_done += 1;
+ return 0;
+ }
+ return 0;
+}
+
+static int ns_setup_wear_reporting(struct mtd_info *mtd)
+{
+ wear_eb_count = div_u64(mtd->size, mtd->erasesize);
+ erase_block_wear = kcalloc(wear_eb_count, sizeof(unsigned long), GFP_KERNEL);
+ if (!erase_block_wear) {
+ NS_ERR("Too many erase blocks for wear reporting\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void ns_update_wear(unsigned int erase_block_no)
+{
+ if (!erase_block_wear)
+ return;
+ total_wear += 1;
+ /*
+ * TODO: Notify this through a debugfs entry,
+ * instead of showing an error message.
+ */
+ if (total_wear == 0)
+ NS_ERR("Erase counter total overflow\n");
+ erase_block_wear[erase_block_no] += 1;
+ if (erase_block_wear[erase_block_no] == 0)
+ NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
+}
+
+/*
+ * Returns the string representation of the given state.
+ */
+static char *ns_get_state_name(uint32_t state)
+{
+ switch (NS_STATE(state)) {
+ case STATE_CMD_READ0:
+ return "STATE_CMD_READ0";
+ case STATE_CMD_READ1:
+ return "STATE_CMD_READ1";
+ case STATE_CMD_PAGEPROG:
+ return "STATE_CMD_PAGEPROG";
+ case STATE_CMD_READOOB:
+ return "STATE_CMD_READOOB";
+ case STATE_CMD_READSTART:
+ return "STATE_CMD_READSTART";
+ case STATE_CMD_ERASE1:
+ return "STATE_CMD_ERASE1";
+ case STATE_CMD_STATUS:
+ return "STATE_CMD_STATUS";
+ case STATE_CMD_SEQIN:
+ return "STATE_CMD_SEQIN";
+ case STATE_CMD_READID:
+ return "STATE_CMD_READID";
+ case STATE_CMD_ERASE2:
+ return "STATE_CMD_ERASE2";
+ case STATE_CMD_RESET:
+ return "STATE_CMD_RESET";
+ case STATE_CMD_RNDOUT:
+ return "STATE_CMD_RNDOUT";
+ case STATE_CMD_RNDOUTSTART:
+ return "STATE_CMD_RNDOUTSTART";
+ case STATE_ADDR_PAGE:
+ return "STATE_ADDR_PAGE";
+ case STATE_ADDR_SEC:
+ return "STATE_ADDR_SEC";
+ case STATE_ADDR_ZERO:
+ return "STATE_ADDR_ZERO";
+ case STATE_ADDR_COLUMN:
+ return "STATE_ADDR_COLUMN";
+ case STATE_DATAIN:
+ return "STATE_DATAIN";
+ case STATE_DATAOUT:
+ return "STATE_DATAOUT";
+ case STATE_DATAOUT_ID:
+ return "STATE_DATAOUT_ID";
+ case STATE_DATAOUT_STATUS:
+ return "STATE_DATAOUT_STATUS";
+ case STATE_READY:
+ return "STATE_READY";
+ case STATE_UNKNOWN:
+ return "STATE_UNKNOWN";
+ }
+
+ NS_ERR("get_state_name: unknown state, BUG\n");
+ return NULL;
+}
+
+/*
+ * Check if the command is valid.
+ *
+ * RETURNS: 1 if the command is invalid, 0 if it is valid.
+ */
+static int ns_check_command(int cmd)
+{
+ switch (cmd) {
+
+ case NAND_CMD_READ0:
+ case NAND_CMD_READ1:
+ case NAND_CMD_READSTART:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_READOOB:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_READID:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_RESET:
+ case NAND_CMD_RNDOUT:
+ case NAND_CMD_RNDOUTSTART:
+ return 0;
+
+ default:
+ return 1;
+ }
+}
+
+/*
+ * Returns the state corresponding to an accepted command code.
+ */
+static uint32_t ns_get_state_by_command(unsigned command)
+{
+ switch (command) {
+ case NAND_CMD_READ0:
+ return STATE_CMD_READ0;
+ case NAND_CMD_READ1:
+ return STATE_CMD_READ1;
+ case NAND_CMD_PAGEPROG:
+ return STATE_CMD_PAGEPROG;
+ case NAND_CMD_READSTART:
+ return STATE_CMD_READSTART;
+ case NAND_CMD_READOOB:
+ return STATE_CMD_READOOB;
+ case NAND_CMD_ERASE1:
+ return STATE_CMD_ERASE1;
+ case NAND_CMD_STATUS:
+ return STATE_CMD_STATUS;
+ case NAND_CMD_SEQIN:
+ return STATE_CMD_SEQIN;
+ case NAND_CMD_READID:
+ return STATE_CMD_READID;
+ case NAND_CMD_ERASE2:
+ return STATE_CMD_ERASE2;
+ case NAND_CMD_RESET:
+ return STATE_CMD_RESET;
+ case NAND_CMD_RNDOUT:
+ return STATE_CMD_RNDOUT;
+ case NAND_CMD_RNDOUTSTART:
+ return STATE_CMD_RNDOUTSTART;
+ }
+
+ NS_ERR("get_state_by_command: unknown command, BUG\n");
+ return 0;
+}
+
+/*
+ * Move an address byte to the corresponding internal register.
+ */
+static inline void ns_accept_addr_byte(struct nandsim *ns, u_char bt)
+{
+ uint byte = (uint)bt;
+
+ if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
+ ns->regs.column |= (byte << 8 * ns->regs.count);
+ else {
+ ns->regs.row |= (byte << 8 * (ns->regs.count -
+ ns->geom.pgaddrbytes +
+ ns->geom.secaddrbytes));
+ }
+
+ return;
+}
+
+/*
+ * Switch to STATE_READY state.
+ */
+static inline void ns_switch_to_ready_state(struct nandsim *ns, u_char status)
+{
+ NS_DBG("switch_to_ready_state: switch to %s state\n",
+ ns_get_state_name(STATE_READY));
+
+ ns->state = STATE_READY;
+ ns->nxstate = STATE_UNKNOWN;
+ ns->op = NULL;
+ ns->npstates = 0;
+ ns->stateidx = 0;
+ ns->regs.num = 0;
+ ns->regs.count = 0;
+ ns->regs.off = 0;
+ ns->regs.row = 0;
+ ns->regs.column = 0;
+ ns->regs.status = status;
+}
+
+/*
+ * If the operation isn't known yet, try to find it in the global array
+ * of supported operations.
+ *
+ * The operation can be unknown for the following reasons:
+ * 1. A new command was accepted and this is the first call to find the
+ *    corresponding chain of states. In this case ns->npstates = 0.
+ * 2. Several operations begin with the same command(s) (for example,
+ *    'program from the second half' and 'read from the second half'
+ *    both begin with the READ1 command). In this case the
+ *    ns->pstates[] array contains the previous states.
+ *
+ * Thus, the function tries to find an operation containing the
+ * following states (if the 'flag' parameter is 0):
+ *    ns->pstates[0], ... ns->pstates[ns->npstates], ns->state
+ *
+ * If one (and only one) matching operation is found, it is accepted
+ * (ns->op, ns->state and ns->nxstate are initialized, ns->npstates is
+ * zeroed).
+ *
+ * If there are several matches, the current state is pushed to
+ * ns->pstates[].
+ *
+ * The operation can be unknown only while commands are input to the
+ * chip. As soon as an address is accepted, the operation must be
+ * known. In that situation the function is called with 'flag' != 0,
+ * and the operation is searched for using the following pattern:
+ *    ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
+ *
+ * This pattern is expected to match either one operation or none;
+ * there can be no ambiguity in that case.
+ *
+ * If no match is found, the function does the following:
+ * 1. If there are saved states, ignore them and search again using
+ *    only the last command. If nothing is found, switch to the
+ *    STATE_READY state.
+ * 2. If there are no saved states, switch to the STATE_READY state.
+ *
+ * RETURNS: -2 - no matching operation found.
+ *          -1 - several matches.
+ *           0 - operation found.
+ */
+static int ns_find_operation(struct nandsim *ns, uint32_t flag)
+{
+ int opsfound = 0;
+ int i, j, idx = 0;
+
+ for (i = 0; i < NS_OPER_NUM; i++) {
+
+ int found = 1;
+
+ if (!(ns->options & ops[i].reqopts))
+ /* Ignore operations we can't perform */
+ continue;
+
+ if (flag) {
+ if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
+ continue;
+ } else {
+ if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
+ continue;
+ }
+
+ for (j = 0; j < ns->npstates; j++)
+ if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])
+ && (ns->options & ops[idx].reqopts)) {
+ found = 0;
+ break;
+ }
+
+ if (found) {
+ idx = i;
+ opsfound += 1;
+ }
+ }
+
+ if (opsfound == 1) {
+ /* Exact match */
+ ns->op = &ops[idx].states[0];
+ if (flag) {
+ /*
+			 * In this case find_operation() was called when
+			 * the address has just begun to be input. It is
+			 * not fully input yet, so the current state must
+			 * not be one of STATE_ADDR_*; instead, the
+			 * STATE_ADDR_* state must be the next state
+			 * (ns->nxstate).
+ */
+ ns->stateidx = ns->npstates - 1;
+ } else {
+ ns->stateidx = ns->npstates;
+ }
+ ns->npstates = 0;
+ ns->state = ns->op[ns->stateidx];
+ ns->nxstate = ns->op[ns->stateidx + 1];
+ NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
+ idx, ns_get_state_name(ns->state),
+ ns_get_state_name(ns->nxstate));
+ return 0;
+ }
+
+ if (opsfound == 0) {
+ /* Nothing was found. Try to ignore previous commands (if any) and search again */
+ if (ns->npstates != 0) {
+ NS_DBG("find_operation: no operation found, try again with state %s\n",
+ ns_get_state_name(ns->state));
+ ns->npstates = 0;
+ return ns_find_operation(ns, 0);
+
+ }
+ NS_DBG("find_operation: no operations found\n");
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return -2;
+ }
+
+ if (flag) {
+ /* This shouldn't happen */
+ NS_DBG("find_operation: BUG, operation must be known if address is input\n");
+ return -2;
+ }
+
+ NS_DBG("find_operation: there is still ambiguity\n");
+
+ ns->pstates[ns->npstates++] = ns->state;
+
+ return -1;
+}
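+
+/*
+ * Worked example (editorial note): on a 512-byte page chip, the READ1
+ * command starts both 'read from the second half' and 'program from
+ * the second half'. The first call finds two matches, returns -1 and
+ * saves STATE_CMD_READ1 in pstates[]. If SEQIN follows, the next call
+ * matches only the program operation and accepts it.
+ */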
+
+static void ns_put_pages(struct nandsim *ns)
+{
+ int i;
+
+ for (i = 0; i < ns->held_cnt; i++)
+ put_page(ns->held_pages[i]);
+}
+
+/* Get page cache pages in advance to provide NOFS memory allocation */
+static int ns_get_pages(struct nandsim *ns, struct file *file, size_t count,
+ loff_t pos)
+{
+ pgoff_t index, start_index, end_index;
+ struct page *page;
+ struct address_space *mapping = file->f_mapping;
+
+ start_index = pos >> PAGE_SHIFT;
+ end_index = (pos + count - 1) >> PAGE_SHIFT;
+ if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
+ return -EINVAL;
+ ns->held_cnt = 0;
+ for (index = start_index; index <= end_index; index++) {
+ page = find_get_page(mapping, index);
+ if (page == NULL) {
+ page = find_or_create_page(mapping, index, GFP_NOFS);
+ if (page == NULL) {
+ write_inode_now(mapping->host, 1);
+ page = find_or_create_page(mapping, index, GFP_NOFS);
+ }
+ if (page == NULL) {
+ ns_put_pages(ns);
+ return -ENOMEM;
+ }
+ unlock_page(page);
+ }
+ ns->held_pages[ns->held_cnt++] = page;
+ }
+ return 0;
+}
+
+static ssize_t ns_read_file(struct nandsim *ns, struct file *file, void *buf,
+ size_t count, loff_t pos)
+{
+ ssize_t tx;
+ int err;
+ unsigned int noreclaim_flag;
+
+ err = ns_get_pages(ns, file, count, pos);
+ if (err)
+ return err;
+ noreclaim_flag = memalloc_noreclaim_save();
+ tx = kernel_read(file, buf, count, &pos);
+ memalloc_noreclaim_restore(noreclaim_flag);
+ ns_put_pages(ns);
+ return tx;
+}
+
+static ssize_t ns_write_file(struct nandsim *ns, struct file *file, void *buf,
+ size_t count, loff_t pos)
+{
+ ssize_t tx;
+ int err;
+ unsigned int noreclaim_flag;
+
+ err = ns_get_pages(ns, file, count, pos);
+ if (err)
+ return err;
+ noreclaim_flag = memalloc_noreclaim_save();
+ tx = kernel_write(file, buf, count, &pos);
+ memalloc_noreclaim_restore(noreclaim_flag);
+ ns_put_pages(ns);
+ return tx;
+}
+
+/*
+ * Returns a pointer to the current page.
+ */
+static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
+{
+ return &(ns->pages[ns->regs.row]);
+}
+
+/*
+ * Returns a pointer to the current byte, within the current page.
+ */
+static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
+{
+ return NS_GET_PAGE(ns)->byte + NS_PAGE_BYTE_SHIFT(ns);
+}
+
+static int ns_do_read_error(struct nandsim *ns, int num)
+{
+ unsigned int page_no = ns->regs.row;
+
+ if (ns_read_error(page_no)) {
+ get_random_bytes(ns->buf.byte, num);
+ NS_WARN("simulating read error in page %u\n", page_no);
+ return 1;
+ }
+ return 0;
+}
+
+static void ns_do_bit_flips(struct nandsim *ns, int num)
+{
+ if (bitflips && get_random_u16() < (1 << 6)) {
+ int flips = 1;
+ if (bitflips > 1)
+ flips = get_random_u32_inclusive(1, bitflips);
+ while (flips--) {
+ int pos = get_random_u32_below(num * 8);
+ ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
+ NS_WARN("read_page: flipping bit %d in page %d "
+ "reading from %d ecc: corrected=%u failed=%u\n",
+ pos, ns->regs.row, NS_PAGE_BYTE_SHIFT(ns),
+ nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
+ }
+ }
+}
+
+/*
+ * Fill the NAND buffer with data read from the specified page.
+ */
+static void ns_read_page(struct nandsim *ns, int num)
+{
+ union ns_mem *mypage;
+
+ if (ns->cfile) {
+ if (!test_bit(ns->regs.row, ns->pages_written)) {
+ NS_DBG("read_page: page %d not written\n", ns->regs.row);
+ memset(ns->buf.byte, 0xFF, num);
+ } else {
+ loff_t pos;
+ ssize_t tx;
+
+ NS_DBG("read_page: page %d written, reading from %d\n",
+ ns->regs.row, NS_PAGE_BYTE_SHIFT(ns));
+ if (ns_do_read_error(ns, num))
+ return;
+ pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
+ tx = ns_read_file(ns, ns->cfile, ns->buf.byte, num,
+ pos);
+ if (tx != num) {
+ NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return;
+ }
+ ns_do_bit_flips(ns, num);
+ }
+ return;
+ }
+
+ mypage = NS_GET_PAGE(ns);
+ if (mypage->byte == NULL) {
+ NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
+ memset(ns->buf.byte, 0xFF, num);
+ } else {
+ NS_DBG("read_page: page %d allocated, reading from %d\n",
+ ns->regs.row, NS_PAGE_BYTE_SHIFT(ns));
+ if (ns_do_read_error(ns, num))
+ return;
+ memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
+ ns_do_bit_flips(ns, num);
+ }
+}
+
+/*
+ * Erase all pages in the specified sector.
+ */
+static void ns_erase_sector(struct nandsim *ns)
+{
+ union ns_mem *mypage;
+ int i;
+
+ if (ns->cfile) {
+ for (i = 0; i < ns->geom.pgsec; i++)
+ if (__test_and_clear_bit(ns->regs.row + i,
+ ns->pages_written)) {
+ NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
+ }
+ return;
+ }
+
+ mypage = NS_GET_PAGE(ns);
+ for (i = 0; i < ns->geom.pgsec; i++) {
+ if (mypage->byte != NULL) {
+ NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
+ kmem_cache_free(ns->nand_pages_slab, mypage->byte);
+ mypage->byte = NULL;
+ }
+ mypage++;
+ }
+}
+
+/*
+ * Program the specified page with the contents from the NAND buffer.
+ */
+static int ns_prog_page(struct nandsim *ns, int num)
+{
+ int i;
+ union ns_mem *mypage;
+ u_char *pg_off;
+
+ if (ns->cfile) {
+ loff_t off;
+ ssize_t tx;
+ int all;
+
+ NS_DBG("prog_page: writing page %d\n", ns->regs.row);
+ pg_off = ns->file_buf + NS_PAGE_BYTE_SHIFT(ns);
+ off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
+ if (!test_bit(ns->regs.row, ns->pages_written)) {
+ all = 1;
+ memset(ns->file_buf, 0xff, ns->geom.pgszoob);
+ } else {
+ all = 0;
+ tx = ns_read_file(ns, ns->cfile, pg_off, num, off);
+ if (tx != num) {
+ NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return -1;
+ }
+ }
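+		/*
+		 * Like real NAND, programming can only clear bits: AND the
+		 * new data into the existing page contents.
+		 */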
+ for (i = 0; i < num; i++)
+ pg_off[i] &= ns->buf.byte[i];
+ if (all) {
+ loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
+ tx = ns_write_file(ns, ns->cfile, ns->file_buf,
+ ns->geom.pgszoob, pos);
+ if (tx != ns->geom.pgszoob) {
+ NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return -1;
+ }
+ __set_bit(ns->regs.row, ns->pages_written);
+ } else {
+ tx = ns_write_file(ns, ns->cfile, pg_off, num, off);
+ if (tx != num) {
+ NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return -1;
+ }
+ }
+ return 0;
+ }
+
+ mypage = NS_GET_PAGE(ns);
+ if (mypage->byte == NULL) {
+ NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
+ /*
+ * We allocate memory with GFP_NOFS because a flash FS may
+		 * utilize this. If it is holding an FS lock and then gets
+		 * here, a kernel memory allocation could run writeback, which
+		 * re-enters the FS and deadlocks. This was seen in practice.
+ */
+ mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
+ if (mypage->byte == NULL) {
+ NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
+ return -1;
+ }
+ memset(mypage->byte, 0xFF, ns->geom.pgszoob);
+ }
+
+ pg_off = NS_PAGE_BYTE_OFF(ns);
+ for (i = 0; i < num; i++)
+ pg_off[i] &= ns->buf.byte[i];
+
+ return 0;
+}
+
+/*
+ * If state has any action bit, perform this action.
+ *
+ * RETURNS: 0 if success, -1 if error.
+ */
+static int ns_do_state_action(struct nandsim *ns, uint32_t action)
+{
+ int num;
+ int busdiv = ns->busw == 8 ? 1 : 2;
+ unsigned int erase_block_no, page_no;
+
+ action &= ACTION_MASK;
+
+ /* Check that page address input is correct */
+ if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
+ NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
+ return -1;
+ }
+
+ switch (action) {
+
+ case ACTION_CPY:
+ /*
+ * Copy page data to the internal buffer.
+ */
+
+		/* Column plus current offset must stay within the page incl. OOB */
+ if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
+ NS_ERR("do_state_action: column number is too large\n");
+ break;
+ }
+ num = ns->geom.pgszoob - NS_PAGE_BYTE_SHIFT(ns);
+ ns_read_page(ns, num);
+
+ NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
+ num, NS_RAW_OFFSET(ns) + ns->regs.off);
+
+ if (ns->regs.off == 0)
+ NS_LOG("read page %d\n", ns->regs.row);
+ else if (ns->regs.off < ns->geom.pgsz)
+ NS_LOG("read page %d (second half)\n", ns->regs.row);
+ else
+ NS_LOG("read OOB of page %d\n", ns->regs.row);
+
+ NS_UDELAY(access_delay);
+ NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);
+
+ break;
+
+ case ACTION_SECERASE:
+ /*
+ * Erase sector.
+ */
+
+ if (ns->lines.wp) {
+ NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
+ return -1;
+ }
+
+ if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
+ || (ns->regs.row & ~(ns->geom.secsz - 1))) {
+ NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
+ return -1;
+ }
+
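+		/*
+		 * The erase address was collected through the column/row
+		 * registers using fewer address bytes; reassemble it into a
+		 * full page (row) address.
+		 */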
+ ns->regs.row = (ns->regs.row <<
+ 8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
+ ns->regs.column = 0;
+
+ erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);
+
+ NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
+ ns->regs.row, NS_RAW_OFFSET(ns));
+ NS_LOG("erase sector %u\n", erase_block_no);
+
+ ns_erase_sector(ns);
+
+ NS_MDELAY(erase_delay);
+
+ if (erase_block_wear)
+ ns_update_wear(erase_block_no);
+
+ if (ns_erase_error(erase_block_no)) {
+ NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
+ return -1;
+ }
+
+ break;
+
+ case ACTION_PRGPAGE:
+ /*
+ * Program page - move internal buffer data to the page.
+ */
+
+ if (ns->lines.wp) {
+ NS_WARN("do_state_action: device is write-protected, programm\n");
+ return -1;
+ }
+
+ num = ns->geom.pgszoob - NS_PAGE_BYTE_SHIFT(ns);
+ if (num != ns->regs.count) {
+ NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
+ ns->regs.count, num);
+ return -1;
+ }
+
+ if (ns_prog_page(ns, num) == -1)
+ return -1;
+
+ page_no = ns->regs.row;
+
+ NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
+ num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
+ NS_LOG("programm page %d\n", ns->regs.row);
+
+ NS_UDELAY(programm_delay);
+ NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
+
+ if (ns_write_error(page_no)) {
+ NS_WARN("simulating write failure in page %u\n", page_no);
+ return -1;
+ }
+
+ break;
+
+ case ACTION_ZEROOFF:
+ NS_DBG("do_state_action: set internal offset to 0\n");
+ ns->regs.off = 0;
+ break;
+
+ case ACTION_HALFOFF:
+ if (!(ns->options & OPT_PAGE512_8BIT)) {
+ NS_ERR("do_state_action: BUG! can't skip half of page for non-512"
+ "byte page size 8x chips\n");
+ return -1;
+ }
+ NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
+ ns->regs.off = ns->geom.pgsz/2;
+ break;
+
+ case ACTION_OOBOFF:
+ NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
+ ns->regs.off = ns->geom.pgsz;
+ break;
+
+ default:
+ NS_DBG("do_state_action: BUG! unknown action\n");
+ }
+
+ return 0;
+}
+
+/*
+ * Switch simulator's state.
+ */
+static void ns_switch_state(struct nandsim *ns)
+{
+ if (ns->op) {
+ /*
+		 * The current operation has already been identified.
+ * Just follow the states chain.
+ */
+
+ ns->stateidx += 1;
+ ns->state = ns->nxstate;
+ ns->nxstate = ns->op[ns->stateidx + 1];
+
+ NS_DBG("switch_state: operation is known, switch to the next state, "
+ "state: %s, nxstate: %s\n",
+ ns_get_state_name(ns->state),
+ ns_get_state_name(ns->nxstate));
+ } else {
+ /*
+		 * We don't yet know which operation is being performed.
+ * Try to identify it.
+ */
+
+ /*
+		 * The only event that causes the switch_state function to be
+		 * called with a yet-unknown operation is a new command.
+ */
+ ns->state = ns_get_state_by_command(ns->regs.command);
+
+ NS_DBG("switch_state: operation is unknown, try to find it\n");
+
+ if (ns_find_operation(ns, 0))
+ return;
+ }
+
+	/* See whether we need to do some action */
+ if ((ns->state & ACTION_MASK) &&
+ ns_do_state_action(ns, ns->state) < 0) {
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ /* For 16x devices column means the page offset in words */
+ if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
+ NS_DBG("switch_state: double the column number for 16x device\n");
+ ns->regs.column <<= 1;
+ }
+
+ if (NS_STATE(ns->nxstate) == STATE_READY) {
+ /*
+ * The current state is the last. Return to STATE_READY
+ */
+
+ u_char status = NS_STATUS_OK(ns);
+
+ /* In case of data states, see if all bytes were input/output */
+ if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
+ && ns->regs.count != ns->regs.num) {
+ NS_WARN("switch_state: not all bytes were processed, %d left\n",
+ ns->regs.num - ns->regs.count);
+ status = NS_STATUS_FAILED(ns);
+ }
+
+ NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
+
+ ns_switch_to_ready_state(ns, status);
+
+ return;
+ } else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
+ /*
+ * If the next state is data input/output, switch to it now
+ */
+
+ ns->state = ns->nxstate;
+ ns->nxstate = ns->op[++ns->stateidx + 1];
+ ns->regs.num = ns->regs.count = 0;
+
+ NS_DBG("switch_state: the next state is data I/O, switch, "
+ "state: %s, nxstate: %s\n",
+ ns_get_state_name(ns->state),
+ ns_get_state_name(ns->nxstate));
+
+ /*
+ * Set the internal register to the count of bytes which
+ * are expected to be input or output
+ */
+ switch (NS_STATE(ns->state)) {
+ case STATE_DATAIN:
+ case STATE_DATAOUT:
+ ns->regs.num = ns->geom.pgszoob - NS_PAGE_BYTE_SHIFT(ns);
+ break;
+
+ case STATE_DATAOUT_ID:
+ ns->regs.num = ns->geom.idbytes;
+ break;
+
+ case STATE_DATAOUT_STATUS:
+ ns->regs.count = ns->regs.num = 0;
+ break;
+
+ default:
+ NS_ERR("switch_state: BUG! unknown data state\n");
+ }
+
+ } else if (ns->nxstate & STATE_ADDR_MASK) {
+ /*
+ * If the next state is address input, set the internal
+ * register to the number of expected address bytes
+ */
+
+ ns->regs.count = 0;
+
+ switch (NS_STATE(ns->nxstate)) {
+ case STATE_ADDR_PAGE:
+ ns->regs.num = ns->geom.pgaddrbytes;
+			break;
+
+		case STATE_ADDR_SEC:
+ ns->regs.num = ns->geom.secaddrbytes;
+ break;
+
+ case STATE_ADDR_ZERO:
+ ns->regs.num = 1;
+ break;
+
+ case STATE_ADDR_COLUMN:
+ /* Column address is always 2 bytes */
+ ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes;
+ break;
+
+ default:
+ NS_ERR("switch_state: BUG! unknown address state\n");
+ }
+ } else {
+ /*
+ * Just reset internal counters.
+ */
+
+ ns->regs.num = 0;
+ ns->regs.count = 0;
+ }
+}
+
+static u_char ns_nand_read_byte(struct nand_chip *chip)
+{
+ struct nandsim *ns = nand_get_controller_data(chip);
+ u_char outb = 0x00;
+
+ /* Sanity and correctness checks */
+ if (!ns->lines.ce) {
+ NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
+ return outb;
+ }
+ if (ns->lines.ale || ns->lines.cle) {
+ NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
+ return outb;
+ }
+ if (!(ns->state & STATE_DATAOUT_MASK)) {
+ NS_WARN("read_byte: unexpected data output cycle, state is %s return %#x\n",
+ ns_get_state_name(ns->state), (uint)outb);
+ return outb;
+ }
+
+ /* Status register may be read as many times as it is wanted */
+ if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
+ NS_DBG("read_byte: return %#x status\n", ns->regs.status);
+ return ns->regs.status;
+ }
+
+ /* Check if there is any data in the internal buffer which may be read */
+ if (ns->regs.count == ns->regs.num) {
+ NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
+ return outb;
+ }
+
+ switch (NS_STATE(ns->state)) {
+ case STATE_DATAOUT:
+ if (ns->busw == 8) {
+ outb = ns->buf.byte[ns->regs.count];
+ ns->regs.count += 1;
+ } else {
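+			/* 16-bit bus: return the low byte of the little-endian word */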
+ outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
+ ns->regs.count += 2;
+ }
+ break;
+ case STATE_DATAOUT_ID:
+ NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
+ outb = ns->ids[ns->regs.count];
+ ns->regs.count += 1;
+ break;
+ default:
+ BUG();
+ }
+
+ if (ns->regs.count == ns->regs.num) {
+ NS_DBG("read_byte: all bytes were read\n");
+
+ if (NS_STATE(ns->nxstate) == STATE_READY)
+ ns_switch_state(ns);
+ }
+
+ return outb;
+}
+
+static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
+{
+ struct nandsim *ns = nand_get_controller_data(chip);
+
+ /* Sanity and correctness checks */
+ if (!ns->lines.ce) {
+ NS_ERR("write_byte: chip is disabled, ignore write\n");
+ return;
+ }
+ if (ns->lines.ale && ns->lines.cle) {
+ NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
+ return;
+ }
+
+ if (ns->lines.cle == 1) {
+ /*
+ * The byte written is a command.
+ */
+
+ if (byte == NAND_CMD_RESET) {
+ NS_LOG("reset chip\n");
+ ns_switch_to_ready_state(ns, NS_STATUS_OK(ns));
+ return;
+ }
+
+ /* Check that the command byte is correct */
+ if (ns_check_command(byte)) {
+ NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
+ return;
+ }
+
+ if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
+ || NS_STATE(ns->state) == STATE_DATAOUT) {
+ int row = ns->regs.row;
+
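+			/*
+			 * NAND_CMD_RNDOUT continues output within the same
+			 * page, so preserve the row number across the state
+			 * switch below.
+			 */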
+ ns_switch_state(ns);
+ if (byte == NAND_CMD_RNDOUT)
+ ns->regs.row = row;
+ }
+
+ /* Check if chip is expecting command */
+ if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
+ /* Do not warn if only 2 id bytes are read */
+ if (!(ns->regs.command == NAND_CMD_READID &&
+ NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
+ /*
+ * We are in situation when something else (not command)
+ * was expected but command was input. In this case ignore
+ * previous command(s)/state(s) and accept the last one.
+ */
+ NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, ignore previous states\n",
+ (uint)byte,
+ ns_get_state_name(ns->nxstate));
+ }
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ }
+
+ NS_DBG("command byte corresponding to %s state accepted\n",
+ ns_get_state_name(ns_get_state_by_command(byte)));
+ ns->regs.command = byte;
+ ns_switch_state(ns);
+
+ } else if (ns->lines.ale == 1) {
+ /*
+ * The byte written is an address.
+ */
+
+ if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {
+
+ NS_DBG("write_byte: operation isn't known yet, identify it\n");
+
+ if (ns_find_operation(ns, 1) < 0)
+ return;
+
+ if ((ns->state & ACTION_MASK) &&
+ ns_do_state_action(ns, ns->state) < 0) {
+ ns_switch_to_ready_state(ns,
+ NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ ns->regs.count = 0;
+ switch (NS_STATE(ns->nxstate)) {
+ case STATE_ADDR_PAGE:
+ ns->regs.num = ns->geom.pgaddrbytes;
+ break;
+ case STATE_ADDR_SEC:
+ ns->regs.num = ns->geom.secaddrbytes;
+ break;
+ case STATE_ADDR_ZERO:
+ ns->regs.num = 1;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ /* Check that chip is expecting address */
+ if (!(ns->nxstate & STATE_ADDR_MASK)) {
+ NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, switch to STATE_READY\n",
+ (uint)byte, ns_get_state_name(ns->nxstate));
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ /* Check if this is expected byte */
+ if (ns->regs.count == ns->regs.num) {
+ NS_ERR("write_byte: no more address bytes expected\n");
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ ns_accept_addr_byte(ns, byte);
+
+ ns->regs.count += 1;
+
+ NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
+ (uint)byte, ns->regs.count, ns->regs.num);
+
+ if (ns->regs.count == ns->regs.num) {
+ NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
+ ns_switch_state(ns);
+ }
+
+ } else {
+ /*
+		 * The byte written is input data.
+ */
+
+ /* Check that chip is expecting data input */
+ if (!(ns->state & STATE_DATAIN_MASK)) {
+ NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, switch to %s\n",
+ (uint)byte, ns_get_state_name(ns->state),
+ ns_get_state_name(STATE_READY));
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ /* Check if this is expected byte */
+ if (ns->regs.count == ns->regs.num) {
+ NS_WARN("write_byte: %u input bytes has already been accepted, ignore write\n",
+ ns->regs.num);
+ return;
+ }
+
+ if (ns->busw == 8) {
+ ns->buf.byte[ns->regs.count] = byte;
+ ns->regs.count += 1;
+ } else {
+ ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
+ ns->regs.count += 2;
+ }
+ }
+
+ return;
+}
+
+static void ns_nand_write_buf(struct nand_chip *chip, const u_char *buf,
+ int len)
+{
+ struct nandsim *ns = nand_get_controller_data(chip);
+
+ /* Check that chip is expecting data input */
+ if (!(ns->state & STATE_DATAIN_MASK)) {
+ NS_ERR("write_buf: data input isn't expected, state is %s, switch to STATE_READY\n",
+ ns_get_state_name(ns->state));
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ /* Check if these are expected bytes */
+ if (ns->regs.count + len > ns->regs.num) {
+ NS_ERR("write_buf: too many input bytes\n");
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ memcpy(ns->buf.byte + ns->regs.count, buf, len);
+ ns->regs.count += len;
+
+ if (ns->regs.count == ns->regs.num) {
+ NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
+ }
+}
+
+static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
+{
+ struct nandsim *ns = nand_get_controller_data(chip);
+
+ /* Sanity and correctness checks */
+ if (!ns->lines.ce) {
+ NS_ERR("read_buf: chip is disabled\n");
+ return;
+ }
+ if (ns->lines.ale || ns->lines.cle) {
+ NS_ERR("read_buf: ALE or CLE pin is high\n");
+ return;
+ }
+ if (!(ns->state & STATE_DATAOUT_MASK)) {
+ NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
+ ns_get_state_name(ns->state));
+ return;
+ }
+
+ if (NS_STATE(ns->state) != STATE_DATAOUT) {
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = ns_nand_read_byte(chip);
+
+ return;
+ }
+
+ /* Check if these are expected bytes */
+ if (ns->regs.count + len > ns->regs.num) {
+ NS_ERR("read_buf: too many bytes to read\n");
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ memcpy(buf, ns->buf.byte + ns->regs.count, len);
+ ns->regs.count += len;
+
+ if (ns->regs.count == ns->regs.num) {
+ if (NS_STATE(ns->nxstate) == STATE_READY)
+ ns_switch_state(ns);
+ }
+
+ return;
+}
+
+static int ns_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+ bool check_only)
+{
+ int i;
+ unsigned int op_id;
+ const struct nand_op_instr *instr = NULL;
+ struct nandsim *ns = nand_get_controller_data(chip);
+
+ if (check_only) {
+ /* The current implementation of nandsim needs to know the
+ * ongoing operation when performing the address cycles. This
+		 * means it cannot tell the difference between a regular read
+ * and a continuous read. Hence, this hack to manually refuse
+ * supporting sequential cached operations.
+ */
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+ if (instr->type == NAND_OP_CMD_INSTR &&
+ (instr->ctx.cmd.opcode == NAND_CMD_READCACHEEND ||
+ instr->ctx.cmd.opcode == NAND_CMD_READCACHESEQ))
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+ }
+
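+	/*
+	 * Keep CE asserted for the whole operation; CLE/ALE are driven per
+	 * instruction below.
+	 */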
+ ns->lines.ce = 1;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+ ns->lines.cle = 0;
+ ns->lines.ale = 0;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ns->lines.cle = 1;
+ ns_nand_write_byte(chip, instr->ctx.cmd.opcode);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ ns->lines.ale = 1;
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ ns_nand_write_byte(chip, instr->ctx.addr.addrs[i]);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ ns_nand_read_buf(chip, instr->ctx.data.buf.in, instr->ctx.data.len);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ ns_nand_write_buf(chip, instr->ctx.data.buf.out, instr->ctx.data.len);
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ /* we are always ready */
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int ns_attach_chip(struct nand_chip *chip)
+{
+ unsigned int eccsteps, eccbytes;
+
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = bch ? NAND_ECC_ALGO_BCH : NAND_ECC_ALGO_HAMMING;
+
+ if (!bch)
+ return 0;
+
+ if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
+ NS_ERR("BCH ECC support is disabled\n");
+ return -EINVAL;
+ }
+
+ /* Use 512-byte ecc blocks */
+ eccsteps = nsmtd->writesize / 512;
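+	/*
+	 * A BCH code over 512-byte steps needs 13 bits per bit of correction
+	 * strength; round up to whole bytes.
+	 */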
+ eccbytes = ((bch * 13) + 7) / 8;
+
+ /* Do not bother supporting small page devices */
+ if (nsmtd->oobsize < 64 || !eccsteps) {
+ NS_ERR("BCH not available on small page devices\n");
+ return -EINVAL;
+ }
+
+ if (((eccbytes * eccsteps) + 2) > nsmtd->oobsize) {
+ NS_ERR("Invalid BCH value %u\n", bch);
+ return -EINVAL;
+ }
+
+ chip->ecc.size = 512;
+ chip->ecc.strength = bch;
+ chip->ecc.bytes = eccbytes;
+
+ NS_INFO("Using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
+
+ return 0;
+}
+
+static const struct nand_controller_ops ns_controller_ops = {
+ .attach_chip = ns_attach_chip,
+ .exec_op = ns_exec_op,
+};
+
+/*
+ * Module initialization function
+ */
+static int __init ns_init_module(void)
+{
+ struct list_head *pos, *n;
+ struct nand_chip *chip;
+ struct nandsim *ns;
+ int ret;
+
+ if (bus_width != 8 && bus_width != 16) {
+ NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
+ return -EINVAL;
+ }
+
+ ns = kzalloc(sizeof(struct nandsim), GFP_KERNEL);
+ if (!ns) {
+ NS_ERR("unable to allocate core structures.\n");
+ return -ENOMEM;
+ }
+ chip = &ns->chip;
+ nsmtd = nand_to_mtd(chip);
+ nand_set_controller_data(chip, (void *)ns);
+
+ /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
+ /* and 'badblocks' parameters to work */
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ switch (bbt) {
+ case 2:
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ fallthrough;
+ case 1:
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ fallthrough;
+ case 0:
+ break;
+ default:
+ NS_ERR("bbt has to be 0..2\n");
+ ret = -EINVAL;
+ goto free_ns_struct;
+ }
+ /*
+ * Perform minimum nandsim structure initialization to handle
+ * the initial ID read command correctly
+ */
+ if (id_bytes[6] != 0xFF || id_bytes[7] != 0xFF)
+ ns->geom.idbytes = 8;
+ else if (id_bytes[4] != 0xFF || id_bytes[5] != 0xFF)
+ ns->geom.idbytes = 6;
+ else if (id_bytes[2] != 0xFF || id_bytes[3] != 0xFF)
+ ns->geom.idbytes = 4;
+ else
+ ns->geom.idbytes = 2;
+ ns->regs.status = NS_STATUS_OK(ns);
+ ns->nxstate = STATE_UNKNOWN;
+ ns->options |= OPT_PAGE512; /* temporary value */
+ memcpy(ns->ids, id_bytes, sizeof(ns->ids));
+ if (bus_width == 16) {
+ ns->busw = 16;
+ chip->options |= NAND_BUSWIDTH_16;
+ }
+
+ nsmtd->owner = THIS_MODULE;
+
+ ret = ns_parse_weakblocks();
+ if (ret)
+ goto free_ns_struct;
+
+ ret = ns_parse_weakpages();
+ if (ret)
+ goto free_wb_list;
+
+ ret = ns_parse_gravepages();
+ if (ret)
+ goto free_wp_list;
+
+ nand_controller_init(&ns->base);
+ ns->base.ops = &ns_controller_ops;
+ chip->controller = &ns->base;
+
+ ret = nand_scan(chip, 1);
+ if (ret) {
+ NS_ERR("Could not scan NAND Simulator device\n");
+ goto free_gp_list;
+ }
+
+ if (overridesize) {
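+		/*
+		 * Resize the simulated device to 2^overridesize erase blocks,
+		 * e.g. overridesize=4 with 16 KiB blocks yields a 256 KiB
+		 * device.
+		 */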
+ uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
+ struct nand_memory_organization *memorg;
+ u64 targetsize;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ if (new_size >> overridesize != nsmtd->erasesize) {
+ NS_ERR("overridesize is too big\n");
+ ret = -EINVAL;
+ goto cleanup_nand;
+ }
+
+ /* N.B. This relies on nand_scan not doing anything with the size before we change it */
+ nsmtd->size = new_size;
+ memorg->eraseblocks_per_lun = 1 << overridesize;
+ targetsize = nanddev_target_size(&chip->base);
+ chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
+ chip->pagemask = (targetsize >> chip->page_shift) - 1;
+ }
+
+ ret = ns_setup_wear_reporting(nsmtd);
+ if (ret)
+ goto cleanup_nand;
+
+ ret = ns_init(nsmtd);
+ if (ret)
+ goto free_ebw;
+
+ ret = nand_create_bbt(chip);
+ if (ret)
+ goto free_ns_object;
+
+ ret = ns_parse_badblocks(ns, nsmtd);
+ if (ret)
+ goto free_ns_object;
+
+ /* Register NAND partitions */
+ ret = mtd_device_register(nsmtd, &ns->partitions[0], ns->nbparts);
+ if (ret)
+ goto free_ns_object;
+
+ ret = ns_debugfs_create(ns);
+ if (ret)
+ goto unregister_mtd;
+
+ return 0;
+
+unregister_mtd:
+ WARN_ON(mtd_device_unregister(nsmtd));
+free_ns_object:
+ ns_free(ns);
+free_ebw:
+ kfree(erase_block_wear);
+cleanup_nand:
+ nand_cleanup(chip);
+free_gp_list:
+ list_for_each_safe(pos, n, &grave_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct grave_page, list));
+ }
+free_wp_list:
+ list_for_each_safe(pos, n, &weak_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_page, list));
+ }
+free_wb_list:
+ list_for_each_safe(pos, n, &weak_blocks) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_block, list));
+ }
+free_ns_struct:
+ kfree(ns);
+
+ return ret;
+}
+
+module_init(ns_init_module);
+
+/*
+ * Module clean-up function
+ */
+static void __exit ns_cleanup_module(void)
+{
+ struct nand_chip *chip = mtd_to_nand(nsmtd);
+ struct nandsim *ns = nand_get_controller_data(chip);
+ struct list_head *pos, *n;
+
+ ns_debugfs_remove(ns);
+ WARN_ON(mtd_device_unregister(nsmtd));
+ ns_free(ns);
+ kfree(erase_block_wear);
+ nand_cleanup(chip);
+
+ list_for_each_safe(pos, n, &grave_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct grave_page, list));
+ }
+
+ list_for_each_safe(pos, n, &weak_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_page, list));
+ }
+
+ list_for_each_safe(pos, n, &weak_blocks) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_block, list));
+ }
+
+ kfree(ns);
+}
+
+module_exit(ns_cleanup_module);
+
+MODULE_LICENSE ("GPL");
+MODULE_AUTHOR ("Artem B. Bityuckiy");
+MODULE_DESCRIPTION ("The NAND flash simulator");
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
new file mode 100644
index 0000000000..3bb32a7c6d
--- /dev/null
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Overview:
+ * Platform independent driver for NDFC (NanD Flash Controller)
+ * integrated into EP440 cores
+ *
+ * Ported to an OF platform driver by Sean MacLennan
+ *
+ * The NDFC supports multiple chips, but this driver only supports a
+ * single chip since I do not have access to any boards with
+ * multiple chips.
+ *
+ * Author: Thomas Gleixner
+ *
+ * Copyright 2006 IBM
+ * Copyright 2008 PIKA Technologies
+ * Sean MacLennan <smaclennan@pikatech.com>
+ */
+#include <linux/module.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/ndfc.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <asm/io.h>
+
+#define NDFC_MAX_CS 4
+
+struct ndfc_controller {
+ struct platform_device *ofdev;
+ void __iomem *ndfcbase;
+ struct nand_chip chip;
+ int chip_select;
+ struct nand_controller ndfc_control;
+};
+
+static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
+
+static void ndfc_select_chip(struct nand_chip *nchip, int chip)
+{
+ uint32_t ccr;
+ struct ndfc_controller *ndfc = nand_get_controller_data(nchip);
+
+ ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
+ if (chip >= 0) {
+ ccr &= ~NDFC_CCR_BS_MASK;
+ ccr |= NDFC_CCR_BS(chip + ndfc->chip_select);
+ } else
+ ccr |= NDFC_CCR_RESET_CE;
+ out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+}
+
+static void ndfc_hwcontrol(struct nand_chip *chip, int cmd, unsigned int ctrl)
+{
+ struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_CMD);
+ else
+ writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_ALE);
+}
+
+static int ndfc_ready(struct nand_chip *chip)
+{
+ struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+
+ return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
+}
+
+static void ndfc_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ uint32_t ccr;
+ struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+
+ ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
+ ccr |= NDFC_CCR_RESET_ECC;
+ out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+ wmb();
+}
+
+static int ndfc_calculate_ecc(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+ uint32_t ecc;
+ uint8_t *p = (uint8_t *)&ecc;
+
+ wmb();
+ ecc = in_be32(ndfc->ndfcbase + NDFC_ECC);
+ /* The NDFC uses Smart Media (SMC) bytes order */
+ ecc_code[0] = p[1];
+ ecc_code[1] = p[2];
+ ecc_code[2] = p[3];
+
+ return 0;
+}
+
+/*
+ * Speedups for buffer read/write/verify
+ *
+ * NDFC allows 32bit read/write of data. So we can speed up the buffer
+ * functions. No further checking, as nand_base will always read/write
+ * page aligned.
+ */
+static void ndfc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+ uint32_t *p = (uint32_t *) buf;
+
+ for(;len > 0; len -= 4)
+ *p++ = in_be32(ndfc->ndfcbase + NDFC_DATA);
+}
+
+static void ndfc_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
+{
+ struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+ uint32_t *p = (uint32_t *) buf;
+
+ for(;len > 0; len -= 4)
+ out_be32(ndfc->ndfcbase + NDFC_DATA, *p++);
+}
+
+/*
+ * Initialize chip structure
+ */
+static int ndfc_chip_init(struct ndfc_controller *ndfc,
+ struct device_node *node)
+{
+ struct device_node *flash_np;
+ struct nand_chip *chip = &ndfc->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ chip->legacy.IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
+ chip->legacy.IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA;
+ chip->legacy.cmd_ctrl = ndfc_hwcontrol;
+ chip->legacy.dev_ready = ndfc_ready;
+ chip->legacy.select_chip = ndfc_select_chip;
+ chip->legacy.chip_delay = 50;
+ chip->controller = &ndfc->ndfc_control;
+ chip->legacy.read_buf = ndfc_read_buf;
+ chip->legacy.write_buf = ndfc_write_buf;
+ chip->ecc.correct = rawnand_sw_hamming_correct;
+ chip->ecc.hwctl = ndfc_enable_hwecc;
+ chip->ecc.calculate = ndfc_calculate_ecc;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ nand_set_controller_data(chip, ndfc);
+
+ mtd->dev.parent = &ndfc->ofdev->dev;
+
+ flash_np = of_get_next_child(node, NULL);
+ if (!flash_np)
+ return -ENODEV;
+ nand_set_flash_node(chip, flash_np);
+
+ mtd->name = kasprintf(GFP_KERNEL, "%s.%pOFn", dev_name(&ndfc->ofdev->dev),
+ flash_np);
+ if (!mtd->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ goto err;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+
+err:
+ of_node_put(flash_np);
+ if (ret)
+ kfree(mtd->name);
+ return ret;
+}
+
+static int ndfc_probe(struct platform_device *ofdev)
+{
+ struct ndfc_controller *ndfc;
+ const __be32 *reg;
+ u32 ccr;
+ u32 cs;
+ int err, len;
+
+	/* Read the reg property (three 32-bit cells; the first is the chip select) */
+ reg = of_get_property(ofdev->dev.of_node, "reg", &len);
+ if (reg == NULL || len != 12) {
+ dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
+ return -ENOENT;
+ }
+
+ cs = be32_to_cpu(reg[0]);
+ if (cs >= NDFC_MAX_CS) {
+ dev_err(&ofdev->dev, "invalid CS number (%d)\n", cs);
+ return -EINVAL;
+ }
+
+ ndfc = &ndfc_ctrl[cs];
+ ndfc->chip_select = cs;
+
+ nand_controller_init(&ndfc->ndfc_control);
+ ndfc->ofdev = ofdev;
+ dev_set_drvdata(&ofdev->dev, ndfc);
+
+ ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
+ if (!ndfc->ndfcbase) {
+ dev_err(&ofdev->dev, "failed to get memory\n");
+ return -EIO;
+ }
+
+ ccr = NDFC_CCR_BS(ndfc->chip_select);
+
+ /* It is ok if ccr does not exist - just default to 0 */
+ reg = of_get_property(ofdev->dev.of_node, "ccr", NULL);
+ if (reg)
+ ccr |= be32_to_cpup(reg);
+
+ out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+
+ /* Set the bank settings if given */
+ reg = of_get_property(ofdev->dev.of_node, "bank-settings", NULL);
+ if (reg) {
+ int offset = NDFC_BCFG0 + (ndfc->chip_select << 2);
+ out_be32(ndfc->ndfcbase + offset, be32_to_cpup(reg));
+ }
+
+ err = ndfc_chip_init(ndfc, ofdev->dev.of_node);
+ if (err) {
+ iounmap(ndfc->ndfcbase);
+ return err;
+ }
+
+ return 0;
+}
+
+static void ndfc_remove(struct platform_device *ofdev)
+{
+ struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
+ struct nand_chip *chip = &ndfc->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ kfree(mtd->name);
+}
+
+static const struct of_device_id ndfc_match[] = {
+ { .compatible = "ibm,ndfc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ndfc_match);
+
+static struct platform_driver ndfc_driver = {
+ .driver = {
+ .name = "ndfc",
+ .of_match_table = ndfc_match,
+ },
+ .probe = ndfc_probe,
+ .remove_new = ndfc_remove,
+};
+
+module_platform_driver(ndfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("OF Platform driver for NDFC");
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
new file mode 100644
index 0000000000..c45bef6158
--- /dev/null
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -0,0 +1,2305 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
+ * Copyright © 2004 Micron Technology Inc.
+ * Copyright © 2004 David Brownell
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-bch.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/omap-dma.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include <linux/platform_data/elm.h>
+
+#include <linux/omap-gpmc.h>
+#include <linux/platform_data/mtd-nand-omap2.h>
+
+#define DRIVER_NAME "omap2-nand"
+#define OMAP_NAND_TIMEOUT_MS 5000
+
+#define NAND_Ecc_P1e (1 << 0)
+#define NAND_Ecc_P2e (1 << 1)
+#define NAND_Ecc_P4e (1 << 2)
+#define NAND_Ecc_P8e (1 << 3)
+#define NAND_Ecc_P16e (1 << 4)
+#define NAND_Ecc_P32e (1 << 5)
+#define NAND_Ecc_P64e (1 << 6)
+#define NAND_Ecc_P128e (1 << 7)
+#define NAND_Ecc_P256e (1 << 8)
+#define NAND_Ecc_P512e (1 << 9)
+#define NAND_Ecc_P1024e (1 << 10)
+#define NAND_Ecc_P2048e (1 << 11)
+
+#define NAND_Ecc_P1o (1 << 16)
+#define NAND_Ecc_P2o (1 << 17)
+#define NAND_Ecc_P4o (1 << 18)
+#define NAND_Ecc_P8o (1 << 19)
+#define NAND_Ecc_P16o (1 << 20)
+#define NAND_Ecc_P32o (1 << 21)
+#define NAND_Ecc_P64o (1 << 22)
+#define NAND_Ecc_P128o (1 << 23)
+#define NAND_Ecc_P256o (1 << 24)
+#define NAND_Ecc_P512o (1 << 25)
+#define NAND_Ecc_P1024o (1 << 26)
+#define NAND_Ecc_P2048o (1 << 27)
+
+#define TF(value) (value ? 1 : 0)
+
+#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
+#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
+#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
+#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
+#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
+#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
+#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
+#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
+
+#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
+#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
+#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
+#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
+
+#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
+#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
+#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
+#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
+#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
+#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
+#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
+#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
+
+#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
+#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
+#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
+#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
+
+#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
+#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
+
+#define PREFETCH_CONFIG1_CS_SHIFT 24
+#define ECC_CONFIG_CS_SHIFT 1
+#define CS_MASK 0x7
+#define ENABLE_PREFETCH (0x1 << 7)
+#define DMA_MPU_MODE_SHIFT 2
+#define ECCSIZE0_SHIFT 12
+#define ECCSIZE1_SHIFT 22
+#define ECC1RESULTSIZE 0x1
+#define ECCCLEAR 0x100
+#define ECC1 0x1
+#define PREFETCH_FIFOTHRESHOLD_MAX 0x40
+#define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8)
+#define PREFETCH_STATUS_COUNT(val) (val & 0x00003fff)
+#define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
+#define STATUS_BUFF_EMPTY 0x00000001
+
+#define SECTOR_BYTES 512
+/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
+#define BCH4_BIT_PAD 4
+
+/* GPMC ecc engine settings for read */
+#define BCH_WRAPMODE_1 1 /* BCH wrap mode 1 */
+#define BCH8R_ECC_SIZE0 0x1a /* ecc_size0 = 26 */
+#define BCH8R_ECC_SIZE1 0x2 /* ecc_size1 = 2 */
+#define BCH4R_ECC_SIZE0 0xd /* ecc_size0 = 13 */
+#define BCH4R_ECC_SIZE1 0x3 /* ecc_size1 = 3 */
+
+/* GPMC ecc engine settings for write */
+#define BCH_WRAPMODE_6 6 /* BCH wrap mode 6 */
+#define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */
+#define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */
+
+#define BBM_LEN 2
+
+static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
+ 0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
+ 0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93,
+ 0x07, 0x0e};
+static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
+ 0xac, 0x6b, 0xff, 0x99, 0x7b};
+static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
+
+struct omap_nand_info {
+ struct nand_chip nand;
+ struct platform_device *pdev;
+
+ int gpmc_cs;
+ bool dev_ready;
+ enum nand_io xfer_type;
+ enum omap_ecc ecc_opt;
+ struct device_node *elm_of_node;
+
+ unsigned long phys_base;
+ struct completion comp;
+ struct dma_chan *dma;
+ int gpmc_irq_fifo;
+ int gpmc_irq_count;
+ enum {
+ OMAP_NAND_IO_READ = 0, /* read */
+ OMAP_NAND_IO_WRITE, /* write */
+ } iomode;
+ u_char *buf;
+ int buf_len;
+ /* Interface to GPMC */
+ void __iomem *fifo;
+ struct gpmc_nand_regs reg;
+ struct gpmc_nand_ops *ops;
+ bool flash_bbt;
+ /* fields specific for BCHx_HW ECC scheme */
+ struct device *elm_dev;
+ /* NAND ready gpio */
+ struct gpio_desc *ready_gpiod;
+ unsigned int neccpg;
+ unsigned int nsteps_per_eccpg;
+ unsigned int eccpg_size;
+ unsigned int eccpg_bytes;
+ void (*data_in)(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit);
+ void (*data_out)(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit);
+};
+
+static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct omap_nand_info, nand);
+}
+
+static void omap_nand_data_in(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit);
+
+static void omap_nand_data_out(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit);
+
+/**
+ * omap_prefetch_enable - configures and starts prefetch transfer
+ * @cs: cs (chip select) number
+ * @fifo_th: fifo threshold to be used for read/ write
+ * @dma_mode: dma mode enable (1) or disable (0)
+ * @u32_count: number of bytes to be transferred
+ * @is_write: prefetch read(0) or write post(1) mode
+ * @info: NAND device structure containing platform data
+ */
+static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
+ unsigned int u32_count, int is_write, struct omap_nand_info *info)
+{
+ u32 val;
+
+ if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
+ return -1;
+
+ if (readl(info->reg.gpmc_prefetch_control))
+ return -EBUSY;
+
+ /* Set the amount of bytes to be prefetched */
+ writel(u32_count, info->reg.gpmc_prefetch_config2);
+
+	/* Set dma/mpu mode, select prefetch read or post write, and
+	 * enable the engine. Also record which CS made the request.
+	 */
+ val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
+ PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
+ (dma_mode << DMA_MPU_MODE_SHIFT) | (is_write & 0x1));
+ writel(val, info->reg.gpmc_prefetch_config1);
+
+ /* Start the prefetch engine */
+ writel(0x1, info->reg.gpmc_prefetch_control);
+
+ return 0;
+}
+
+/*
+ * omap_prefetch_reset - disables and stops the prefetch engine
+ */
+static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
+{
+ u32 config1;
+
+ /* check if the same module/cs is trying to reset */
+ config1 = readl(info->reg.gpmc_prefetch_config1);
+ if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
+ return -EINVAL;
+
+ /* Stop the PFPW engine */
+ writel(0x0, info->reg.gpmc_prefetch_control);
+
+ /* Reset/disable the PFPW engine */
+ writel(0x0, info->reg.gpmc_prefetch_config1);
+
+ return 0;
+}
+
+/**
+ * omap_nand_data_in_pref - NAND data in using prefetch engine
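+ * @chip: NAND chip object
+ * @buf: destination buffer
+ * @len: number of bytes to read
+ * @force_8bit: force plain 8-bit CPU reads instead of the prefetch engine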
+ */
+static void omap_nand_data_in_pref(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ uint32_t r_count = 0;
+ int ret = 0;
+ u32 *p = (u32 *)buf;
+ unsigned int pref_len;
+
+ if (force_8bit) {
+ omap_nand_data_in(chip, buf, len, force_8bit);
+ return;
+ }
+
+ /* read 32-bit words using prefetch and remaining bytes normally */
+
+ /* configure and start prefetch transfer */
+ pref_len = len - (len & 3);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, pref_len, 0x0, info);
+ if (ret) {
+ /* prefetch engine is busy, use CPU copy method */
+ omap_nand_data_in(chip, buf, len, false);
+ } else {
+ do {
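+			/* the FIFO count is reported in bytes; convert to 32-bit words */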
+ r_count = readl(info->reg.gpmc_prefetch_status);
+ r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
+ r_count = r_count >> 2;
+ ioread32_rep(info->fifo, p, r_count);
+ p += r_count;
+ pref_len -= r_count << 2;
+ } while (pref_len);
+ /* disable and stop the Prefetch engine */
+ omap_prefetch_reset(info->gpmc_cs, info);
+ /* fetch any remaining bytes */
+ if (len & 3)
+ omap_nand_data_in(chip, p, len & 3, false);
+ }
+}
+
+/**
+ * omap_nand_data_out_pref - NAND data out using Write Posting engine
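+ * @chip: NAND chip object
+ * @buf: source buffer
+ * @len: number of bytes to write
+ * @force_8bit: force plain 8-bit CPU writes instead of the posting engine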
+ */
+static void omap_nand_data_out_pref(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ uint32_t w_count = 0;
+ int i = 0, ret = 0;
+ u16 *p = (u16 *)buf;
+ unsigned long tim, limit;
+ u32 val;
+
+ if (force_8bit) {
+ omap_nand_data_out(chip, buf, len, force_8bit);
+ return;
+ }
+
+	/* take care of subpage writes: the loop below issues 16-bit
+	 * writes, so emit a leading odd byte by itself
+	 */
+ if (len % 2 != 0) {
+ writeb(*(u8 *)buf, info->fifo);
+ p = (u16 *)(buf + 1);
+ len--;
+ }
+
+ /* configure and start prefetch transfer */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
+ if (ret) {
+ /* write posting engine is busy, use CPU copy method */
+ omap_nand_data_out(chip, buf, len, false);
+ } else {
+ while (len) {
+ w_count = readl(info->reg.gpmc_prefetch_status);
+ w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
+ w_count = w_count >> 1;
+ for (i = 0; (i < w_count) && len; i++, len -= 2)
+ iowrite16(*p++, info->fifo);
+ }
+		/* wait for the data to be flushed out before resetting the prefetch engine */
+ tim = 0;
+ limit = (loops_per_jiffy *
+ msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ do {
+ cpu_relax();
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = PREFETCH_STATUS_COUNT(val);
+ } while (val && (tim++ < limit));
+
+ /* disable and stop the PFPW engine */
+ omap_prefetch_reset(info->gpmc_cs, info);
+ }
+}
+
+/*
+ * omap_nand_dma_callback: callback on the completion of dma transfer
+ * @data: pointer to completion data structure
+ */
+static void omap_nand_dma_callback(void *data)
+{
+ complete((struct completion *) data);
+}
+
+/*
+ * omap_nand_dma_transfer: configure and start dma transfer
+ * @chip: nand chip structure
+ * @addr: virtual address in RAM of source/destination
+ * @len: number of data bytes to be transferred
+ * @is_write: flag for read/write operation
+ */
+static inline int omap_nand_dma_transfer(struct nand_chip *chip,
+ const void *addr, unsigned int len,
+ int is_write)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ struct dma_async_tx_descriptor *tx;
+ enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE;
+ struct scatterlist sg;
+ unsigned long tim, limit;
+ unsigned n;
+ int ret;
+ u32 val;
+
+ if (!virt_addr_valid(addr))
+ goto out_copy;
+
+ sg_init_one(&sg, addr, len);
+ n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
+ if (n == 0) {
+ dev_err(&info->pdev->dev,
+ "Couldn't DMA map a %d byte buffer\n", len);
+ goto out_copy;
+ }
+
+ tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
+ is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx)
+ goto out_copy_unmap;
+
+ tx->callback = omap_nand_dma_callback;
+ tx->callback_param = &info->comp;
+ dmaengine_submit(tx);
+
+ init_completion(&info->comp);
+
+ /* setup and start DMA using dma_addr */
+ dma_async_issue_pending(info->dma);
+
+ /* configure and start prefetch transfer */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
+ if (ret)
+ /* PFPW engine is busy, use cpu copy method */
+ goto out_copy_unmap;
+
+ wait_for_completion(&info->comp);
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+
+ do {
+ cpu_relax();
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = PREFETCH_STATUS_COUNT(val);
+ } while (val && (tim++ < limit));
+
+ /* disable and stop the PFPW engine */
+ omap_prefetch_reset(info->gpmc_cs, info);
+
+ dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+ return 0;
+
+out_copy_unmap:
+ dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+out_copy:
+ is_write == 0 ? omap_nand_data_in(chip, (void *)addr, len, false)
+ : omap_nand_data_out(chip, addr, len, false);
+
+ return 0;
+}
+
+/**
+ * omap_nand_data_in_dma_pref - NAND data in using DMA and Prefetch
+ */
+static void omap_nand_data_in_dma_pref(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (force_8bit) {
+ omap_nand_data_in(chip, buf, len, force_8bit);
+ return;
+ }
+
+ if (len <= mtd->oobsize)
+ omap_nand_data_in_pref(chip, buf, len, false);
+ else
+ /* start transfer in DMA mode */
+ omap_nand_dma_transfer(chip, buf, len, 0x0);
+}
+
+/**
+ * omap_nand_data_out_dma_pref - NAND data out using DMA and write posting
+ */
+static void omap_nand_data_out_dma_pref(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (force_8bit) {
+ omap_nand_data_out(chip, buf, len, force_8bit);
+ return;
+ }
+
+ if (len <= mtd->oobsize)
+ omap_nand_data_out_pref(chip, buf, len, false);
+ else
+ /* start transfer in DMA mode */
+ omap_nand_dma_transfer(chip, buf, len, 0x1);
+}
+
+/*
+ * omap_nand_irq - GPMC irq handler
+ * @this_irq: gpmc irq number
+ * @dev: omap_nand_info structure pointer is passed here
+ */
+static irqreturn_t omap_nand_irq(int this_irq, void *dev)
+{
+ struct omap_nand_info *info = (struct omap_nand_info *) dev;
+ u32 bytes;
+
+ bytes = readl(info->reg.gpmc_prefetch_status);
+ bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
+ bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
+ if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
+ if (this_irq == info->gpmc_irq_count)
+ goto done;
+
+ if (info->buf_len && (info->buf_len < bytes))
+ bytes = info->buf_len;
+ else if (!info->buf_len)
+ bytes = 0;
+ iowrite32_rep(info->fifo, (u32 *)info->buf,
+ bytes >> 2);
+ info->buf = info->buf + bytes;
+ info->buf_len -= bytes;
+
+ } else {
+ ioread32_rep(info->fifo, (u32 *)info->buf,
+ bytes >> 2);
+ info->buf = info->buf + bytes;
+
+ if (this_irq == info->gpmc_irq_count)
+ goto done;
+ }
+
+ return IRQ_HANDLED;
+
+done:
+ complete(&info->comp);
+
+ disable_irq_nosync(info->gpmc_irq_fifo);
+ disable_irq_nosync(info->gpmc_irq_count);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * omap_nand_data_in_irq_pref - NAND data in using Prefetch and IRQ
+ */
+static void omap_nand_data_in_irq_pref(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ struct mtd_info *mtd = nand_to_mtd(&info->nand);
+ int ret = 0;
+
+ if (len <= mtd->oobsize || force_8bit) {
+ omap_nand_data_in(chip, buf, len, force_8bit);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_READ;
+ info->buf = buf;
+ init_completion(&info->comp);
+
+ /* configure and start prefetch transfer */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
+ if (ret) {
+ /* PFPW engine is busy, use cpu copy method */
+ omap_nand_data_in(chip, buf, len, false);
+ return;
+ }
+
+ info->buf_len = len;
+
+ enable_irq(info->gpmc_irq_count);
+ enable_irq(info->gpmc_irq_fifo);
+
+ /* waiting for read to complete */
+ wait_for_completion(&info->comp);
+
+ /* disable and stop the PFPW engine */
+ omap_prefetch_reset(info->gpmc_cs, info);
+ return;
+}
+
+/*
+ * omap_nand_data_out_irq_pref - NAND data out using write posting and IRQ
+ */
+static void omap_nand_data_out_irq_pref(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ struct mtd_info *mtd = nand_to_mtd(&info->nand);
+ int ret = 0;
+ unsigned long tim, limit;
+ u32 val;
+
+ if (len <= mtd->oobsize || force_8bit) {
+ omap_nand_data_out(chip, buf, len, force_8bit);
+ return;
+ }
+
+ info->iomode = OMAP_NAND_IO_WRITE;
+ info->buf = (u_char *) buf;
+ init_completion(&info->comp);
+
+	/* configure and start the write-posting transfer, FIFO threshold = 24 */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
+ if (ret) {
+ /* PFPW engine is busy, use cpu copy method */
+ omap_nand_data_out(chip, buf, len, false);
+ return;
+ }
+
+ info->buf_len = len;
+
+ enable_irq(info->gpmc_irq_count);
+ enable_irq(info->gpmc_irq_fifo);
+
+ /* waiting for write to complete */
+ wait_for_completion(&info->comp);
+
+	/* wait for the data to be flushed out before resetting the prefetch engine */
+ tim = 0;
+ limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+ do {
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = PREFETCH_STATUS_COUNT(val);
+ cpu_relax();
+ } while (val && (tim++ < limit));
+
+ /* disable and stop the PFPW engine */
+ omap_prefetch_reset(info->gpmc_cs, info);
+ return;
+}
+
+/**
+ * gen_true_ecc - This function will generate true ECC value
+ * @ecc_buf: buffer to store ecc code
+ *
+ * This generated true ECC value can be used when correcting
+ * data read from NAND flash memory core
+ */
+static void gen_true_ecc(u8 *ecc_buf)
+{
+ u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
+ ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
+
+ ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
+ P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
+ ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
+ P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
+ ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
+ P1e(tmp) | P2048o(tmp) | P2048e(tmp));
+}
+
+/**
+ * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
+ * @ecc_data1: ecc code from nand spare area
+ * @ecc_data2: ecc code from hardware register obtained from hardware ecc
+ * @page_data: page data
+ *
+ * This function compares two ECC's and indicates if there is an error.
+ * If the error can be corrected it will be corrected to the buffer.
+ * If there is no error, %0 is returned. If there is an error but it
+ * was corrected, %1 is returned. Otherwise, %-1 is returned.
+ */
+static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
+ u8 *ecc_data2, /* read from register */
+ u8 *page_data)
+{
+ uint i;
+ u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
+ u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
+ u8 ecc_bit[24];
+ u8 ecc_sum = 0;
+ u8 find_bit = 0;
+ uint find_byte = 0;
+ int isEccFF;
+
+ isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
+
+ gen_true_ecc(ecc_data1);
+ gen_true_ecc(ecc_data2);
+
+ for (i = 0; i <= 2; i++) {
+ *(ecc_data1 + i) = ~(*(ecc_data1 + i));
+ *(ecc_data2 + i) = ~(*(ecc_data2 + i));
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp0_bit[i] = *ecc_data1 % 2;
+ *ecc_data1 = *ecc_data1 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp1_bit[i] = *(ecc_data1 + 1) % 2;
+ *(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp2_bit[i] = *(ecc_data1 + 2) % 2;
+ *(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp0_bit[i] = *ecc_data2 % 2;
+ *ecc_data2 = *ecc_data2 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp1_bit[i] = *(ecc_data2 + 1) % 2;
+ *(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp2_bit[i] = *(ecc_data2 + 2) % 2;
+ *(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
+ }
+
+ for (i = 0; i < 6; i++)
+ ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
+
+ ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
+ ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
+
+ for (i = 0; i < 24; i++)
+ ecc_sum += ecc_bit[i];
+
+ switch (ecc_sum) {
+ case 0:
+ /* Not reached because this function is not called if
+ * ECC values are equal
+ */
+ return 0;
+
+ case 1:
+ /* Uncorrectable error */
+ pr_debug("ECC UNCORRECTED_ERROR 1\n");
+ return -EBADMSG;
+
+ case 11:
+		/* Uncorrectable error */
+ pr_debug("ECC UNCORRECTED_ERROR B\n");
+ return -EBADMSG;
+
+ case 12:
+ /* Correctable error */
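+		/*
+		 * Exactly half of the 24 syndrome bits are set: a single-bit
+		 * error whose byte and bit position are encoded in the
+		 * odd-numbered syndrome bits below.
+		 */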
+ find_byte = (ecc_bit[23] << 8) +
+ (ecc_bit[21] << 7) +
+ (ecc_bit[19] << 6) +
+ (ecc_bit[17] << 5) +
+ (ecc_bit[15] << 4) +
+ (ecc_bit[13] << 3) +
+ (ecc_bit[11] << 2) +
+ (ecc_bit[9] << 1) +
+ ecc_bit[7];
+
+ find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
+
+ pr_debug("Correcting single bit ECC error at offset: "
+ "%d, bit: %d\n", find_byte, find_bit);
+
+ page_data[find_byte] ^= (1 << find_bit);
+
+ return 1;
+ default:
+ if (isEccFF) {
+ if (ecc_data2[0] == 0 &&
+ ecc_data2[1] == 0 &&
+ ecc_data2[2] == 0)
+ return 0;
+ }
+ pr_debug("UNCORRECTED_ERROR default\n");
+ return -EBADMSG;
+ }
+}
+
+/**
+ * omap_correct_data - Compares the ECC read with HW generated ECC
+ * @chip: NAND chip object
+ * @dat: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * Compares the ecc read from nand spare area with ECC registers values
+ * and if ECC's mismatched, it will call 'omap_compare_ecc' for error
+ * detection and correction. If there are no errors, %0 is returned. If
+ * there were errors and all of the errors were corrected, the number of
+ * corrected errors is returned. If uncorrectable errors exist, %-1 is
+ * returned.
+ */
+static int omap_correct_data(struct nand_chip *chip, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ int blockCnt = 0, i = 0, ret = 0;
+ int stat = 0;
+
+ /* Ex NAND_ECC_HW12_2048 */
+ if (info->nand.ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST &&
+ info->nand.ecc.size == 2048)
+ blockCnt = 4;
+ else
+ blockCnt = 1;
+
+ for (i = 0; i < blockCnt; i++) {
+ if (memcmp(read_ecc, calc_ecc, 3) != 0) {
+ ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
+ if (ret < 0)
+ return ret;
+ /* keep track of the number of corrected errors */
+ stat += ret;
+ }
+ read_ecc += 3;
+ calc_ecc += 3;
+ dat += 512;
+ }
+ return stat;
+}
+
+/**
+ * omap_calculate_ecc - Generate non-inverted ECC bytes.
+ * @chip: NAND chip object
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Using noninverted ECC can be considered ugly since writing a blank
+ * page, i.e. padding, will clear the ECC bytes. This is no problem as
+ * long as nobody is trying to write data to the seemingly unused page. Reading
+ * an erased page will produce an ECC mismatch between generated and read
+ * ECC bytes that has to be dealt with separately.
+ */
+static int omap_calculate_ecc(struct nand_chip *chip, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ u32 val;
+
+ val = readl(info->reg.gpmc_ecc_config);
+ if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
+ return -EINVAL;
+
+ /* read ecc result */
+ val = readl(info->reg.gpmc_ecc1_result);
+ *ecc_code++ = val; /* P128e, ..., P1e */
+ *ecc_code++ = val >> 16; /* P128o, ..., P1o */
+ /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+ *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+
+ return 0;
+}
+
+/**
+ * omap_enable_hwecc - This function enables the hardware ecc functionality
+ * @chip: NAND chip object
+ * @mode: Read/Write mode
+ */
+static void omap_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ u32 val;
+
+ /* clear ecc and enable bits */
+ val = ECCCLEAR | ECC1;
+ writel(val, info->reg.gpmc_ecc_control);
+
+ /* program ecc and result sizes */
+ val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
+ ECC1RESULTSIZE);
+ writel(val, info->reg.gpmc_ecc_size_config);
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+ break;
+ case NAND_ECC_READSYN:
+ writel(ECCCLEAR, info->reg.gpmc_ecc_control);
+ break;
+ default:
+ dev_info(&info->pdev->dev,
+ "error: unrecognized Mode[%d]!\n", mode);
+ break;
+ }
+
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ writel(val, info->reg.gpmc_ecc_config);
+}
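+
+/*
+ * Example GPMC_ECC_CONFIG value from the expression above, assuming a
+ * 16-bit device on chip-select 0:
+ *
+ * val = (1 << 7) | (0 << 1) | 0x1 = 0x81
+ *
+ * i.e. 16-bit column ECC, CS0, ECC engine enabled.
+ */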
+
+/**
+ * omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
+ * @chip: NAND chip object
+ * @mode: Read/Write mode
+ *
+ * When using BCH with SW correction (i.e. no ELM), sector size is set
+ * to 512 bytes and we use BCH_WRAPMODE_6 wrapping mode
+ * for both reading and writing with:
+ * eccsize0 = 0 (no additional protected byte in spare area)
+ * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
+ */
+static void __maybe_unused omap_enable_hwecc_bch(struct nand_chip *chip,
+ int mode)
+{
+ unsigned int bch_type;
+ unsigned int dev_width, nsectors;
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ enum omap_ecc ecc_opt = info->ecc_opt;
+ u32 val, wr_mode;
+ unsigned int ecc_size1, ecc_size0;
+
+ /* GPMC configurations for calculating ECC */
+ switch (ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ bch_type = 0;
+ nsectors = 1;
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ bch_type = 0;
+ nsectors = chip->ecc.steps;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = BCH_WRAPMODE_1;
+ ecc_size0 = BCH4R_ECC_SIZE0;
+ ecc_size1 = BCH4R_ECC_SIZE1;
+ } else {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ }
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ bch_type = 1;
+ nsectors = 1;
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_type = 1;
+ nsectors = chip->ecc.steps;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = BCH_WRAPMODE_1;
+ ecc_size0 = BCH8R_ECC_SIZE0;
+ ecc_size1 = BCH8R_ECC_SIZE1;
+ } else {
+ wr_mode = BCH_WRAPMODE_6;
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+ }
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ bch_type = 0x2;
+ nsectors = chip->ecc.steps;
+ if (mode == NAND_ECC_READ) {
+ wr_mode = 0x01;
+ ecc_size0 = 52; /* ECC bits in nibbles per sector */
+ ecc_size1 = 0; /* non-ECC bits in nibbles per sector */
+ } else {
+ wr_mode = 0x01;
+ ecc_size0 = 0; /* extra bits in nibbles per sector */
+ ecc_size1 = 52; /* OOB bits in nibbles per sector */
+ }
+ break;
+ default:
+ return;
+ }
+
+ writel(ECC1, info->reg.gpmc_ecc_control);
+
+ /* Configure ecc size for BCH */
+ val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
+ writel(val, info->reg.gpmc_ecc_size_config);
+
+ dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+
+ /* BCH configuration */
+ val = ((1 << 16) | /* enable BCH */
+ (bch_type << 12) | /* BCH4/BCH8/BCH16 */
+ (wr_mode << 8) | /* wrap mode */
+ (dev_width << 7) | /* bus width */
+ (((nsectors-1) & 0x7) << 4) | /* number of sectors */
+ (info->gpmc_cs << 1) | /* ECC CS */
+ (0x1)); /* enable ECC */
+
+ writel(val, info->reg.gpmc_ecc_config);
+
+ /* Clear ecc and enable bits */
+ writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+}
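+
+/*
+ * Sketch of the resulting GPMC_ECC_CONFIG value, assuming a BCH8 write
+ * on an 8-bit device, CS0, four sectors:
+ *
+ * val = (1 << 16) | (1 << 12) | (BCH_WRAPMODE_6 << 8) | (0 << 7) |
+ *       (3 << 4) | (0 << 1) | 0x1
+ *
+ * i.e. BCH enabled, type BCH8, wrap mode 6, and the sectors field
+ * holding nsectors - 1 = 3.
+ */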
+
+static u8 bch4_polynomial[] = {0x28, 0x13, 0xcc, 0x39, 0x96, 0xac, 0x7f};
+static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
+ 0x97, 0x79, 0xe5, 0x24, 0xb5};
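+
+/*
+ * The constant arrays above are XOR-ed into the calculated BCH4/BCH8
+ * remainders in _omap_calculate_ecc_bch() below; per the comments
+ * there, this makes the ECC of blank (all-0xFF) pages read back as
+ * 0x0, so erased pages are not flagged as ECC failures.
+ */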
+
+/**
+ * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_calc: The ecc_code buffer
+ * @i: The sector number (for a multi sector page)
+ *
+ * Supports calculation of BCH4/8/16 ECC vectors for one sector
+ * within a page. The sector number is passed in @i.
+ */
+static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc, int i)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ int eccbytes = info->nand.ecc.bytes;
+ struct gpmc_nand_regs *gpmc_regs = &info->reg;
+ u8 *ecc_code;
+ unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
+ u32 val;
+ int j;
+
+ ecc_code = ecc_calc;
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
+ bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
+ *ecc_code++ = (bch_val4 & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val3 & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val2 & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val1 & 0xFF);
+ break;
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH4_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val2 & 0xF) << 4) |
+ ((bch_val1 >> 28) & 0xF);
+ *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val1 & 0xF) << 4);
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ val = readl(gpmc_regs->gpmc_bch_result6[i]);
+ ecc_code[0] = ((val >> 8) & 0xFF);
+ ecc_code[1] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result5[i]);
+ ecc_code[2] = ((val >> 24) & 0xFF);
+ ecc_code[3] = ((val >> 16) & 0xFF);
+ ecc_code[4] = ((val >> 8) & 0xFF);
+ ecc_code[5] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result4[i]);
+ ecc_code[6] = ((val >> 24) & 0xFF);
+ ecc_code[7] = ((val >> 16) & 0xFF);
+ ecc_code[8] = ((val >> 8) & 0xFF);
+ ecc_code[9] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result3[i]);
+ ecc_code[10] = ((val >> 24) & 0xFF);
+ ecc_code[11] = ((val >> 16) & 0xFF);
+ ecc_code[12] = ((val >> 8) & 0xFF);
+ ecc_code[13] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result2[i]);
+ ecc_code[14] = ((val >> 24) & 0xFF);
+ ecc_code[15] = ((val >> 16) & 0xFF);
+ ecc_code[16] = ((val >> 8) & 0xFF);
+ ecc_code[17] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result1[i]);
+ ecc_code[18] = ((val >> 24) & 0xFF);
+ ecc_code[19] = ((val >> 16) & 0xFF);
+ ecc_code[20] = ((val >> 8) & 0xFF);
+ ecc_code[21] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result0[i]);
+ ecc_code[22] = ((val >> 24) & 0xFF);
+ ecc_code[23] = ((val >> 16) & 0xFF);
+ ecc_code[24] = ((val >> 8) & 0xFF);
+ ecc_code[25] = ((val >> 0) & 0xFF);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* ECC scheme specific syndrome customizations */
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back
+ */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch4_polynomial[j];
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* Set 8th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back
+ */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch8_polynomial[j];
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* Set 14th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
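+
+/*
+ * Byte-ordering note for the BCH8 case above: the 13-byte code is
+ * emitted most-significant byte first, so ecc_code[0] comes from the
+ * low byte of BCH_RESULT3 and ecc_code[12] from the low byte of
+ * BCH_RESULT0; elm_load_syndrome() later consumes these bytes in
+ * reverse order when filling the syndrome fragment registers.
+ */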
+
+/**
+ * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
+ * @chip: NAND chip object
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_calc: Buffer storing the calculated ECC bytes
+ *
+ * Supports calculation of BCH4/8/16 ECC vectors for one sector. This is
+ * used when SW-based correction is required, as the ECC is then needed
+ * for one sector at a time.
+ */
+static int omap_calculate_ecc_bch_sw(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_calc)
+{
+ return _omap_calculate_ecc_bch(nand_to_mtd(chip), dat, ecc_calc, 0);
+}
+
+/**
+ * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_calc: Buffer storing the calculated ECC bytes
+ *
+ * Supports calculation of BCH4/8/16 ECC vectors for the entire page in one go.
+ */
+static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ int eccbytes = info->nand.ecc.bytes;
+ unsigned long nsectors;
+ int i, ret;
+
+ nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
+ for (i = 0; i < nsectors; i++) {
+ ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
+ if (ret)
+ return ret;
+
+ ecc_calc += eccbytes;
+ }
+
+ return 0;
+}
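+
+/*
+ * The sector count is read back from bits [6:4] of GPMC_ECC_CONFIG,
+ * the field programmed as nsectors - 1 in omap_enable_hwecc_bch(), so
+ * this helper always matches whatever the last ecc.hwctl() call set up.
+ */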
+
+/**
+ * erased_sector_bitflips - count bit flips
+ * @data: data sector buffer
+ * @oob: oob buffer
+ * @info: omap_nand_info
+ *
+ * Check whether the number of bit flips in an erased page falls below
+ * the correctable level. If it does, report the page as erased with
+ * correctable bit flips; otherwise report the page as uncorrectable.
+ */
+static int erased_sector_bitflips(u_char *data, u_char *oob,
+ struct omap_nand_info *info)
+{
+ int flip_bits = 0, i;
+
+ for (i = 0; i < info->nand.ecc.size; i++) {
+ flip_bits += hweight8(~data[i]);
+ if (flip_bits > info->nand.ecc.strength)
+ return 0;
+ }
+
+ for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
+ flip_bits += hweight8(~oob[i]);
+ if (flip_bits > info->nand.ecc.strength)
+ return 0;
+ }
+
+ /*
+ * Bit flips fall within the correctable level.
+ * Fill the data area with 0xFF.
+ */
+ if (flip_bits) {
+ memset(data, 0xFF, info->nand.ecc.size);
+ memset(oob, 0xFF, info->nand.ecc.bytes);
+ }
+
+ return flip_bits;
+}
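+
+/*
+ * Example, assuming BCH8 (ecc.size = 512, strength = 8): up to eight
+ * 0-bits across the sector and its ECC bytes are accepted as an erased
+ * page with bit-flips; the buffers are then rewritten to 0xFF so the
+ * caller sees clean erased data, and the returned flip count feeds the
+ * bit-flip accounting.
+ */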
+
+/**
+ * omap_elm_correct_data - corrects page data area in case error reported
+ * @chip: NAND chip object
+ * @data: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * The calculated ECC vector is reported as zero for error-free pages.
+ * For a non-zero ECC vector, first filter out erased pages, then
+ * process the data via ELM to detect bit-flips.
+ */
+static int omap_elm_correct_data(struct nand_chip *chip, u_char *data,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ struct nand_ecc_ctrl *ecc = &info->nand.ecc;
+ int eccsteps = info->nsteps_per_eccpg;
+ int i, j, stat = 0;
+ int eccflag, actual_eccbytes;
+ struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
+ u_char *ecc_vec = calc_ecc;
+ u_char *spare_ecc = read_ecc;
+ u_char *erased_ecc_vec;
+ u_char *buf;
+ int bitflip_count;
+ bool is_error_reported = false;
+ u32 bit_pos, byte_pos, error_max, pos;
+ int err;
+
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* omit 7th ECC byte reserved for ROM code compatibility */
+ actual_eccbytes = ecc->bytes - 1;
+ erased_ecc_vec = bch4_vector;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* omit 14th ECC byte reserved for ROM code compatibility */
+ actual_eccbytes = ecc->bytes - 1;
+ erased_ecc_vec = bch8_vector;
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ actual_eccbytes = ecc->bytes;
+ erased_ecc_vec = bch16_vector;
+ break;
+ default:
+ dev_err(&info->pdev->dev, "invalid driver configuration\n");
+ return -EINVAL;
+ }
+
+ /* Initialize elm error vector to zero */
+ memset(err_vec, 0, sizeof(err_vec));
+
+ for (i = 0; i < eccsteps; i++) {
+ eccflag = 0; /* initialize eccflag */
+
+ /*
+ * Check whether any error was reported; in case of an
+ * error, a non-zero ECC value is seen.
+ */
+ for (j = 0; j < actual_eccbytes; j++) {
+ if (calc_ecc[j] != 0) {
+ eccflag = 1; /* non zero ecc, error present */
+ break;
+ }
+ }
+
+ if (eccflag == 1) {
+ if (memcmp(calc_ecc, erased_ecc_vec,
+ actual_eccbytes) == 0) {
+ /*
+ * calc_ecc[] matches the pattern for the ECC of an
+ * erased page (all 0xff), so this is definitely an
+ * erased page.
+ */
+ } else {
+ buf = &data[info->nand.ecc.size * i];
+ /*
+ * Count the number of 0-bits in read_buf.
+ * This check can be removed once a similar check is
+ * introduced in the generic NAND driver.
+ */
+ bitflip_count = erased_sector_bitflips(
+ buf, read_ecc, info);
+ if (bitflip_count) {
+ /*
+ * The number of 0-bits is within ECC limits,
+ * so this may be an erased page.
+ */
+ stat += bitflip_count;
+ } else {
+ /*
+ * Too many 0-bits. It may be either
+ * - a programmed page, or
+ * - an erased page with many bit-flips,
+ * so this page must be checked by the ELM.
+ */
+ err_vec[i].error_reported = true;
+ is_error_reported = true;
+ }
+ }
+ }
+
+ /* Advance to the next sector's ECC vectors */
+ calc_ecc += ecc->bytes;
+ read_ecc += ecc->bytes;
+ }
+
+ /* Check if any error reported */
+ if (!is_error_reported)
+ return stat;
+
+ /* Decode BCH error using ELM module */
+ elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
+
+ err = 0;
+ for (i = 0; i < eccsteps; i++) {
+ if (err_vec[i].error_uncorrectable) {
+ dev_err(&info->pdev->dev,
+ "uncorrectable bit-flips found\n");
+ err = -EBADMSG;
+ } else if (err_vec[i].error_reported) {
+ for (j = 0; j < err_vec[i].error_count; j++) {
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* Add 4 bits to take care of padding */
+ pos = err_vec[i].error_loc[j] +
+ BCH4_BIT_PAD;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ case OMAP_ECC_BCH16_CODE_HW:
+ pos = err_vec[i].error_loc[j];
+ break;
+ default:
+ return -EINVAL;
+ }
+ error_max = (ecc->size + actual_eccbytes) * 8;
+ /* Calculate bit position of error */
+ bit_pos = pos % 8;
+
+ /* Calculate byte position of error */
+ byte_pos = (error_max - pos - 1) / 8;
+
+ if (pos < error_max) {
+ if (byte_pos < 512) {
+ pr_debug("bitflip@dat[%d]=%x\n",
+ byte_pos, data[byte_pos]);
+ data[byte_pos] ^= 1 << bit_pos;
+ } else {
+ pr_debug("bitflip@oob[%d]=%x\n",
+ (byte_pos - 512),
+ spare_ecc[byte_pos - 512]);
+ spare_ecc[byte_pos - 512] ^=
+ 1 << bit_pos;
+ }
+ } else {
+ dev_err(&info->pdev->dev,
+ "invalid bit-flip @ %d:%d\n",
+ byte_pos, bit_pos);
+ err = -EBADMSG;
+ }
+ }
+ }
+
+ /* Update number of correctable errors */
+ stat = max_t(unsigned int, stat, err_vec[i].error_count);
+
+ /* Advance data and spare pointers by one sector */
+ data += ecc->size;
+ spare_ecc += ecc->bytes;
+ }
+
+ return (err) ? err : stat;
+}
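+
+/*
+ * Worked example of the position math above, assuming BCH8
+ * (ecc->size = 512, actual_eccbytes = 13): error_max = (512 + 13) * 8 =
+ * 4200 bits, and ELM positions count from the end of the protected
+ * region, so pos = 4199 maps to byte_pos = 0, bit_pos = 7 (top bit of
+ * the first data byte) while pos = 0 maps to byte_pos = 524, i.e. the
+ * last of the 13 ECC bytes in the spare area.
+ */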
+
+/**
+ * omap_write_page_bch - BCH ecc based write page function for entire page
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page
+ *
+ * Custom write page method, evolved to support multi-sector writing in one shot.
+ */
+static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ unsigned int eccpg;
+ int ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (eccpg = 0; eccpg < info->neccpg; eccpg++) {
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
+
+ /* Write data */
+ info->data_out(chip, buf + (eccpg * info->eccpg_size),
+ info->eccpg_size, false);
+
+ /* Update ecc vector from GPMC result registers */
+ ret = omap_calculate_ecc_bch_multi(mtd,
+ buf + (eccpg * info->eccpg_size),
+ ecc_calc);
+ if (ret)
+ return ret;
+
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc,
+ chip->oob_poi,
+ eccpg * info->eccpg_bytes,
+ info->eccpg_bytes);
+ if (ret)
+ return ret;
+ }
+
+ /* Write ecc vector to OOB area */
+ info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * omap_write_subpage_bch - BCH hardware ECC based subpage write
+ * @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * OMAP optimized subpage write method.
+ */
+static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ u32 start_step = offset / ecc_size;
+ u32 end_step = (offset + data_len - 1) / ecc_size;
+ unsigned int eccpg;
+ int step, ret = 0;
+
+ /*
+ * Write the entire page in one go, which is optimal as the
+ * ECC is calculated by hardware.
+ * ECC is calculated for all subpages, but we keep only the
+ * parts we want.
+ */
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (eccpg = 0; eccpg < info->neccpg; eccpg++) {
+ /* Enable GPMC ECC engine */
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
+
+ /* Write data */
+ info->data_out(chip, buf + (eccpg * info->eccpg_size),
+ info->eccpg_size, false);
+
+ for (step = 0; step < info->nsteps_per_eccpg; step++) {
+ unsigned int base_step = eccpg * info->nsteps_per_eccpg;
+ const u8 *bufoffs = buf + (eccpg * info->eccpg_size);
+
+ /* Mask ECC of un-touched subpages with 0xFFs */
+ if ((step + base_step) < start_step ||
+ (step + base_step) > end_step)
+ memset(ecc_calc + (step * ecc_bytes), 0xff,
+ ecc_bytes);
+ else
+ ret = _omap_calculate_ecc_bch(mtd,
+ bufoffs + (step * ecc_size),
+ ecc_calc + (step * ecc_bytes),
+ step);
+
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Copy the calculated ECC for the whole page including the
+ * masked values (0xFF) corresponding to unwritten subpages.
+ */
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi,
+ eccpg * info->eccpg_bytes,
+ info->eccpg_bytes);
+ if (ret)
+ return ret;
+ }
+
+ /* write OOB buffer to NAND device */
+ info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
+
+ return nand_prog_page_end_op(chip);
+}
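+
+/*
+ * Masking the ECC of untouched subpages with 0xFF above leaves those
+ * OOB bytes in the erased state (NAND programming only clears bits), so
+ * a later write of the remaining subpages can still program their real
+ * ECC without an intervening erase.
+ */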
+
+/**
+ * omap_read_page_bch - BCH ecc based page read function for entire page
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * For the BCH ECC scheme, the GPMC is used for syndrome calculation
+ * and the ELM module for error correction.
+ * Custom method evolved to support ELM error correction and
+ * multi-sector reading. On a read, the page data area is read along
+ * with the OOB data with the ECC engine enabled, and the ECC vector is
+ * updated after the OOB read. For error-free pages the ECC vector is
+ * reported as zero.
+ */
+static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0, eccpg;
+ int stat, ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (eccpg = 0; eccpg < info->neccpg; eccpg++) {
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ /* Read data */
+ ret = nand_change_read_column_op(chip, eccpg * info->eccpg_size,
+ buf + (eccpg * info->eccpg_size),
+ info->eccpg_size, false);
+ if (ret)
+ return ret;
+
+ /* Read oob bytes */
+ ret = nand_change_read_column_op(chip,
+ mtd->writesize + BBM_LEN +
+ (eccpg * info->eccpg_bytes),
+ chip->oob_poi + BBM_LEN +
+ (eccpg * info->eccpg_bytes),
+ info->eccpg_bytes, false);
+ if (ret)
+ return ret;
+
+ /* Calculate ecc bytes */
+ ret = omap_calculate_ecc_bch_multi(mtd,
+ buf + (eccpg * info->eccpg_size),
+ ecc_calc);
+ if (ret)
+ return ret;
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code,
+ chip->oob_poi,
+ eccpg * info->eccpg_bytes,
+ info->eccpg_bytes);
+ if (ret)
+ return ret;
+
+ stat = chip->ecc.correct(chip,
+ buf + (eccpg * info->eccpg_size),
+ ecc_code, ecc_calc);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ return max_bitflips;
+}
+
+/**
+ * is_elm_present - checks for presence of ELM module by scanning DT nodes
+ * @info: NAND device structure containing platform data
+ * @elm_node: ELM's DT node
+ */
+static bool is_elm_present(struct omap_nand_info *info,
+ struct device_node *elm_node)
+{
+ struct platform_device *pdev;
+
+ /* check whether elm-id is passed via DT */
+ if (!elm_node) {
+ dev_err(&info->pdev->dev, "ELM devicetree node not found\n");
+ return false;
+ }
+ pdev = of_find_device_by_node(elm_node);
+ /* check whether ELM device is registered */
+ if (!pdev) {
+ dev_err(&info->pdev->dev, "ELM device not found\n");
+ return false;
+ }
+ /* ELM module available, now configure it */
+ info->elm_dev = &pdev->dev;
+ return true;
+}
+
+static bool omap2_nand_ecc_check(struct omap_nand_info *info)
+{
+ bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
+
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ ecc_needs_omap_bch = false;
+ ecc_needs_bch = true;
+ ecc_needs_elm = false;
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ case OMAP_ECC_BCH8_CODE_HW:
+ case OMAP_ECC_BCH16_CODE_HW:
+ ecc_needs_omap_bch = true;
+ ecc_needs_bch = false;
+ ecc_needs_elm = true;
+ break;
+ default:
+ ecc_needs_omap_bch = false;
+ ecc_needs_bch = false;
+ ecc_needs_elm = false;
+ break;
+ }
+
+ if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
+ dev_err(&info->pdev->dev,
+ "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
+ return false;
+ }
+ if (ecc_needs_omap_bch && !IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)) {
+ dev_err(&info->pdev->dev,
+ "CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
+ return false;
+ }
+ if (ecc_needs_elm && !is_elm_present(info, info->elm_of_node)) {
+ dev_err(&info->pdev->dev, "ELM not available\n");
+ return false;
+ }
+
+ return true;
+}
+
+static const char * const nand_xfer_types[] = {
+ [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
+ [NAND_OMAP_POLLED] = "polled",
+ [NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
+ [NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
+};
+
+static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
+{
+ struct device_node *child = dev->of_node;
+ int i;
+ const char *s;
+ u32 cs;
+
+ if (of_property_read_u32(child, "reg", &cs) < 0) {
+ dev_err(dev, "reg not found in DT\n");
+ return -EINVAL;
+ }
+
+ info->gpmc_cs = cs;
+
+ /* detect availability of ELM module. Won't be present pre-OMAP4 */
+ info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
+ if (!info->elm_of_node) {
+ info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
+ if (!info->elm_of_node)
+ dev_dbg(dev, "ti,elm-id not in DT\n");
+ }
+
+ /* select ecc-scheme for NAND */
+ if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
+ dev_err(dev, "ti,nand-ecc-opt not found\n");
+ return -EINVAL;
+ }
+
+ if (!strcmp(s, "sw")) {
+ info->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
+ } else if (!strcmp(s, "ham1") ||
+ !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) {
+ info->ecc_opt = OMAP_ECC_HAM1_CODE_HW;
+ } else if (!strcmp(s, "bch4")) {
+ if (info->elm_of_node)
+ info->ecc_opt = OMAP_ECC_BCH4_CODE_HW;
+ else
+ info->ecc_opt = OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
+ } else if (!strcmp(s, "bch8")) {
+ if (info->elm_of_node)
+ info->ecc_opt = OMAP_ECC_BCH8_CODE_HW;
+ else
+ info->ecc_opt = OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
+ } else if (!strcmp(s, "bch16")) {
+ info->ecc_opt = OMAP_ECC_BCH16_CODE_HW;
+ } else {
+ dev_err(dev, "unrecognized value for ti,nand-ecc-opt\n");
+ return -EINVAL;
+ }
+
+ /* select data transfer mode */
+ if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) {
+ for (i = 0; i < ARRAY_SIZE(nand_xfer_types); i++) {
+ if (!strcasecmp(s, nand_xfer_types[i])) {
+ info->xfer_type = i;
+ return 0;
+ }
+ }
+
+ dev_err(dev, "unrecognized value for ti,nand-xfer-type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
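+
+/*
+ * Illustrative device-tree fragment matched by the parsing above (the
+ * property values are examples only):
+ *
+ * nand@0,0 {
+ *         reg = <0 0 4>;  (GPMC CS0, offset 0, IO size 4)
+ *         ti,nand-ecc-opt = "bch8";
+ *         ti,nand-xfer-type = "prefetch-dma";
+ *         ti,elm-id = <&elm>;
+ * };
+ */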
+
+static int omap_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct nand_chip *chip = &info->nand;
+ int off = BBM_LEN;
+
+ if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ off = 1;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = off;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int omap_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct nand_chip *chip = &info->nand;
+ int off = BBM_LEN;
+
+ if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ off = 1;
+
+ if (section)
+ return -ERANGE;
+
+ off += chip->ecc.total;
+ if (off >= mtd->oobsize)
+ return -ERANGE;
+
+ oobregion->offset = off;
+ oobregion->length = mtd->oobsize - off;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
+ .ecc = omap_ooblayout_ecc,
+ .free = omap_ooblayout_free,
+};
+
+static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int nsteps = nanddev_get_ecc_nsteps(nand);
+ unsigned int ecc_bytes = nanddev_get_ecc_bytes_per_step(nand);
+ int off = BBM_LEN;
+
+ if (section >= nsteps)
+ return -ERANGE;
+
+ /*
+ * When SW correction is employed, one OMAP specific marker byte is
+ * reserved after each ECC step.
+ */
+ oobregion->offset = off + (section * (ecc_bytes + 1));
+ oobregion->length = ecc_bytes;
+
+ return 0;
+}
+
+static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int nsteps = nanddev_get_ecc_nsteps(nand);
+ unsigned int ecc_bytes = nanddev_get_ecc_bytes_per_step(nand);
+ int off = BBM_LEN;
+
+ if (section)
+ return -ERANGE;
+
+ /*
+ * When SW correction is employed, one OMAP specific marker byte is
+ * reserved after each ECC step.
+ */
+ off += ((ecc_bytes + 1) * nsteps);
+ if (off >= mtd->oobsize)
+ return -ERANGE;
+
+ oobregion->offset = off;
+ oobregion->length = mtd->oobsize - off;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
+ .ecc = omap_sw_ooblayout_ecc,
+ .free = omap_sw_ooblayout_free,
+};
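+
+/*
+ * Resulting spare-area layout, assuming BCH8 with software correction
+ * on a 2 KiB page with 64 OOB bytes (4 steps of 13 ECC bytes plus one
+ * marker byte each, BBM_LEN = 2): the ECC groups start at offsets 2,
+ * 16, 30 and 44, each reporting its first 13 bytes as ECC, and
+ * everything from byte 58 upward is free.
+ */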
+
+static int omap_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct device *dev = &info->pdev->dev;
+ int min_oobbytes = BBM_LEN;
+ int elm_bch_strength = -1;
+ int oobbytes_per_step;
+ dma_cap_mask_t mask;
+ int err;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ else
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ /* Re-populate low-level callbacks based on xfer modes */
+ switch (info->xfer_type) {
+ case NAND_OMAP_PREFETCH_POLLED:
+ info->data_in = omap_nand_data_in_pref;
+ info->data_out = omap_nand_data_out_pref;
+ break;
+
+ case NAND_OMAP_POLLED:
+ /* Use nand_base defaults for {read,write}_buf */
+ break;
+
+ case NAND_OMAP_PREFETCH_DMA:
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ info->dma = dma_request_chan(dev->parent, "rxtx");
+
+ if (IS_ERR(info->dma)) {
+ dev_err(dev, "DMA engine request failed\n");
+ return PTR_ERR(info->dma);
+ } else {
+ struct dma_slave_config cfg;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.src_addr = info->phys_base;
+ cfg.dst_addr = info->phys_base;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 16;
+ cfg.dst_maxburst = 16;
+ err = dmaengine_slave_config(info->dma, &cfg);
+ if (err) {
+ dev_err(dev,
+ "DMA engine slave config failed: %d\n",
+ err);
+ return err;
+ }
+
+ info->data_in = omap_nand_data_in_dma_pref;
+ info->data_out = omap_nand_data_out_dma_pref;
+ }
+ break;
+
+ case NAND_OMAP_PREFETCH_IRQ:
+ info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
+ if (info->gpmc_irq_fifo <= 0)
+ return -ENODEV;
+ err = devm_request_irq(dev, info->gpmc_irq_fifo,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-fifo", info);
+ if (err) {
+ dev_err(dev, "Requesting IRQ %d, error %d\n",
+ info->gpmc_irq_fifo, err);
+ info->gpmc_irq_fifo = 0;
+ return err;
+ }
+
+ info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
+ if (info->gpmc_irq_count <= 0)
+ return -ENODEV;
+ err = devm_request_irq(dev, info->gpmc_irq_count,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-count", info);
+ if (err) {
+ dev_err(dev, "Requesting IRQ %d, error %d\n",
+ info->gpmc_irq_count, err);
+ info->gpmc_irq_count = 0;
+ return err;
+ }
+
+ info->data_in = omap_nand_data_in_irq_pref;
+ info->data_out = omap_nand_data_out_irq_pref;
+ break;
+
+ default:
+ dev_err(dev, "xfer_type %d not supported!\n", info->xfer_type);
+ return -EINVAL;
+ }
+
+ if (!omap2_nand_ecc_check(info))
+ return -EINVAL;
+
+ /*
+ * Bail out early to let the NAND_ECC_ENGINE_TYPE_SOFT code create
+ * its own ooblayout instead of using ours.
+ */
+ if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ return 0;
+ }
+
+ /* Populate MTD interface based on ECC scheme */
+ switch (info->ecc_opt) {
+ case OMAP_ECC_HAM1_CODE_HW:
+ dev_info(dev, "nand: using OMAP_ECC_HAM1_CODE_HW\n");
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.bytes = 3;
+ chip->ecc.size = 512;
+ chip->ecc.strength = 1;
+ chip->ecc.calculate = omap_calculate_ecc;
+ chip->ecc.hwctl = omap_enable_hwecc;
+ chip->ecc.correct = omap_correct_data;
+ mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+ oobbytes_per_step = chip->ecc.bytes;
+
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ min_oobbytes = 1;
+
+ break;
+
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 7;
+ chip->ecc.strength = 4;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = rawnand_sw_bch_correct;
+ chip->ecc.calculate = omap_calculate_ecc_bch_sw;
+ mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
+ /* Reserve one byte for the OMAP marker */
+ oobbytes_per_step = chip->ecc.bytes + 1;
+ /* Software BCH library is used for locating errors */
+ err = rawnand_sw_bch_init(chip);
+ if (err) {
+ dev_err(dev, "Unable to use BCH library\n");
+ return err;
+ }
+ break;
+
+ case OMAP_ECC_BCH4_CODE_HW:
+ pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.size = 512;
+ /* The 8th ECC byte is kept reserved for ROM-code compatibility */
+ chip->ecc.bytes = 7 + 1;
+ chip->ecc.strength = 4;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = omap_elm_correct_data;
+ chip->ecc.read_page = omap_read_page_bch;
+ chip->ecc.write_page = omap_write_page_bch;
+ chip->ecc.write_subpage = omap_write_subpage_bch;
+ mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+ oobbytes_per_step = chip->ecc.bytes;
+ elm_bch_strength = BCH4_ECC;
+ break;
+
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 13;
+ chip->ecc.strength = 8;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = rawnand_sw_bch_correct;
+ chip->ecc.calculate = omap_calculate_ecc_bch_sw;
+ mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
+ /* Reserve one byte for the OMAP marker */
+ oobbytes_per_step = chip->ecc.bytes + 1;
+ /* Software BCH library is used for locating errors */
+ err = rawnand_sw_bch_init(chip);
+ if (err) {
+ dev_err(dev, "unable to use BCH library\n");
+ return err;
+ }
+ break;
+
+ case OMAP_ECC_BCH8_CODE_HW:
+ pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.size = 512;
+ /* The 14th ECC byte is kept reserved for ROM-code compatibility */
+ chip->ecc.bytes = 13 + 1;
+ chip->ecc.strength = 8;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = omap_elm_correct_data;
+ chip->ecc.read_page = omap_read_page_bch;
+ chip->ecc.write_page = omap_write_page_bch;
+ chip->ecc.write_subpage = omap_write_subpage_bch;
+ mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+ oobbytes_per_step = chip->ecc.bytes;
+ elm_bch_strength = BCH8_ECC;
+ break;
+
+ case OMAP_ECC_BCH16_CODE_HW:
+ pr_info("Using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 26;
+ chip->ecc.strength = 16;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = omap_elm_correct_data;
+ chip->ecc.read_page = omap_read_page_bch;
+ chip->ecc.write_page = omap_write_page_bch;
+ chip->ecc.write_subpage = omap_write_subpage_bch;
+ mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+ oobbytes_per_step = chip->ecc.bytes;
+ elm_bch_strength = BCH16_ECC;
+ break;
+ default:
+ dev_err(dev, "Invalid or unsupported ECC scheme\n");
+ return -EINVAL;
+ }
+
+ if (elm_bch_strength >= 0) {
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ info->neccpg = chip->ecc.steps / ERROR_VECTOR_MAX;
+ if (info->neccpg) {
+ info->nsteps_per_eccpg = ERROR_VECTOR_MAX;
+ } else {
+ info->neccpg = 1;
+ info->nsteps_per_eccpg = chip->ecc.steps;
+ }
+ info->eccpg_size = info->nsteps_per_eccpg * chip->ecc.size;
+ info->eccpg_bytes = info->nsteps_per_eccpg * chip->ecc.bytes;
+
+ err = elm_config(info->elm_dev, elm_bch_strength,
+ info->nsteps_per_eccpg, chip->ecc.size,
+ chip->ecc.bytes);
+ if (err < 0)
+ return err;
+ }
+
+ /* Check if NAND device's OOB is enough to store ECC signatures */
+ min_oobbytes += (oobbytes_per_step *
+ (mtd->writesize / chip->ecc.size));
+ if (mtd->oobsize < min_oobbytes) {
+ dev_err(dev,
+ "Not enough OOB bytes: required = %d, available=%d\n",
+ min_oobbytes, mtd->oobsize);
+ return -EINVAL;
+ }
+
+ return 0;
+}
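+
+/*
+ * Example of the OOB-space check above, assuming BCH8 HW correction on
+ * a 2 KiB page with 64 spare bytes: min_oobbytes = 2 (BBM) + 14 * 4 =
+ * 58 <= 64, so the scheme fits; BCH16 (26 bytes over 4 steps) would
+ * already need a larger OOB area.
+ */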
+
+static void omap_nand_data_in(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ u32 alignment = ((uintptr_t)buf | len) & 3;
+
+ if (force_8bit || (alignment & 1))
+ ioread8_rep(info->fifo, buf, len);
+ else if (alignment & 3)
+ ioread16_rep(info->fifo, buf, len >> 1);
+ else
+ ioread32_rep(info->fifo, buf, len >> 2);
+}
+
+static void omap_nand_data_out(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ u32 alignment = ((uintptr_t)buf | len) & 3;
+
+ if (force_8bit || (alignment & 1))
+ iowrite8_rep(info->fifo, buf, len);
+ else if (alignment & 3)
+ iowrite16_rep(info->fifo, buf, len >> 1);
+ else
+ iowrite32_rep(info->fifo, buf, len >> 2);
+}
+
+static int omap_nand_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ unsigned int i;
+ int ret;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ iowrite8(instr->ctx.cmd.opcode,
+ info->reg.gpmc_nand_command);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ iowrite8(instr->ctx.addr.addrs[i],
+ info->reg.gpmc_nand_address);
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ info->data_in(chip, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ info->data_out(chip, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = info->ready_gpiod ?
+ nand_gpio_waitrdy(chip, info->ready_gpiod, instr->ctx.waitrdy.timeout_ms) :
+ nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+
+ return 0;
+}
+
+static int omap_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ unsigned int i;
+
+ if (check_only)
+ return 0;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ int ret;
+
+ ret = omap_nand_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops omap_nand_controller_ops = {
+ .attach_chip = omap_nand_attach_chip,
+ .exec_op = omap_nand_exec_op,
+};
+
+/* Shared among all NAND instances to synchronize access to the ECC Engine */
+static struct nand_controller omap_gpmc_controller;
+static bool omap_gpmc_controller_initialized;
+
+static int omap_nand_probe(struct platform_device *pdev)
+{
+ struct omap_nand_info *info;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ int err;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ void __iomem *vaddr;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->pdev = pdev;
+
+ err = omap_get_dt_info(dev, info);
+ if (err)
+ return err;
+
+ info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
+ if (!info->ops) {
+ dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
+ return -ENODEV;
+ }
+
+ nand_chip = &info->nand;
+ mtd = nand_to_mtd(nand_chip);
+ mtd->dev.parent = &pdev->dev;
+ nand_set_flash_node(nand_chip, dev->of_node);
+
+ if (!mtd->name) {
+ mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "omap2-nand.%d", info->gpmc_cs);
+ if (!mtd->name) {
+ dev_err(&pdev->dev, "Failed to set MTD name\n");
+ return -ENOMEM;
+ }
+ }
+
+ vaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ info->fifo = vaddr;
+ info->phys_base = res->start;
+
+ if (!omap_gpmc_controller_initialized) {
+ omap_gpmc_controller.ops = &omap_nand_controller_ops;
+ nand_controller_init(&omap_gpmc_controller);
+ omap_gpmc_controller_initialized = true;
+ }
+
+ nand_chip->controller = &omap_gpmc_controller;
+
+ info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
+ GPIOD_IN);
+ if (IS_ERR(info->ready_gpiod)) {
+ dev_err(dev, "failed to get ready gpio\n");
+ return PTR_ERR(info->ready_gpiod);
+ }
+
+ if (info->flash_bbt)
+ nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ /* default operations */
+ info->data_in = omap_nand_data_in;
+ info->data_out = omap_nand_data_out;
+
+ err = nand_scan(nand_chip, 1);
+ if (err)
+ goto return_error;
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto cleanup_nand;
+
+ platform_set_drvdata(pdev, mtd);
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(nand_chip);
+
+return_error:
+ if (!IS_ERR_OR_NULL(info->dma))
+ dma_release_channel(info->dma);
+
+ rawnand_sw_bch_cleanup(nand_chip);
+
+ return err;
+}
+
+static void omap_nand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+
+ rawnand_sw_bch_cleanup(nand_chip);
+
+ if (info->dma)
+ dma_release_channel(info->dma);
+ WARN_ON(mtd_device_unregister(mtd));
+ nand_cleanup(nand_chip);
+}
+
+/* omap_nand_ids defined in linux/platform_data/mtd-nand-omap2.h */
+MODULE_DEVICE_TABLE(of, omap_nand_ids);
+
+static struct platform_driver omap_nand_driver = {
+ .probe = omap_nand_probe,
+ .remove_new = omap_nand_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = omap_nand_ids,
+ },
+};
+
+module_platform_driver(omap_nand_driver);
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
new file mode 100644
index 0000000000..4a97d4a764
--- /dev/null
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Error Location Module
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#define DRIVER_NAME "omap-elm"
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/elm.h>
+
+#define ELM_SYSCONFIG 0x010
+#define ELM_IRQSTATUS 0x018
+#define ELM_IRQENABLE 0x01c
+#define ELM_LOCATION_CONFIG 0x020
+#define ELM_PAGE_CTRL 0x080
+#define ELM_SYNDROME_FRAGMENT_0 0x400
+#define ELM_SYNDROME_FRAGMENT_1 0x404
+#define ELM_SYNDROME_FRAGMENT_2 0x408
+#define ELM_SYNDROME_FRAGMENT_3 0x40c
+#define ELM_SYNDROME_FRAGMENT_4 0x410
+#define ELM_SYNDROME_FRAGMENT_5 0x414
+#define ELM_SYNDROME_FRAGMENT_6 0x418
+#define ELM_LOCATION_STATUS 0x800
+#define ELM_ERROR_LOCATION_0 0x880
+
+/* ELM Interrupt Status Register */
+#define INTR_STATUS_PAGE_VALID BIT(8)
+
+/* ELM Interrupt Enable Register */
+#define INTR_EN_PAGE_MASK BIT(8)
+
+/* ELM Location Configuration Register */
+#define ECC_BCH_LEVEL_MASK 0x3
+
+/* ELM syndrome */
+#define ELM_SYNDROME_VALID BIT(16)
+
+/* ELM_LOCATION_STATUS Register */
+#define ECC_CORRECTABLE_MASK BIT(8)
+#define ECC_NB_ERRORS_MASK 0x1f
+
+/* ELM_ERROR_LOCATION_0-15 Registers */
+#define ECC_ERROR_LOCATION_MASK 0x1fff
+
+#define ELM_ECC_SIZE 0x7ff
+
+#define SYNDROME_FRAGMENT_REG_SIZE 0x40
+#define ERROR_LOCATION_SIZE 0x100
+
+struct elm_registers {
+ u32 elm_irqenable;
+ u32 elm_sysconfig;
+ u32 elm_location_config;
+ u32 elm_page_ctrl;
+ u32 elm_syndrome_fragment_6[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_5[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_4[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_3[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_2[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_1[ERROR_VECTOR_MAX];
+ u32 elm_syndrome_fragment_0[ERROR_VECTOR_MAX];
+};
+
+struct elm_info {
+ struct device *dev;
+ void __iomem *elm_base;
+ struct completion elm_completion;
+ struct list_head list;
+ enum bch_ecc bch_type;
+ struct elm_registers elm_regs;
+ int ecc_steps;
+ int ecc_syndrome_size;
+};
+
+static LIST_HEAD(elm_devices);
+
+static void elm_write_reg(struct elm_info *info, int offset, u32 val)
+{
+ writel(val, info->elm_base + offset);
+}
+
+static u32 elm_read_reg(struct elm_info *info, int offset)
+{
+ return readl(info->elm_base + offset);
+}
+
+/**
+ * elm_config - Configure ELM module
+ * @dev: ELM device
+ * @bch_type: Type of BCH ecc
+ * @ecc_steps: ECC steps to assign to config
+ * @ecc_step_size: ECC step size to assign to config
+ * @ecc_syndrome_size: ECC syndrome size to assign to config
+ */
+int elm_config(struct device *dev, enum bch_ecc bch_type,
+ int ecc_steps, int ecc_step_size, int ecc_syndrome_size)
+{
+ u32 reg_val;
+ struct elm_info *info = dev_get_drvdata(dev);
+
+ if (!info) {
+ dev_err(dev, "Unable to configure elm - device not probed?\n");
+ return -EPROBE_DEFER;
+ }
+ /* ELM cannot detect ECC errors for chunks > 1KB */
+ if (ecc_step_size > ((ELM_ECC_SIZE + 1) / 2)) {
+ dev_err(dev, "unsupported config ecc-size=%d\n", ecc_step_size);
+ return -EINVAL;
+ }
+ /* ELM can process at most 8 error syndromes at a time */
+ if (ecc_steps > ERROR_VECTOR_MAX && ecc_steps % ERROR_VECTOR_MAX) {
+ dev_err(dev, "unsupported config ecc-step=%d\n", ecc_steps);
+ return -EINVAL;
+ }
+
+ reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
+ elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
+ info->bch_type = bch_type;
+ info->ecc_steps = ecc_steps;
+ info->ecc_syndrome_size = ecc_syndrome_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(elm_config);
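+
+/*
+ * Typical use from the omap2 NAND driver, with example values for BCH8
+ * on a 2 KiB page (4 steps of 512 bytes, 14 ECC bytes per step):
+ *
+ * err = elm_config(info->elm_dev, BCH8_ECC, 4, 512, 14);
+ */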
+
+/**
+ * elm_configure_page_mode - Enable/Disable page mode
+ * @info: elm info
+ * @index: index number of syndrome fragment vector
+ * @enable: enable/disable flag for page mode
+ *
+ * Enable page mode for syndrome fragment index
+ */
+static void elm_configure_page_mode(struct elm_info *info, int index,
+ bool enable)
+{
+ u32 reg_val;
+
+ reg_val = elm_read_reg(info, ELM_PAGE_CTRL);
+ if (enable)
+ reg_val |= BIT(index); /* enable page mode */
+ else
+ reg_val &= ~BIT(index); /* disable page mode */
+
+ elm_write_reg(info, ELM_PAGE_CTRL, reg_val);
+}
+
+/**
+ * elm_load_syndrome - Load ELM syndrome reg
+ * @info: elm info
+ * @err_vec: elm error vectors
+ * @ecc: buffer with calculated ecc
+ *
+ * Load syndrome fragment registers with calculated ecc in reverse order.
+ */
+static void elm_load_syndrome(struct elm_info *info,
+ struct elm_errorvec *err_vec, u8 *ecc)
+{
+ int i, offset;
+ u32 val;
+
+ for (i = 0; i < info->ecc_steps; i++) {
+
+ /* Check error reported */
+ if (err_vec[i].error_reported) {
+ elm_configure_page_mode(info, i, true);
+ offset = ELM_SYNDROME_FRAGMENT_0 +
+ SYNDROME_FRAGMENT_REG_SIZE * i;
+ switch (info->bch_type) {
+ case BCH8_ECC:
+ /* syndrome fragment 0 = ecc[9-12B] */
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[5-8B] */
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[5]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 2 = ecc[1-4B] */
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[1]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 3 = ecc[0B] */
+ offset += 4;
+ val = ecc[0];
+ elm_write_reg(info, offset, val);
+ break;
+ case BCH4_ECC:
+ /* syndrome fragment 0 = ecc[20-52b] bits */
+ val = ((__force u32)cpu_to_be32(*(u32 *)&ecc[3]) >> 4) |
+ ((ecc[2] & 0xf) << 28);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[0-20b] bits */
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 12;
+ elm_write_reg(info, offset, val);
+ break;
+ case BCH16_ECC:
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[22]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[18]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[14]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[10]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[6]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[2]);
+ elm_write_reg(info, offset, val);
+ offset += 4;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 16;
+ elm_write_reg(info, offset, val);
+ break;
+ default:
+ pr_err("invalid config bch_type\n");
+ }
+ }
+
+ /* Update ecc pointer with ecc byte size */
+ ecc += info->ecc_syndrome_size;
+ }
+}
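+
+/*
+ * The fragment registers are filled least-significant word first while
+ * the ECC codes arrive most-significant byte first, hence the
+ * cpu_to_be32() swabs and the descending ecc[] indices above: for BCH8,
+ * fragment 0 takes ecc[9..12] and fragment 3 the single top byte
+ * ecc[0].
+ */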
+
+/**
+ * elm_start_processing - start elm syndrome processing
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * Set the syndrome valid bit for the syndrome fragment registers that
+ * have been loaded. This lets the ELM module start processing the
+ * syndrome vectors.
+ */
+static void elm_start_processing(struct elm_info *info,
+ struct elm_errorvec *err_vec)
+{
+ int i, offset;
+ u32 reg_val;
+
+ /*
+ * Mark each syndrome vector valid so that the ELM module
+ * processes those vectors for which an error was reported.
+ */
+ for (i = 0; i < info->ecc_steps; i++) {
+ if (err_vec[i].error_reported) {
+ offset = ELM_SYNDROME_FRAGMENT_6 +
+ SYNDROME_FRAGMENT_REG_SIZE * i;
+ reg_val = elm_read_reg(info, offset);
+ reg_val |= ELM_SYNDROME_VALID;
+ elm_write_reg(info, offset, reg_val);
+ }
+ }
+}
+
+/**
+ * elm_error_correction - locate correctable error position
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * When the ELM module finishes processing, the error location status
+ * register is updated with correctable/uncorrectable error
+ * information. For correctable errors, the number of errors is taken
+ * from the ELM location status register and the positions are read
+ * from the ELM error location registers.
+ */
+static void elm_error_correction(struct elm_info *info,
+ struct elm_errorvec *err_vec)
+{
+ int i, j;
+ int offset;
+ u32 reg_val;
+
+ for (i = 0; i < info->ecc_steps; i++) {
+
+ /* Check error reported */
+ if (err_vec[i].error_reported) {
+ offset = ELM_LOCATION_STATUS + ERROR_LOCATION_SIZE * i;
+ reg_val = elm_read_reg(info, offset);
+
+ /* Check whether the error is correctable */
+ if (reg_val & ECC_CORRECTABLE_MASK) {
+ offset = ELM_ERROR_LOCATION_0 +
+ ERROR_LOCATION_SIZE * i;
+
+ /* Read count of correctable errors */
+ err_vec[i].error_count = reg_val &
+ ECC_NB_ERRORS_MASK;
+
+ /* Update the error locations in error vector */
+ for (j = 0; j < err_vec[i].error_count; j++) {
+
+ reg_val = elm_read_reg(info, offset);
+ err_vec[i].error_loc[j] = reg_val &
+ ECC_ERROR_LOCATION_MASK;
+
+ /* Update error location register */
+ offset += 4;
+ }
+ } else {
+ err_vec[i].error_uncorrectable = true;
+ }
+
+ /* Clearing interrupts for processed error vectors */
+ elm_write_reg(info, ELM_IRQSTATUS, BIT(i));
+
+ /* Disable page mode */
+ elm_configure_page_mode(info, i, false);
+ }
+ }
+}
+
+/**
+ * elm_decode_bch_error_page - Locate error position
+ * @dev: device pointer
+ * @ecc_calc: calculated ECC bytes from GPMC
+ * @err_vec: elm error vectors
+ *
+ * Called with one or more error-reported vectors; vectors with a
+ * reported error are flagged in err_vec[].error_reported.
+ */
+void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+ struct elm_errorvec *err_vec)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ u32 reg_val;
+
+ /* Enable page mode interrupt */
+ reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+ elm_write_reg(info, ELM_IRQSTATUS, reg_val & INTR_STATUS_PAGE_VALID);
+ elm_write_reg(info, ELM_IRQENABLE, INTR_EN_PAGE_MASK);
+
+ /* Load the calculated ECC bytes into the syndrome fragment registers */
+ elm_load_syndrome(info, err_vec, ecc_calc);
+
+ /* Enable syndrome processing for which syndrome fragment is updated */
+ elm_start_processing(info, err_vec);
+
+ /* Wait for the ELM module to finish locating the errors */
+ wait_for_completion(&info->elm_completion);
+
+ /* Disable page mode interrupt */
+ reg_val = elm_read_reg(info, ELM_IRQENABLE);
+ elm_write_reg(info, ELM_IRQENABLE, reg_val & ~INTR_EN_PAGE_MASK);
+ elm_error_correction(info, err_vec);
+}
+EXPORT_SYMBOL(elm_decode_bch_error_page);
+
+static irqreturn_t elm_isr(int this_irq, void *dev_id)
+{
+ u32 reg_val;
+ struct elm_info *info = dev_id;
+
+ reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+
+ /* All error vectors processed */
+ if (reg_val & INTR_STATUS_PAGE_VALID) {
+ elm_write_reg(info, ELM_IRQSTATUS,
+ reg_val & INTR_STATUS_PAGE_VALID);
+ complete(&info->elm_completion);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int elm_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct elm_info *info;
+ int irq;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = &pdev->dev;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ info->elm_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(info->elm_base))
+ return PTR_ERR(info->elm_base);
+
+ ret = devm_request_irq(&pdev->dev, irq, elm_isr, 0,
+ pdev->name, info);
+ if (ret) {
+ dev_err(&pdev->dev, "failure requesting %d\n", irq);
+ return ret;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ if (pm_runtime_get_sync(&pdev->dev) < 0) {
+ ret = -EINVAL;
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ dev_err(&pdev->dev, "can't enable clock\n");
+ return ret;
+ }
+
+ init_completion(&info->elm_completion);
+ INIT_LIST_HEAD(&info->list);
+ list_add(&info->list, &elm_devices);
+ platform_set_drvdata(pdev, info);
+ return ret;
+}
+
+static void elm_remove(struct platform_device *pdev)
+{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * elm_context_save
+ * saves ELM configurations to preserve them across hardware power-down
+ */
+static int elm_context_save(struct elm_info *info)
+{
+ struct elm_registers *regs = &info->elm_regs;
+ enum bch_ecc bch_type = info->bch_type;
+ u32 offset = 0, i;
+
+ regs->elm_irqenable = elm_read_reg(info, ELM_IRQENABLE);
+ regs->elm_sysconfig = elm_read_reg(info, ELM_SYSCONFIG);
+ regs->elm_location_config = elm_read_reg(info, ELM_LOCATION_CONFIG);
+ regs->elm_page_ctrl = elm_read_reg(info, ELM_PAGE_CTRL);
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ offset = i * SYNDROME_FRAGMENT_REG_SIZE;
+ switch (bch_type) {
+ case BCH16_ECC:
+ regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_6 + offset);
+ regs->elm_syndrome_fragment_5[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_5 + offset);
+ regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_4 + offset);
+ fallthrough;
+ case BCH8_ECC:
+ regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_3 + offset);
+ regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_2 + offset);
+ fallthrough;
+ case BCH4_ECC:
+ regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_1 + offset);
+ regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_0 + offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+ /*
+ * The ELM SYNDROME_VALID bit in SYNDROME_FRAGMENT_6[] needs
+ * to be saved for all BCH schemes.
+ */
+ regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
+ ELM_SYNDROME_FRAGMENT_6 + offset);
+ }
+ return 0;
+}
+
+/*
+ * elm_context_restore
+ * writes configurations saved during power-down back into ELM registers
+ */
+static int elm_context_restore(struct elm_info *info)
+{
+ struct elm_registers *regs = &info->elm_regs;
+ enum bch_ecc bch_type = info->bch_type;
+ u32 offset = 0, i;
+
+ elm_write_reg(info, ELM_IRQENABLE, regs->elm_irqenable);
+ elm_write_reg(info, ELM_SYSCONFIG, regs->elm_sysconfig);
+ elm_write_reg(info, ELM_LOCATION_CONFIG, regs->elm_location_config);
+ elm_write_reg(info, ELM_PAGE_CTRL, regs->elm_page_ctrl);
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ offset = i * SYNDROME_FRAGMENT_REG_SIZE;
+ switch (bch_type) {
+ case BCH16_ECC:
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
+ regs->elm_syndrome_fragment_6[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_5 + offset,
+ regs->elm_syndrome_fragment_5[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
+ regs->elm_syndrome_fragment_4[i]);
+ fallthrough;
+ case BCH8_ECC:
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
+ regs->elm_syndrome_fragment_3[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
+ regs->elm_syndrome_fragment_2[i]);
+ fallthrough;
+ case BCH4_ECC:
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
+ regs->elm_syndrome_fragment_1[i]);
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
+ regs->elm_syndrome_fragment_0[i]);
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* The ELM_SYNDROME_VALID bit must be set last to trigger the FSM */
+ elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
+ regs->elm_syndrome_fragment_6[i] &
+ ELM_SYNDROME_VALID);
+ }
+ return 0;
+}
+
+static int elm_suspend(struct device *dev)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ elm_context_save(info);
+ pm_runtime_put_sync(dev);
+ return 0;
+}
+
+static int elm_resume(struct device *dev)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ pm_runtime_get_sync(dev);
+ elm_context_restore(info);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(elm_pm_ops, elm_suspend, elm_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id elm_of_match[] = {
+ { .compatible = "ti,am3352-elm" },
+ { .compatible = "ti,am64-elm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, elm_of_match);
+#endif
+
+static struct platform_driver elm_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(elm_of_match),
+ .pm = &elm_pm_ops,
+ },
+ .probe = elm_probe,
+ .remove_new = elm_remove,
+};
+
+module_platform_driver(elm_driver);
+
+MODULE_DESCRIPTION("ELM driver for BCH error correction");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
new file mode 100644
index 0000000000..2951d81614
--- /dev/null
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -0,0 +1,229 @@
+/*
+ * NAND support for Marvell Orion SoC platforms
+ *
+ * Tzachi Perelstein <tzachi@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/sizes.h>
+#include <linux/platform_data/mtd-orion_nand.h>
+
+struct orion_nand_info {
+ struct nand_controller controller;
+ struct nand_chip chip;
+ struct clk *clk;
+};
+
+static void orion_nand_cmd_ctrl(struct nand_chip *nc, int cmd,
+ unsigned int ctrl)
+{
+ struct orion_nand_data *board = nand_get_controller_data(nc);
+ u32 offs;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
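+ /*
+ * CLE and ALE are wired to NAND address lines, so a command or
+ * address byte is latched by writing it at the matching offset
+ * within the chip's address window.
+ */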
+ if (ctrl & NAND_CLE)
+ offs = (1 << board->cle);
+ else if (ctrl & NAND_ALE)
+ offs = (1 << board->ale);
+ else
+ return;
+
+ if (nc->options & NAND_BUSWIDTH_16)
+ offs <<= 1;
+
+ writeb(cmd, nc->legacy.IO_ADDR_W + offs);
+}
+
+static void orion_nand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ void __iomem *io_base = chip->legacy.IO_ADDR_R;
+#if defined(__LINUX_ARM_ARCH__) && __LINUX_ARM_ARCH__ >= 5
+ uint64_t *buf64;
+#endif
+ int i = 0;
+
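+ /* Read byte-wise until the destination buffer is 8-byte aligned */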
+ while (len && (unsigned long)buf & 7) {
+ *buf++ = readb(io_base);
+ len--;
+ }
+#if defined(__LINUX_ARM_ARCH__) && __LINUX_ARM_ARCH__ >= 5
+ buf64 = (uint64_t *)buf;
+ while (i < len/8) {
+ /*
+ * Since GCC has no proper constraint (PR 43518)
+ * force x variable to r2/r3 registers as ldrd instruction
+ * requires first register to be even.
+ */
+ register uint64_t x asm ("r2");
+
+ asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
+ buf64[i++] = x;
+ }
+ i *= 8;
+#else
+ readsl(io_base, buf, len/4);
+ i = len / 4 * 4;
+#endif
+ while (i < len)
+ buf[i++] = readb(io_base);
+}
+
+static int orion_nand_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops orion_nand_ops = {
+ .attach_chip = orion_nand_attach_chip,
+};
+
+static int __init orion_nand_probe(struct platform_device *pdev)
+{
+ struct orion_nand_info *info;
+ struct mtd_info *mtd;
+ struct nand_chip *nc;
+ struct orion_nand_data *board;
+ void __iomem *io_base;
+ int ret = 0;
+ u32 val = 0;
+
+ info = devm_kzalloc(&pdev->dev,
+ sizeof(struct orion_nand_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ nc = &info->chip;
+ mtd = nand_to_mtd(nc);
+
+ nand_controller_init(&info->controller);
+ info->controller.ops = &orion_nand_ops;
+ nc->controller = &info->controller;
+
+ io_base = devm_platform_ioremap_resource(pdev, 0);
+
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ if (pdev->dev.of_node) {
+ board = devm_kzalloc(&pdev->dev, sizeof(struct orion_nand_data),
+ GFP_KERNEL);
+ if (!board)
+ return -ENOMEM;
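+ /* Absent DT properties fall back to the default pin/width settings */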
+ if (!of_property_read_u32(pdev->dev.of_node, "cle", &val))
+ board->cle = (u8)val;
+ else
+ board->cle = 0;
+ if (!of_property_read_u32(pdev->dev.of_node, "ale", &val))
+ board->ale = (u8)val;
+ else
+ board->ale = 1;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "bank-width", &val))
+ board->width = (u8)val * 8;
+ else
+ board->width = 8;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "chip-delay", &val))
+ board->chip_delay = (u8)val;
+ } else {
+ board = dev_get_platdata(&pdev->dev);
+ }
+
+ mtd->dev.parent = &pdev->dev;
+
+ nand_set_controller_data(nc, board);
+ nand_set_flash_node(nc, pdev->dev.of_node);
+ nc->legacy.IO_ADDR_R = nc->legacy.IO_ADDR_W = io_base;
+ nc->legacy.cmd_ctrl = orion_nand_cmd_ctrl;
+ nc->legacy.read_buf = orion_nand_read_buf;
+
+ if (board->chip_delay)
+ nc->legacy.chip_delay = board->chip_delay;
+
+ WARN(board->width > 16,
+ "%d bit bus width out of range",
+ board->width);
+
+ if (board->width == 16)
+ nc->options |= NAND_BUSWIDTH_16;
+
+ platform_set_drvdata(pdev, info);
+
+ /* Not all platforms can gate the clock, so it is optional. */
+ info->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ if (IS_ERR(info->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->clk),
+ "failed to get and enable clock!\n");
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ nc->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ ret = nand_scan(nc, 1);
+ if (ret)
+ return ret;
+
+ mtd->name = "orion_nand";
+ ret = mtd_device_register(mtd, board->parts, board->nr_parts);
+ if (ret)
+ nand_cleanup(nc);
+
+ return ret;
+}
+
+static void orion_nand_remove(struct platform_device *pdev)
+{
+ struct orion_nand_info *info = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &info->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+
+ nand_cleanup(chip);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id orion_nand_of_match_table[] = {
+ { .compatible = "marvell,orion-nand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, orion_nand_of_match_table);
+#endif
+
+static struct platform_driver orion_nand_driver = {
+ .remove_new = orion_nand_remove,
+ .driver = {
+ .name = "orion_nand",
+ .of_match_table = of_match_ptr(orion_nand_of_match_table),
+ },
+};
+
+module_platform_driver_probe(orion_nand_driver, orion_nand_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tzachi Perelstein");
+MODULE_DESCRIPTION("NAND glue for Orion platforms");
+MODULE_ALIAS("platform:orion_nand");
diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c
new file mode 100644
index 0000000000..19b2c9d258
--- /dev/null
+++ b/drivers/mtd/nand/raw/pasemi_nand.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Author: Egor Martovetsky <egor@pasemi.com>
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient onchip NAND flash interface
+ */
+
+#undef DEBUG
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
+#include <asm/io.h>
+
+#define LBICTRL_LPCCTL_NR 0x00004000
+#define CLE_PIN_CTL 15
+#define ALE_PIN_CTL 14
+
+struct pasemi_ddata {
+ struct nand_chip chip;
+ unsigned int lpcctl;
+ struct nand_controller controller;
+};
+
+static const char driver_name[] = "pasemi-nand";
+
+static void pasemi_read_buf(struct nand_chip *chip, u_char *buf, int len)
+{
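+ /* Copy in chunks of at most 0x800 bytes */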
+ while (len > 0x800) {
+ memcpy_fromio(buf, chip->legacy.IO_ADDR_R, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_fromio(buf, chip->legacy.IO_ADDR_R, len);
+}
+
+static void pasemi_write_buf(struct nand_chip *chip, const u_char *buf,
+ int len)
+{
+ while (len > 0x800) {
+ memcpy_toio(chip->legacy.IO_ADDR_R, buf, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_toio(chip->legacy.IO_ADDR_R, buf, len);
+}
+
+static void pasemi_hwcontrol(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ struct pasemi_ddata *ddata = container_of(chip, struct pasemi_ddata, chip);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
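+ /* CLE and ALE are wired to address lines 15 and 14 respectively */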
+ if (ctrl & NAND_CLE)
+ out_8(chip->legacy.IO_ADDR_W + (1 << CLE_PIN_CTL), cmd);
+ else
+ out_8(chip->legacy.IO_ADDR_W + (1 << ALE_PIN_CTL), cmd);
+
+ /* Push out posted writes */
+ eieio();
+ inl(ddata->lpcctl);
+}
+
+static int pasemi_device_ready(struct nand_chip *chip)
+{
+ struct pasemi_ddata *ddata = container_of(chip, struct pasemi_ddata, chip);
+
+ return !!(inl(ddata->lpcctl) & LBICTRL_LPCCTL_NR);
+}
+
+static int pasemi_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops pasemi_ops = {
+ .attach_chip = pasemi_attach_chip,
+};
+
+static int pasemi_nand_probe(struct platform_device *ofdev)
+{
+ struct device *dev = &ofdev->dev;
+ struct pci_dev *pdev;
+ struct device_node *np = dev->of_node;
+ struct resource res;
+ struct nand_chip *chip;
+ struct nand_controller *controller;
+ int err = 0;
+ struct pasemi_ddata *ddata;
+ struct mtd_info *pasemi_nand_mtd;
+
+ err = of_address_to_resource(np, 0, &res);
+
+ if (err)
+ return -EINVAL;
+
+ dev_dbg(dev, "pasemi_nand at %pR\n", &res);
+
+ /* Allocate memory for MTD device structure and private data */
+ ddata = kzalloc(sizeof(*ddata), GFP_KERNEL);
+ if (!ddata) {
+ err = -ENOMEM;
+ goto out;
+ }
+ platform_set_drvdata(ofdev, ddata);
+ chip = &ddata->chip;
+ controller = &ddata->controller;
+
+ controller->ops = &pasemi_ops;
+ nand_controller_init(controller);
+ chip->controller = controller;
+
+ pasemi_nand_mtd = nand_to_mtd(chip);
+
+ /* Link the private data with the MTD structure */
+ pasemi_nand_mtd->dev.parent = dev;
+
+ chip->legacy.IO_ADDR_R = of_iomap(np, 0);
+ chip->legacy.IO_ADDR_W = chip->legacy.IO_ADDR_R;
+
+ if (!chip->legacy.IO_ADDR_R) {
+ err = -EIO;
+ goto out_mtd;
+ }
+
+ pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL);
+ if (!pdev) {
+ err = -ENODEV;
+ goto out_ior;
+ }
+
+ ddata->lpcctl = pci_resource_start(pdev, 0);
+ pci_dev_put(pdev);
+
+ if (!request_region(ddata->lpcctl, 4, driver_name)) {
+ err = -EBUSY;
+ goto out_ior;
+ }
+
+ chip->legacy.cmd_ctrl = pasemi_hwcontrol;
+ chip->legacy.dev_ready = pasemi_device_ready;
+ chip->legacy.read_buf = pasemi_read_buf;
+ chip->legacy.write_buf = pasemi_write_buf;
+ chip->legacy.chip_delay = 0;
+
+ /* Enable the following for a flash based bad block table */
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(chip, 1);
+ if (err)
+ goto out_lpc;
+
+ if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
+ dev_err(dev, "Unable to register MTD device\n");
+ err = -ENODEV;
+ goto out_cleanup_nand;
+ }
+
+ dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res,
+ ddata->lpcctl);
+
+ return 0;
+
+ out_cleanup_nand:
+ nand_cleanup(chip);
+ out_lpc:
+ release_region(ddata->lpcctl, 4);
+ out_ior:
+ iounmap(chip->legacy.IO_ADDR_R);
+ out_mtd:
+ kfree(ddata);
+ out:
+ return err;
+}
+
+static void pasemi_nand_remove(struct platform_device *ofdev)
+{
+ struct pasemi_ddata *ddata = platform_get_drvdata(ofdev);
+ struct mtd_info *pasemi_nand_mtd;
+ int ret;
+ struct nand_chip *chip;
+
+ chip = &ddata->chip;
+ pasemi_nand_mtd = nand_to_mtd(chip);
+
+ /* Release resources, unregister device */
+ ret = mtd_device_unregister(pasemi_nand_mtd);
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ release_region(ddata->lpcctl, 4);
+
+ iounmap(chip->legacy.IO_ADDR_R);
+
+ /* Free the MTD device structure */
+ kfree(ddata);
+}
+
+static const struct of_device_id pasemi_nand_match[] =
+{
+ {
+ .compatible = "pasemi,localbus-nand",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, pasemi_nand_match);
+
+static struct platform_driver pasemi_nand_driver =
+{
+ .driver = {
+ .name = driver_name,
+ .of_match_table = pasemi_nand_match,
+ },
+ .probe = pasemi_nand_probe,
+ .remove_new = pasemi_nand_remove,
+};
+
+module_platform_driver(pasemi_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");
diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
new file mode 100644
index 0000000000..c506e92a3e
--- /dev/null
+++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
@@ -0,0 +1,1199 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM PL35X NAND flash controller driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc
+ * Author:
+ * Miquel Raynal <miquel.raynal@bootlin.com>
+ * Original work (rewritten):
+ * Punnaiah Choudary Kalluri <punnaia@xilinx.com>
+ * Naga Sureshkumar Relli <nagasure@xilinx.com>
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#define PL35X_NANDC_DRIVER_NAME "pl35x-nand-controller"
+
+/* SMC controller status register (RO) */
+#define PL35X_SMC_MEMC_STATUS 0x0
+#define PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1 BIT(6)
+/* SMC clear config register (WO) */
+#define PL35X_SMC_MEMC_CFG_CLR 0xC
+#define PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1 BIT(1)
+#define PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1 BIT(4)
+#define PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1 BIT(6)
+/* SMC direct command register (WO) */
+#define PL35X_SMC_DIRECT_CMD 0x10
+#define PL35X_SMC_DIRECT_CMD_NAND_CS (0x4 << 23)
+#define PL35X_SMC_DIRECT_CMD_UPD_REGS (0x2 << 21)
+/* SMC set cycles register (WO) */
+#define PL35X_SMC_CYCLES 0x14
+#define PL35X_SMC_NAND_TRC_CYCLES(x) ((x) << 0)
+#define PL35X_SMC_NAND_TWC_CYCLES(x) ((x) << 4)
+#define PL35X_SMC_NAND_TREA_CYCLES(x) ((x) << 8)
+#define PL35X_SMC_NAND_TWP_CYCLES(x) ((x) << 11)
+#define PL35X_SMC_NAND_TCLR_CYCLES(x) ((x) << 14)
+#define PL35X_SMC_NAND_TAR_CYCLES(x) ((x) << 17)
+#define PL35X_SMC_NAND_TRR_CYCLES(x) ((x) << 20)
+/* SMC set opmode register (WO) */
+#define PL35X_SMC_OPMODE 0x18
+#define PL35X_SMC_OPMODE_BW_8 0
+#define PL35X_SMC_OPMODE_BW_16 1
+/* SMC ECC status register (RO) */
+#define PL35X_SMC_ECC_STATUS 0x400
+#define PL35X_SMC_ECC_STATUS_ECC_BUSY BIT(6)
+/* SMC ECC configuration register */
+#define PL35X_SMC_ECC_CFG 0x404
+#define PL35X_SMC_ECC_CFG_MODE_MASK 0xC
+#define PL35X_SMC_ECC_CFG_MODE_BYPASS 0
+#define PL35X_SMC_ECC_CFG_MODE_APB BIT(2)
+#define PL35X_SMC_ECC_CFG_MODE_MEM BIT(3)
+#define PL35X_SMC_ECC_CFG_PGSIZE_MASK 0x3
+/* SMC ECC command 1 register */
+#define PL35X_SMC_ECC_CMD1 0x408
+#define PL35X_SMC_ECC_CMD1_WRITE(x) ((x) << 0)
+#define PL35X_SMC_ECC_CMD1_READ(x) ((x) << 8)
+#define PL35X_SMC_ECC_CMD1_READ_END(x) ((x) << 16)
+#define PL35X_SMC_ECC_CMD1_READ_END_VALID(x) ((x) << 24)
+/* SMC ECC command 2 register */
+#define PL35X_SMC_ECC_CMD2 0x40C
+#define PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(x) ((x) << 0)
+#define PL35X_SMC_ECC_CMD2_READ_COL_CHG(x) ((x) << 8)
+#define PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(x) ((x) << 16)
+#define PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(x) ((x) << 24)
+/* SMC ECC value registers (RO) */
+#define PL35X_SMC_ECC_VALUE(x) (0x418 + (4 * (x)))
+#define PL35X_SMC_ECC_VALUE_IS_CORRECTABLE(x) ((x) & BIT(27))
+#define PL35X_SMC_ECC_VALUE_HAS_FAILED(x) ((x) & BIT(28))
+#define PL35X_SMC_ECC_VALUE_IS_VALID(x) ((x) & BIT(30))
+
+/* NAND AXI interface */
+#define PL35X_SMC_CMD_PHASE 0
+#define PL35X_SMC_CMD_PHASE_CMD0(x) ((x) << 3)
+#define PL35X_SMC_CMD_PHASE_CMD1(x) ((x) << 11)
+#define PL35X_SMC_CMD_PHASE_CMD1_VALID BIT(20)
+#define PL35X_SMC_CMD_PHASE_ADDR(pos, x) ((x) << (8 * (pos)))
+#define PL35X_SMC_CMD_PHASE_NADDRS(x) ((x) << 21)
+#define PL35X_SMC_DATA_PHASE BIT(19)
+#define PL35X_SMC_DATA_PHASE_ECC_LAST BIT(10)
+#define PL35X_SMC_DATA_PHASE_CLEAR_CS BIT(21)
+
+#define PL35X_NAND_MAX_CS 1
+#define PL35X_NAND_LAST_XFER_SZ 4
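+/* Convert a timing expressed in picoseconds into controller clock cycles */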
+#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP((ps) / 1000, period_ns))
+
+#define PL35X_NAND_ECC_BITS_MASK 0xFFF
+#define PL35X_NAND_ECC_BYTE_OFF_MASK 0x1FF
+#define PL35X_NAND_ECC_BIT_OFF_MASK 0x7
+
+struct pl35x_nand_timings {
+ unsigned int t_rc:4;
+ unsigned int t_wc:4;
+ unsigned int t_rea:3;
+ unsigned int t_wp:3;
+ unsigned int t_clr:3;
+ unsigned int t_ar:3;
+ unsigned int t_rr:4;
+ unsigned int rsvd:8;
+};
+
+struct pl35x_nand {
+ struct list_head node;
+ struct nand_chip chip;
+ unsigned int cs;
+ unsigned int addr_cycles;
+ u32 ecc_cfg;
+ u32 timings;
+};
+
+/**
+ * struct pl35x_nandc - NAND flash controller driver structure
+ * @dev: Kernel device
+ * @conf_regs: SMC configuration registers for command phase
+ * @io_regs: NAND data registers for data phase
+ * @controller: Core NAND controller structure
+ * @chips: List of connected NAND chips
+ * @selected_chip: NAND chip currently selected by the controller
+ * @assigned_cs: Bitmap of the chip select lines already assigned
+ * @ecc_buf: Temporary buffer to extract ECC bytes
+ */
+struct pl35x_nandc {
+ struct device *dev;
+ void __iomem *conf_regs;
+ void __iomem *io_regs;
+ struct nand_controller controller;
+ struct list_head chips;
+ struct nand_chip *selected_chip;
+ unsigned long assigned_cs;
+ u8 *ecc_buf;
+};
+
+static inline struct pl35x_nandc *to_pl35x_nandc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct pl35x_nandc, controller);
+}
+
+static inline struct pl35x_nand *to_pl35x_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct pl35x_nand, chip);
+}
+
+static int pl35x_ecc_ooblayout16_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes);
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int pl35x_ecc_ooblayout16_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes) + 8;
+ oobregion->length = 8;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops pl35x_ecc_ooblayout16_ops = {
+ .ecc = pl35x_ecc_ooblayout16_ecc,
+ .free = pl35x_ecc_ooblayout16_free,
+};
+
+/* Generic flash bbt descriptors */
+static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+static void pl35x_smc_update_regs(struct pl35x_nandc *nfc)
+{
+ writel(PL35X_SMC_DIRECT_CMD_NAND_CS |
+ PL35X_SMC_DIRECT_CMD_UPD_REGS,
+ nfc->conf_regs + PL35X_SMC_DIRECT_CMD);
+}
+
+static int pl35x_smc_set_buswidth(struct pl35x_nandc *nfc, unsigned int bw)
+{
+ if (bw != PL35X_SMC_OPMODE_BW_8 && bw != PL35X_SMC_OPMODE_BW_16)
+ return -EINVAL;
+
+ writel(bw, nfc->conf_regs + PL35X_SMC_OPMODE);
+ pl35x_smc_update_regs(nfc);
+
+ return 0;
+}
+
+static void pl35x_smc_clear_irq(struct pl35x_nandc *nfc)
+{
+ writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1,
+ nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR);
+}
+
+static int pl35x_smc_wait_for_irq(struct pl35x_nandc *nfc)
+{
+ u32 reg;
+ int ret;
+
+ ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_MEMC_STATUS, reg,
+ reg & PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1,
+ 10, 1000000);
+ if (ret)
+ dev_err(nfc->dev,
+ "Timeout polling on NAND controller interrupt (0x%x)\n",
+ reg);
+
+ pl35x_smc_clear_irq(nfc);
+
+ return ret;
+}
+
+static int pl35x_smc_wait_for_ecc_done(struct pl35x_nandc *nfc)
+{
+ u32 reg;
+ int ret;
+
+ ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_ECC_STATUS, reg,
+ !(reg & PL35X_SMC_ECC_STATUS_ECC_BUSY),
+ 10, 1000000);
+ if (ret)
+ dev_err(nfc->dev,
+ "Timeout polling on ECC controller interrupt\n");
+
+ return ret;
+}
+
+static int pl35x_smc_set_ecc_mode(struct pl35x_nandc *nfc,
+ struct nand_chip *chip,
+ unsigned int mode)
+{
+ struct pl35x_nand *plnand;
+ u32 ecc_cfg;
+
+ ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG);
+ ecc_cfg &= ~PL35X_SMC_ECC_CFG_MODE_MASK;
+ ecc_cfg |= mode;
+ writel(ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);
+
+ if (chip) {
+ plnand = to_pl35x_nand(chip);
+ plnand->ecc_cfg = ecc_cfg;
+ }
+
+ if (mode != PL35X_SMC_ECC_CFG_MODE_BYPASS)
+ return pl35x_smc_wait_for_ecc_done(nfc);
+
+ return 0;
+}
+
+static void pl35x_smc_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ int ret;
+
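+ /* 8-bit wide chips never need the bus width to be toggled */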
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ return;
+
+ if (force_8bit)
+ ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8);
+ else
+ ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_16);
+
+ if (ret)
+ dev_err(nfc->dev, "Error in Buswidth\n");
+}
+
+static void pl35x_nand_select_target(struct nand_chip *chip,
+ unsigned int die_nr)
+{
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ struct pl35x_nand *plnand = to_pl35x_nand(chip);
+
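+ /* Skip reprogramming timings and ECC if the chip is already selected */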
+ if (chip == nfc->selected_chip)
+ return;
+
+ /* Setup the timings */
+ writel(plnand->timings, nfc->conf_regs + PL35X_SMC_CYCLES);
+ pl35x_smc_update_regs(nfc);
+
+ /* Configure the ECC engine */
+ writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);
+
+ nfc->selected_chip = chip;
+}
+
+static void pl35x_nand_read_data_op(struct nand_chip *chip, u8 *in,
+ unsigned int len, bool force_8bit,
+ unsigned int flags, unsigned int last_flags)
+{
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ unsigned int buf_end = len / 4;
+ unsigned int in_start = round_down(len, 4);
+ unsigned int data_phase_addr;
+ u32 *buf32 = (u32 *)in;
+ u8 *buf8 = (u8 *)in;
+ int i;
+
+ if (force_8bit)
+ pl35x_smc_force_byte_access(chip, true);
+
+ for (i = 0; i < buf_end; i++) {
+ data_phase_addr = PL35X_SMC_DATA_PHASE + flags;
+ if (i + 1 == buf_end)
+ data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags;
+
+ buf32[i] = readl(nfc->io_regs + data_phase_addr);
+ }
+
+ /* Extra flags cannot be applied to the trailing unaligned bytes */
+ for (i = in_start; i < len; i++)
+ buf8[i] = readb(nfc->io_regs + PL35X_SMC_DATA_PHASE);
+
+ if (force_8bit)
+ pl35x_smc_force_byte_access(chip, false);
+}
+
+static void pl35x_nand_write_data_op(struct nand_chip *chip, const u8 *out,
+ int len, bool force_8bit,
+ unsigned int flags,
+ unsigned int last_flags)
+{
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ unsigned int buf_end = len / 4;
+ unsigned int in_start = round_down(len, 4);
+ const u32 *buf32 = (const u32 *)out;
+ const u8 *buf8 = (const u8 *)out;
+ unsigned int data_phase_addr;
+ int i;
+
+ if (force_8bit)
+ pl35x_smc_force_byte_access(chip, true);
+
+ for (i = 0; i < buf_end; i++) {
+ data_phase_addr = PL35X_SMC_DATA_PHASE + flags;
+ if (i + 1 == buf_end)
+ data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags;
+
+ writel(buf32[i], nfc->io_regs + data_phase_addr);
+ }
+
+ /* Extra flags cannot be applied to the trailing unaligned bytes */
+ for (i = in_start; i < len; i++)
+ writeb(buf8[i], nfc->io_regs + PL35X_SMC_DATA_PHASE);
+
+ if (force_8bit)
+ pl35x_smc_force_byte_access(chip, false);
+}
+
+static int pl35x_nand_correct_data(struct pl35x_nandc *nfc, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ unsigned short ecc_odd, ecc_even, read_ecc_lower, read_ecc_upper;
+ unsigned short calc_ecc_lower, calc_ecc_upper;
+ unsigned short byte_addr, bit_addr;
+
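+ /*
+ * The 24-bit ECC codes are split into two 12-bit halves whose
+ * XOR against the calculated values locates a single bit flip.
+ */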
+ read_ecc_lower = (read_ecc[0] | (read_ecc[1] << 8)) &
+ PL35X_NAND_ECC_BITS_MASK;
+ read_ecc_upper = ((read_ecc[1] >> 4) | (read_ecc[2] << 4)) &
+ PL35X_NAND_ECC_BITS_MASK;
+
+ calc_ecc_lower = (calc_ecc[0] | (calc_ecc[1] << 8)) &
+ PL35X_NAND_ECC_BITS_MASK;
+ calc_ecc_upper = ((calc_ecc[1] >> 4) | (calc_ecc[2] << 4)) &
+ PL35X_NAND_ECC_BITS_MASK;
+
+ ecc_odd = read_ecc_lower ^ calc_ecc_lower;
+ ecc_even = read_ecc_upper ^ calc_ecc_upper;
+
+ /* No error */
+ if (likely(!ecc_odd && !ecc_even))
+ return 0;
+
+ /* One error in the main data; to be corrected */
+ if (ecc_odd == (~ecc_even & PL35X_NAND_ECC_BITS_MASK)) {
+ /* Bits [11:3] of error code give the byte offset */
+ byte_addr = (ecc_odd >> 3) & PL35X_NAND_ECC_BYTE_OFF_MASK;
+ /* Bits [2:0] of error code give the bit offset */
+ bit_addr = ecc_odd & PL35X_NAND_ECC_BIT_OFF_MASK;
+ /* Toggle the faulty bit */
+ buf[byte_addr] ^= (BIT(bit_addr));
+
+ return 1;
+ }
+
+ /* One error in the ECC data; no action needed */
+ if (hweight32(ecc_odd | ecc_even) == 1)
+ return 1;
+
+ return -EBADMSG;
+}
+
+static void pl35x_nand_ecc_reg_to_array(struct nand_chip *chip, u32 ecc_reg,
+ u8 *ecc_array)
+{
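+ /* Invert the register value and split it into individual ECC bytes */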
+ u32 ecc_value = ~ecc_reg;
+ unsigned int ecc_byte;
+
+ for (ecc_byte = 0; ecc_byte < chip->ecc.bytes; ecc_byte++)
+ ecc_array[ecc_byte] = ecc_value >> (8 * ecc_byte);
+}
+
+static int pl35x_nand_read_eccbytes(struct pl35x_nandc *nfc,
+ struct nand_chip *chip, u8 *read_ecc)
+{
+ u32 ecc_value;
+ int chunk;
+
+ for (chunk = 0; chunk < chip->ecc.steps;
+ chunk++, read_ecc += chip->ecc.bytes) {
+ ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk));
+ if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value))
+ return -EINVAL;
+
+ pl35x_nand_ecc_reg_to_array(chip, ecc_value, read_ecc);
+ }
+
+ return 0;
+}
+
+static int pl35x_nand_recover_data_hwecc(struct pl35x_nandc *nfc,
+ struct nand_chip *chip, u8 *data,
+ u8 *read_ecc)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int max_bitflips = 0, chunk;
+ u8 calc_ecc[3];
+ u32 ecc_value;
+ int stats;
+
+ for (chunk = 0; chunk < chip->ecc.steps;
+ chunk++, data += chip->ecc.size, read_ecc += chip->ecc.bytes) {
+ /* Read ECC value for each chunk */
+ ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk));
+
+ if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value))
+ return -EINVAL;
+
+ if (PL35X_SMC_ECC_VALUE_HAS_FAILED(ecc_value)) {
+ mtd->ecc_stats.failed++;
+ continue;
+ }
+
+ pl35x_nand_ecc_reg_to_array(chip, ecc_value, calc_ecc);
+ stats = pl35x_nand_correct_data(nfc, data, read_ecc, calc_ecc);
+ if (stats < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stats;
+ max_bitflips = max_t(unsigned int, max_bitflips, stats);
+ }
+ }
+
+ return max_bitflips;
+}
+
+static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ struct pl35x_nand *plnand = to_pl35x_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int first_row = (mtd->writesize <= 512) ? 1 : 2;
+ unsigned int nrows = plnand->addr_cycles;
+ u32 addr1 = 0, addr2 = 0, row;
+ u32 cmd_addr;
+ int i, ret;
+ u8 status;
+
+ ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
+ if (ret)
+ return ret;
+
+ cmd_addr = PL35X_SMC_CMD_PHASE |
+ PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) |
+ PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_SEQIN);
+
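+ /* Pack the row address cycles, one byte each, into addr1/addr2 */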
+ for (i = 0, row = first_row; row < nrows; i++, row++) {
+ u8 addr = page >> ((i * 8) & 0xFF);
+
+ if (row < 4)
+ addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr);
+ else
+ addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr);
+ }
+
+ /* Send the command and address cycles */
+ writel(addr1, nfc->io_regs + cmd_addr);
+ if (plnand->addr_cycles > 4)
+ writel(addr2, nfc->io_regs + cmd_addr);
+
+ /* Write the data with the engine enabled */
+ pl35x_nand_write_data_op(chip, buf, mtd->writesize, false,
+ 0, PL35X_SMC_DATA_PHASE_ECC_LAST);
+ ret = pl35x_smc_wait_for_ecc_done(nfc);
+ if (ret)
+ goto disable_ecc_engine;
+
+ /* Copy the HW calculated ECC bytes in the OOB buffer */
+ ret = pl35x_nand_read_eccbytes(nfc, chip, nfc->ecc_buf);
+ if (ret)
+ goto disable_ecc_engine;
+
+ if (!oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ ret = mtd_ooblayout_set_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi,
+ 0, chip->ecc.total);
+ if (ret)
+ goto disable_ecc_engine;
+
+ /* Write the spare area with ECC bytes */
+ pl35x_nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false, 0,
+ PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_PAGEPROG) |
+ PL35X_SMC_CMD_PHASE_CMD1_VALID |
+ PL35X_SMC_DATA_PHASE_CLEAR_CS);
+ ret = pl35x_smc_wait_for_irq(nfc);
+ if (ret)
+ goto disable_ecc_engine;
+
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ goto disable_ecc_engine;
+
+ if (status & NAND_STATUS_FAIL)
+ ret = -EIO;
+
+disable_ecc_engine:
+ pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
+
+ return ret;
+}
+
+/*
+ * This function reads data and checks its integrity by comparing the
+ * hardware-generated ECC values with the ECC values read from the spare area.
+ *
+ * There is a limitation with SMC controller: ECC_LAST must be set on the
+ * last data access to tell the ECC engine not to expect any further data.
+ * In practice, this implies shrinking the last data transfer by e.g. 4 bytes,
+ * and doing a last 4-byte transfer with the additional bit set. The last block
+ * should be aligned with the end of an ECC block. Because of this limitation,
+ * it is not possible to use the core routines.
+ */
+static int pl35x_nand_read_page_hwecc(struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ struct pl35x_nand *plnand = to_pl35x_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int first_row = (mtd->writesize <= 512) ? 1 : 2;
+ unsigned int nrows = plnand->addr_cycles;
+ unsigned int addr1 = 0, addr2 = 0, row;
+ u32 cmd_addr;
+ int i, ret;
+
+ ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
+ if (ret)
+ return ret;
+
+ cmd_addr = PL35X_SMC_CMD_PHASE |
+ PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) |
+ PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_READ0) |
+ PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_READSTART) |
+ PL35X_SMC_CMD_PHASE_CMD1_VALID;
+
+ for (i = 0, row = first_row; row < nrows; i++, row++) {
+ u8 addr = page >> ((i * 8) & 0xFF);
+
+ if (row < 4)
+ addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr);
+ else
+ addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr);
+ }
+
+ /* Send the command and address cycles */
+ writel(addr1, nfc->io_regs + cmd_addr);
+ if (plnand->addr_cycles > 4)
+ writel(addr2, nfc->io_regs + cmd_addr);
+
+ /* Wait for the data to be available in the NAND cache */
+ ndelay(PSEC_TO_NSEC(sdr->tRR_min));
+ ret = pl35x_smc_wait_for_irq(nfc);
+ if (ret)
+ goto disable_ecc_engine;
+
+ /* Retrieve the raw data with the engine enabled */
+ pl35x_nand_read_data_op(chip, buf, mtd->writesize, false,
+ 0, PL35X_SMC_DATA_PHASE_ECC_LAST);
+ ret = pl35x_smc_wait_for_ecc_done(nfc);
+ if (ret)
+ goto disable_ecc_engine;
+
+ /* Retrieve the stored ECC bytes */
+ pl35x_nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
+ 0, PL35X_SMC_DATA_PHASE_CLEAR_CS);
+ ret = mtd_ooblayout_get_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ goto disable_ecc_engine;
+
+ pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
+
+ /* Correct the data and report failures */
+ return pl35x_nand_recover_data_hwecc(nfc, chip, buf, nfc->ecc_buf);
+
+disable_ecc_engine:
+ pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
+
+ return ret;
+}
+
+static int pl35x_nand_exec_op(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ const struct nand_op_instr *instr, *data_instr = NULL;
+ unsigned int rdy_tim_ms = 0, naddrs = 0, cmds = 0, last_flags = 0;
+ u32 addr1 = 0, addr2 = 0, cmd0 = 0, cmd1 = 0, cmd_addr = 0;
+ unsigned int op_id, len, offset, rdy_del_ns;
+ int last_instr_type = -1;
+ bool cmd1_valid = false;
+ const u8 *addrs;
+ int i, ret;
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (!cmds) {
+ cmd0 = PL35X_SMC_CMD_PHASE_CMD0(instr->ctx.cmd.opcode);
+ } else {
+ cmd1 = PL35X_SMC_CMD_PHASE_CMD1(instr->ctx.cmd.opcode);
+ if (last_instr_type != NAND_OP_DATA_OUT_INSTR)
+ cmd1_valid = true;
+ }
+ cmds++;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+ cmd_addr |= PL35X_SMC_CMD_PHASE_NADDRS(naddrs);
+
+ for (i = offset; i < naddrs; i++) {
+ if (i < 4)
+ addr1 |= PL35X_SMC_CMD_PHASE_ADDR(i, addrs[i]);
+ else
+ addr2 |= PL35X_SMC_CMD_PHASE_ADDR(i - 4, addrs[i]);
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ data_instr = instr;
+ len = nand_subop_get_data_len(subop, op_id);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ rdy_tim_ms = instr->ctx.waitrdy.timeout_ms;
+ rdy_del_ns = instr->delay_ns;
+ break;
+ }
+
+ last_instr_type = instr->type;
+ }
+
+ /* Command phase */
+ cmd_addr |= PL35X_SMC_CMD_PHASE | cmd0 | cmd1 |
+ (cmd1_valid ? PL35X_SMC_CMD_PHASE_CMD1_VALID : 0);
+ writel(addr1, nfc->io_regs + cmd_addr);
+ if (naddrs > 4)
+ writel(addr2, nfc->io_regs + cmd_addr);
+
+ /* Data phase */
+ if (data_instr && data_instr->type == NAND_OP_DATA_OUT_INSTR) {
+ last_flags = PL35X_SMC_DATA_PHASE_CLEAR_CS;
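+ /*
+ * If a second command was parsed (e.g. PAGEPROG), issue it as
+ * part of the very last data phase access.
+ */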
+ if (cmds == 2)
+ last_flags |= cmd1 | PL35X_SMC_CMD_PHASE_CMD1_VALID;
+
+ pl35x_nand_write_data_op(chip, data_instr->ctx.data.buf.out,
+ len, data_instr->ctx.data.force_8bit,
+ 0, last_flags);
+ }
+
+ if (rdy_tim_ms) {
+ ndelay(rdy_del_ns);
+ ret = pl35x_smc_wait_for_irq(nfc);
+ if (ret)
+ return ret;
+ }
+
+ if (data_instr && data_instr->type == NAND_OP_DATA_IN_INSTR)
+ pl35x_nand_read_data_op(chip, data_instr->ctx.data.buf.in,
+ len, data_instr->ctx.data.force_8bit,
+ 0, PL35X_SMC_DATA_PHASE_CLEAR_CS);
+
+ return 0;
+}
+
+static const struct nand_op_parser pl35x_nandc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 2112)),
+ NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ );
+
+static int pl35x_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ if (!check_only)
+ pl35x_nand_select_target(chip, op->cs);
+
+ return nand_op_parser_exec_op(chip, &pl35x_nandc_op_parser,
+ op, check_only);
+}
+
+static int pl35x_nfc_setup_interface(struct nand_chip *chip, int cs,
+ const struct nand_interface_config *conf)
+{
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ struct pl35x_nand *plnand = to_pl35x_nand(chip);
+ struct pl35x_nand_timings tmgs = {};
+ const struct nand_sdr_timings *sdr;
+ unsigned int period_ns, val;
+ struct clk *mclk;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ mclk = of_clk_get_by_name(nfc->dev->parent->of_node, "memclk");
+ if (IS_ERR(mclk)) {
+ dev_err(nfc->dev, "Failed to retrieve SMC memclk\n");
+ return PTR_ERR(mclk);
+ }
+
+ /*
+ * SDR timings are given in pico-seconds while NFC timings must be
+ * expressed in NAND controller clock cycles. We use the TO_CYCLES()
+ * macro to convert from one to the other.
+ */
+ period_ns = NSEC_PER_SEC / clk_get_rate(mclk);
+
+ /*
+ * PL35X SMC needs one extra read cycle in SDR Mode 5. This is not
+ * written anywhere in the datasheet but is an empirical observation.
+ */
+ val = TO_CYCLES(sdr->tRC_min, period_ns);
+ if (sdr->tRC_min <= 20000)
+ val++;
+
+ tmgs.t_rc = val;
+ if (tmgs.t_rc != val || tmgs.t_rc < 2)
+ return -EINVAL;
+
+ val = TO_CYCLES(sdr->tWC_min, period_ns);
+ tmgs.t_wc = val;
+ if (tmgs.t_wc != val || tmgs.t_wc < 2)
+ return -EINVAL;
+
+ /*
+ * For all SDR modes, the PL35X SMC needs tREA_max to be 1; this is
+ * also an empirical observation.
+ */
+ tmgs.t_rea = 1;
+
+ val = TO_CYCLES(sdr->tWP_min, period_ns);
+ tmgs.t_wp = val;
+ if (tmgs.t_wp != val || tmgs.t_wp < 1)
+ return -EINVAL;
+
+ val = TO_CYCLES(sdr->tCLR_min, period_ns);
+ tmgs.t_clr = val;
+ if (tmgs.t_clr != val)
+ return -EINVAL;
+
+ val = TO_CYCLES(sdr->tAR_min, period_ns);
+ tmgs.t_ar = val;
+ if (tmgs.t_ar != val)
+ return -EINVAL;
+
+ val = TO_CYCLES(sdr->tRR_min, period_ns);
+ tmgs.t_rr = val;
+ if (tmgs.t_rr != val)
+ return -EINVAL;
+
+ if (cs == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ plnand->timings = PL35X_SMC_NAND_TRC_CYCLES(tmgs.t_rc) |
+ PL35X_SMC_NAND_TWC_CYCLES(tmgs.t_wc) |
+ PL35X_SMC_NAND_TREA_CYCLES(tmgs.t_rea) |
+ PL35X_SMC_NAND_TWP_CYCLES(tmgs.t_wp) |
+ PL35X_SMC_NAND_TCLR_CYCLES(tmgs.t_clr) |
+ PL35X_SMC_NAND_TAR_CYCLES(tmgs.t_ar) |
+ PL35X_SMC_NAND_TRR_CYCLES(tmgs.t_rr);
+
+ return 0;
+}
+
+static void pl35x_smc_set_ecc_pg_size(struct pl35x_nandc *nfc,
+ struct nand_chip *chip,
+ unsigned int pg_sz)
+{
+ struct pl35x_nand *plnand = to_pl35x_nand(chip);
+ u32 sz;
+
+ switch (pg_sz) {
+ case SZ_512:
+ sz = 1;
+ break;
+ case SZ_1K:
+ sz = 2;
+ break;
+ case SZ_2K:
+ sz = 3;
+ break;
+ default:
+ sz = 0;
+ break;
+ }
+
+ plnand->ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG);
+ plnand->ecc_cfg &= ~PL35X_SMC_ECC_CFG_PGSIZE_MASK;
+ plnand->ecc_cfg |= sz;
+ writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);
+}
+
+static int pl35x_nand_init_hw_ecc_controller(struct pl35x_nandc *nfc,
+ struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret = 0;
+
+ if (mtd->writesize < SZ_512 || mtd->writesize > SZ_2K) {
+ dev_err(nfc->dev,
+ "The hardware ECC engine is limited to pages up to 2kiB\n");
+ return -EOPNOTSUPP;
+ }
+
+ chip->ecc.strength = 1;
+ chip->ecc.bytes = 3;
+ chip->ecc.size = SZ_512;
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ chip->ecc.read_page = pl35x_nand_read_page_hwecc;
+ chip->ecc.write_page = pl35x_nand_write_page_hwecc;
+ chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
+ pl35x_smc_set_ecc_pg_size(nfc, chip, mtd->writesize);
+
+ nfc->ecc_buf = devm_kmalloc(nfc->dev, chip->ecc.bytes * chip->ecc.steps,
+ GFP_KERNEL);
+ if (!nfc->ecc_buf)
+ return -ENOMEM;
+
+ switch (mtd->oobsize) {
+ case 16:
+ /* Legacy Xilinx layout */
+ mtd_set_ooblayout(mtd, &pl35x_ecc_ooblayout16_ops);
+ chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+ break;
+ case 64:
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported OOB size\n");
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int pl35x_nand_attach_chip(struct nand_chip *chip)
+{
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ struct pl35x_nand *plnand = to_pl35x_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
+ (!chip->ecc.size || !chip->ecc.strength)) {
+ if (requirements->step_size && requirements->strength) {
+ chip->ecc.size = requirements->step_size;
+ chip->ecc.strength = requirements->strength;
+ } else {
+ dev_info(nfc->dev,
+ "No minimum ECC strength, using 1b/512B\n");
+ chip->ecc.size = 512;
+ chip->ecc.strength = 1;
+ }
+ }
+
+ if (mtd->writesize <= SZ_512)
+ plnand->addr_cycles = 1;
+ else
+ plnand->addr_cycles = 2;
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ plnand->addr_cycles += 3;
+ else
+ plnand->addr_cycles += 2;
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ /* Keep these legacy BBT descriptors for ON_DIE situations */
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ fallthrough;
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ break;
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = pl35x_nand_init_hw_ecc_controller(nfc, chip);
+ if (ret)
+ return ret;
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
+ chip->ecc.engine_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops pl35x_nandc_ops = {
+ .attach_chip = pl35x_nand_attach_chip,
+ .exec_op = pl35x_nfc_exec_op,
+ .setup_interface = pl35x_nfc_setup_interface,
+};
+
+static int pl35x_nand_reset_state(struct pl35x_nandc *nfc)
+{
+ int ret;
+
+ /* Disable interrupts and clear their status */
+ writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1 |
+ PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1 |
+ PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1,
+ nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR);
+
+ /* Set default bus width to 8-bit */
+ ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8);
+ if (ret)
+ return ret;
+
+ /* Ensure the ECC controller is bypassed by default */
+ ret = pl35x_smc_set_ecc_mode(nfc, NULL, PL35X_SMC_ECC_CFG_MODE_BYPASS);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the commands that the ECC block uses to detect the
+ * operations it should start/end.
+ */
+ writel(PL35X_SMC_ECC_CMD1_WRITE(NAND_CMD_SEQIN) |
+ PL35X_SMC_ECC_CMD1_READ(NAND_CMD_READ0) |
+ PL35X_SMC_ECC_CMD1_READ_END(NAND_CMD_READSTART) |
+ PL35X_SMC_ECC_CMD1_READ_END_VALID(NAND_CMD_READ1),
+ nfc->conf_regs + PL35X_SMC_ECC_CMD1);
+ writel(PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(NAND_CMD_RNDIN) |
+ PL35X_SMC_ECC_CMD2_READ_COL_CHG(NAND_CMD_RNDOUT) |
+ PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(NAND_CMD_RNDOUTSTART) |
+ PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(NAND_CMD_READ1),
+ nfc->conf_regs + PL35X_SMC_ECC_CMD2);
+
+ return 0;
+}
+
+static int pl35x_nand_chip_init(struct pl35x_nandc *nfc,
+ struct device_node *np)
+{
+ struct pl35x_nand *plnand;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ int cs, ret;
+
+ plnand = devm_kzalloc(nfc->dev, sizeof(*plnand), GFP_KERNEL);
+ if (!plnand)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "reg", &cs);
+ if (ret)
+ return ret;
+
+ if (cs >= PL35X_NAND_MAX_CS) {
+ dev_err(nfc->dev, "Wrong CS %d\n", cs);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &nfc->assigned_cs)) {
+ dev_err(nfc->dev, "Already assigned CS %d\n", cs);
+ return -EINVAL;
+ }
+
+ plnand->cs = cs;
+
+ chip = &plnand->chip;
+ chip->options = NAND_BUSWIDTH_AUTO | NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ chip->controller = &nfc->controller;
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = nfc->dev;
+ nand_set_flash_node(chip, np);
+ if (!mtd->name) {
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
+ "%s", PL35X_NANDC_DRIVER_NAME);
+ if (!mtd->name) {
+ dev_err(nfc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&plnand->node, &nfc->chips);
+
+ return ret;
+}
+
+static void pl35x_nand_chips_cleanup(struct pl35x_nandc *nfc)
+{
+ struct pl35x_nand *plnand, *tmp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(plnand, tmp, &nfc->chips, node) {
+ chip = &plnand->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&plnand->node);
+ }
+}
+
+static int pl35x_nand_chips_init(struct pl35x_nandc *nfc)
+{
+ struct device_node *np = nfc->dev->of_node, *nand_np;
+ int nchips = of_get_child_count(np);
+ int ret;
+
+ if (!nchips || nchips > PL35X_NAND_MAX_CS) {
+ dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
+ nchips);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = pl35x_nand_chip_init(nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ pl35x_nand_chips_cleanup(nfc);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int pl35x_nand_probe(struct platform_device *pdev)
+{
+ struct device *smc_dev = pdev->dev.parent;
+ struct amba_device *smc_amba = to_amba_device(smc_dev);
+ struct pl35x_nandc *nfc;
+ int ret;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = &pdev->dev;
+ nand_controller_init(&nfc->controller);
+ nfc->controller.ops = &pl35x_nandc_ops;
+ INIT_LIST_HEAD(&nfc->chips);
+
+ nfc->conf_regs = devm_ioremap_resource(&smc_amba->dev, &smc_amba->res);
+ if (IS_ERR(nfc->conf_regs))
+ return PTR_ERR(nfc->conf_regs);
+
+ nfc->io_regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nfc->io_regs))
+ return PTR_ERR(nfc->io_regs);
+
+ ret = pl35x_nand_reset_state(nfc);
+ if (ret)
+ return ret;
+
+ ret = pl35x_nand_chips_init(nfc);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, nfc);
+
+ return 0;
+}
+
+static void pl35x_nand_remove(struct platform_device *pdev)
+{
+ struct pl35x_nandc *nfc = platform_get_drvdata(pdev);
+
+ pl35x_nand_chips_cleanup(nfc);
+}
+
+static const struct of_device_id pl35x_nand_of_match[] = {
+ { .compatible = "arm,pl353-nand-r2p1" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pl35x_nand_of_match);
+
+static struct platform_driver pl35x_nandc_driver = {
+ .probe = pl35x_nand_probe,
+ .remove_new = pl35x_nand_remove,
+ .driver = {
+ .name = PL35X_NANDC_DRIVER_NAME,
+ .of_match_table = pl35x_nand_of_match,
+ },
+};
+module_platform_driver(pl35x_nandc_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_ALIAS("platform:" PL35X_NANDC_DRIVER_NAME);
+MODULE_DESCRIPTION("ARM PL35X NAND controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c
new file mode 100644
index 0000000000..b5c374b51e
--- /dev/null
+++ b/drivers/mtd/nand/raw/plat_nand.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic NAND driver
+ *
+ * Author: Vitaly Wool <vitalywool@gmail.com>
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/platnand.h>
+
+struct plat_nand_data {
+ struct nand_controller controller;
+ struct nand_chip chip;
+ void __iomem *io_base;
+};
+
+static int plat_nand_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops plat_nand_ops = {
+ .attach_chip = plat_nand_attach_chip,
+};
+
+/*
+ * Probe for the NAND device.
+ */
+static int plat_nand_probe(struct platform_device *pdev)
+{
+ struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
+ struct plat_nand_data *data;
+ struct mtd_info *mtd;
+ const char **part_types;
+ int err = 0;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform_nand_data is missing\n");
+ return -EINVAL;
+ }
+
+ if (pdata->chip.nr_chips < 1) {
+ dev_err(&pdev->dev, "invalid number of chips specified\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for the device structure (and zero it) */
+ data = devm_kzalloc(&pdev->dev, sizeof(struct plat_nand_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->controller.ops = &plat_nand_ops;
+ nand_controller_init(&data->controller);
+ data->chip.controller = &data->controller;
+
+ data->io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->io_base))
+ return PTR_ERR(data->io_base);
+
+ nand_set_flash_node(&data->chip, pdev->dev.of_node);
+ mtd = nand_to_mtd(&data->chip);
+ mtd->dev.parent = &pdev->dev;
+
+ data->chip.legacy.IO_ADDR_R = data->io_base;
+ data->chip.legacy.IO_ADDR_W = data->io_base;
+ data->chip.legacy.cmd_ctrl = pdata->ctrl.cmd_ctrl;
+ data->chip.legacy.dev_ready = pdata->ctrl.dev_ready;
+ data->chip.legacy.select_chip = pdata->ctrl.select_chip;
+ data->chip.legacy.write_buf = pdata->ctrl.write_buf;
+ data->chip.legacy.read_buf = pdata->ctrl.read_buf;
+ data->chip.legacy.chip_delay = pdata->chip.chip_delay;
+ data->chip.options |= pdata->chip.options;
+ data->chip.bbt_options |= pdata->chip.bbt_options;
+
+ platform_set_drvdata(pdev, data);
+
+ /* Handle any platform specific setup */
+ if (pdata->ctrl.probe) {
+ err = pdata->ctrl.probe(pdev);
+ if (err)
+ goto out;
+ }
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(&data->chip, pdata->chip.nr_chips);
+ if (err)
+ goto out;
+
+ part_types = pdata->chip.part_probe_types;
+
+ err = mtd_device_parse_register(mtd, part_types, NULL,
+ pdata->chip.partitions,
+ pdata->chip.nr_partitions);
+
+ if (!err)
+ return err;
+
+ nand_cleanup(&data->chip);
+out:
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
+ return err;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static void plat_nand_remove(struct platform_device *pdev)
+{
+ struct plat_nand_data *data = platform_get_drvdata(pdev);
+ struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
+ struct nand_chip *chip = &data->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
+}
+
+static const struct of_device_id plat_nand_match[] = {
+ { .compatible = "gen_nand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, plat_nand_match);
+
+static struct platform_driver plat_nand_driver = {
+ .probe = plat_nand_probe,
+ .remove_new = plat_nand_remove,
+ .driver = {
+ .name = "gen_nand",
+ .of_match_table = plat_nand_match,
+ },
+};
+
+module_platform_driver(plat_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vitaly Wool");
+MODULE_DESCRIPTION("Simple generic NAND driver");
+MODULE_ALIAS("platform:gen_nand");
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
new file mode 100644
index 0000000000..b079605c84
--- /dev/null
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -0,0 +1,3546 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_adm.h>
+#include <linux/dma/qcom_bam_dma.h>
+#include <linux/module.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* NANDc reg offsets */
+#define NAND_FLASH_CMD 0x00
+#define NAND_ADDR0 0x04
+#define NAND_ADDR1 0x08
+#define NAND_FLASH_CHIP_SELECT 0x0c
+#define NAND_EXEC_CMD 0x10
+#define NAND_FLASH_STATUS 0x14
+#define NAND_BUFFER_STATUS 0x18
+#define NAND_DEV0_CFG0 0x20
+#define NAND_DEV0_CFG1 0x24
+#define NAND_DEV0_ECC_CFG 0x28
+#define NAND_AUTO_STATUS_EN 0x2c
+#define NAND_DEV1_CFG0 0x30
+#define NAND_DEV1_CFG1 0x34
+#define NAND_READ_ID 0x40
+#define NAND_READ_STATUS 0x44
+#define NAND_DEV_CMD0 0xa0
+#define NAND_DEV_CMD1 0xa4
+#define NAND_DEV_CMD2 0xa8
+#define NAND_DEV_CMD_VLD 0xac
+#define SFLASHC_BURST_CFG 0xe0
+#define NAND_ERASED_CW_DETECT_CFG 0xe8
+#define NAND_ERASED_CW_DETECT_STATUS 0xec
+#define NAND_EBI2_ECC_BUF_CFG 0xf0
+#define FLASH_BUF_ACC 0x100
+
+#define NAND_CTRL 0xf00
+#define NAND_VERSION 0xf08
+#define NAND_READ_LOCATION_0 0xf20
+#define NAND_READ_LOCATION_1 0xf24
+#define NAND_READ_LOCATION_2 0xf28
+#define NAND_READ_LOCATION_3 0xf2c
+#define NAND_READ_LOCATION_LAST_CW_0 0xf40
+#define NAND_READ_LOCATION_LAST_CW_1 0xf44
+#define NAND_READ_LOCATION_LAST_CW_2 0xf48
+#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
+
+/* dummy register offsets, used by write_reg_dma */
+#define NAND_DEV_CMD1_RESTORE 0xdead
+#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
+
+/* NAND_FLASH_CMD bits */
+#define PAGE_ACC BIT(4)
+#define LAST_PAGE BIT(5)
+
+/* NAND_FLASH_CHIP_SELECT bits */
+#define NAND_DEV_SEL 0
+#define DM_EN BIT(2)
+
+/* NAND_FLASH_STATUS bits */
+#define FS_OP_ERR BIT(4)
+#define FS_READY_BSY_N BIT(5)
+#define FS_MPU_ERR BIT(8)
+#define FS_DEVICE_STS_ERR BIT(16)
+#define FS_DEVICE_WP BIT(23)
+
+/* NAND_BUFFER_STATUS bits */
+#define BS_UNCORRECTABLE_BIT BIT(8)
+#define BS_CORRECTABLE_ERR_MSK 0x1f
+
+/* NAND_DEVn_CFG0 bits */
+#define DISABLE_STATUS_AFTER_WRITE 4
+#define CW_PER_PAGE 6
+#define UD_SIZE_BYTES 9
+#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
+#define ECC_PARITY_SIZE_BYTES_RS 19
+#define SPARE_SIZE_BYTES 23
+#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
+#define NUM_ADDR_CYCLES 27
+#define STATUS_BFR_READ 30
+#define SET_RD_MODE_AFTER_STATUS 31
+
+/* NAND_DEVn_CFG0 bits */
+#define DEV0_CFG1_ECC_DISABLE 0
+#define WIDE_FLASH 1
+#define NAND_RECOVERY_CYCLES 2
+#define CS_ACTIVE_BSY 5
+#define BAD_BLOCK_BYTE_NUM 6
+#define BAD_BLOCK_IN_SPARE_AREA 16
+#define WR_RD_BSY_GAP 17
+#define ENABLE_BCH_ECC 27
+
+/* NAND_DEV0_ECC_CFG bits */
+#define ECC_CFG_ECC_DISABLE 0
+#define ECC_SW_RESET 1
+#define ECC_MODE 4
+#define ECC_PARITY_SIZE_BYTES_BCH 8
+#define ECC_NUM_DATA_BYTES 16
+#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
+#define ECC_FORCE_CLK_OPEN 30
+
+/* NAND_DEV_CMD1 bits */
+#define READ_ADDR 0
+
+/* NAND_DEV_CMD_VLD bits */
+#define READ_START_VLD BIT(0)
+#define READ_STOP_VLD BIT(1)
+#define WRITE_START_VLD BIT(2)
+#define ERASE_START_VLD BIT(3)
+#define SEQ_READ_START_VLD BIT(4)
+
+/* NAND_EBI2_ECC_BUF_CFG bits */
+#define NUM_STEPS 0
+
+/* NAND_ERASED_CW_DETECT_CFG bits */
+#define ERASED_CW_ECC_MASK 1
+#define AUTO_DETECT_RES 0
+#define MASK_ECC BIT(ERASED_CW_ECC_MASK)
+#define RESET_ERASED_DET BIT(AUTO_DETECT_RES)
+#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
+#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
+#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
+
+/* NAND_ERASED_CW_DETECT_STATUS bits */
+#define PAGE_ALL_ERASED BIT(7)
+#define CODEWORD_ALL_ERASED BIT(6)
+#define PAGE_ERASED BIT(5)
+#define CODEWORD_ERASED BIT(4)
+#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
+#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
+
+/* NAND_READ_LOCATION_n bits */
+#define READ_LOCATION_OFFSET 0
+#define READ_LOCATION_SIZE 16
+#define READ_LOCATION_LAST 31
+
+/* Version Mask */
+#define NAND_VERSION_MAJOR_MASK 0xf0000000
+#define NAND_VERSION_MAJOR_SHIFT 28
+#define NAND_VERSION_MINOR_MASK 0x0fff0000
+#define NAND_VERSION_MINOR_SHIFT 16
+
+/* NAND OP_CMDs */
+#define OP_PAGE_READ 0x2
+#define OP_PAGE_READ_WITH_ECC 0x3
+#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
+#define OP_PAGE_READ_ONFI_READ 0x5
+#define OP_PROGRAM_PAGE 0x6
+#define OP_PAGE_PROGRAM_WITH_ECC 0x7
+#define OP_PROGRAM_PAGE_SPARE 0x9
+#define OP_BLOCK_ERASE 0xa
+#define OP_CHECK_STATUS 0xc
+#define OP_FETCH_ID 0xb
+#define OP_RESET_DEVICE 0xd
+
+/* Default Value for NAND_DEV_CMD_VLD */
+#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
+ ERASE_START_VLD | SEQ_READ_START_VLD)
+
+/* NAND_CTRL bits */
+#define BAM_MODE_EN BIT(0)
+
+/*
+ * the NAND controller performs reads/writes with ECC in 516 byte chunks.
+ * the driver calls the chunks 'step' or 'codeword' interchangeably
+ */
+#define NANDC_STEP_SIZE 512
+
+/*
+ * the largest page size we support is 8K, this will have 16 steps/codewords
+ * of 512 bytes each
+ */
+#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
+
+/* we read at most 3 registers per codeword scan */
+#define MAX_REG_RD (3 * MAX_NUM_STEPS)
+
+/* ECC modes supported by the controller */
+#define ECC_NONE BIT(0)
+#define ECC_RS_4BIT BIT(1)
+#define ECC_BCH_4BIT BIT(2)
+#define ECC_BCH_8BIT BIT(3)
+
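+/*
+ * Helpers to pack the codeword offset, transfer size and "last
+ * location" marker into a NAND_READ_LOCATION_n register value.
+ */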
+#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc) \
+nandc_set_reg(chip, reg, \
+ ((cw_offset) << READ_LOCATION_OFFSET) | \
+ ((read_size) << READ_LOCATION_SIZE) | \
+ ((is_last_read_loc) << READ_LOCATION_LAST))
+
+#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc) \
+nandc_set_reg(chip, reg, \
+ ((cw_offset) << READ_LOCATION_OFFSET) | \
+ ((read_size) << READ_LOCATION_SIZE) | \
+ ((is_last_read_loc) << READ_LOCATION_LAST))
+/*
+ * Returns the actual register address for all NAND_DEV_ registers
+ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
+ */
+#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
+
+/* Returns the NAND register physical address */
+#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
+
+/* Returns the dma address for reg read buffer */
+#define reg_buf_dma_addr(chip, vaddr) \
+ ((chip)->reg_read_dma + \
+ ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
+
+#define QPIC_PER_CW_CMD_ELEMENTS 32
+#define QPIC_PER_CW_CMD_SGL 32
+#define QPIC_PER_CW_DATA_SGL 8
+
+#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
+
+/*
+ * Flags used in DMA descriptor preparation helper functions
+ * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
+ */
+/* Don't set the EOT in current tx BAM sgl */
+#define NAND_BAM_NO_EOT BIT(0)
+/* Set the NWD flag in current BAM sgl */
+#define NAND_BAM_NWD BIT(1)
+/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
+#define NAND_BAM_NEXT_SGL BIT(2)
+/*
+ * The erased codeword status register is programmed twice in a single
+ * transfer, so this flag selects which value (set or clear) of the erased
+ * codeword status register is written
+ */
+#define NAND_ERASED_CW_SET BIT(4)
+
+#define MAX_ADDRESS_CYCLE 5
+
+/*
+ * This data type corresponds to the BAM transaction which will be used for all
+ * NAND transfers.
+ * @bam_ce - the array of BAM command elements
+ * @cmd_sgl - sgl for NAND BAM command pipe
+ * @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @last_data_desc - last DMA desc in data channel (tx/rx).
+ * @last_cmd_desc - last DMA desc in command channel.
+ * @txn_done - completion for NAND transfer.
+ * @bam_ce_pos - the index in bam_ce which is available for next sgl
+ * @bam_ce_start - the index in bam_ce which marks the start of the
+ * current sgl; used for calculating the size of the
+ * current sgl
+ * @cmd_sgl_pos - current index in command sgl.
+ * @cmd_sgl_start - start index in command sgl.
+ * @tx_sgl_pos - current index in data sgl for tx.
+ * @tx_sgl_start - start index in data sgl for tx.
+ * @rx_sgl_pos - current index in data sgl for rx.
+ * @rx_sgl_start - start index in data sgl for rx.
+ * @wait_second_completion - wait for second DMA desc completion before making
+ * the NAND transfer completion.
+ */
+struct bam_transaction {
+ struct bam_cmd_element *bam_ce;
+ struct scatterlist *cmd_sgl;
+ struct scatterlist *data_sgl;
+ struct dma_async_tx_descriptor *last_data_desc;
+ struct dma_async_tx_descriptor *last_cmd_desc;
+ struct completion txn_done;
+ u32 bam_ce_pos;
+ u32 bam_ce_start;
+ u32 cmd_sgl_pos;
+ u32 cmd_sgl_start;
+ u32 tx_sgl_pos;
+ u32 tx_sgl_start;
+ u32 rx_sgl_pos;
+ u32 rx_sgl_start;
+ bool wait_second_completion;
+};
+
+/*
+ * This data type corresponds to the NAND DMA descriptor
+ * @dma_desc - low level DMA engine descriptor
+ * @node - list node for adding the desc to the controller's desc_list
+ *
+ * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
+ * ADM
+ * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
+ * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
+ * @dir - DMA transfer direction
+ */
+struct desc_info {
+ struct dma_async_tx_descriptor *dma_desc;
+ struct list_head node;
+
+ union {
+ struct scatterlist adm_sgl;
+ struct {
+ struct scatterlist *bam_sgl;
+ int sgl_cnt;
+ };
+ };
+ enum dma_data_direction dir;
+};
+
+/*
+ * holds the current register values that we want to write. acts as a contiguous
+ * chunk of memory which we use to write the controller registers through DMA.
+ */
+struct nandc_regs {
+ __le32 cmd;
+ __le32 addr0;
+ __le32 addr1;
+ __le32 chip_sel;
+ __le32 exec;
+
+ __le32 cfg0;
+ __le32 cfg1;
+ __le32 ecc_bch_cfg;
+
+ __le32 clrflashstatus;
+ __le32 clrreadstatus;
+
+ __le32 cmd1;
+ __le32 vld;
+
+ __le32 orig_cmd1;
+ __le32 orig_vld;
+
+ __le32 ecc_buf_cfg;
+ __le32 read_location0;
+ __le32 read_location1;
+ __le32 read_location2;
+ __le32 read_location3;
+ __le32 read_location_last0;
+ __le32 read_location_last1;
+ __le32 read_location_last2;
+ __le32 read_location_last3;
+
+ __le32 erased_cw_detect_cfg_clr;
+ __le32 erased_cw_detect_cfg_set;
+};
+
+/*
+ * NAND controller data struct
+ *
+ * @dev: parent device
+ *
+ * @base: MMIO base
+ *
+ * @core_clk: controller clock
+ * @aon_clk: another controller clock
+ *
+ * @regs: a contiguous chunk of memory for DMA register
+ * writes. contains the register values to be
+ * written to controller
+ *
+ * @props: properties of current NAND controller,
+ * initialized via DT match data
+ *
+ * @controller: base controller structure
+ * @host_list: list containing all the chips attached to the
+ * controller
+ *
+ * @chan: dma channel
+ * @cmd_crci: ADM DMA CRCI for command flow control
+ * @data_crci: ADM DMA CRCI for data flow control
+ *
+ * @desc_list: DMA descriptor list (list of desc_infos)
+ *
+ * @data_buffer: our local DMA buffer for page read/writes,
+ * used when we can't use the buffer provided
+ * by upper layers directly
+ * @reg_read_buf: local buffer for reading back registers via DMA
+ *
+ * @base_phys: physical base address of controller registers
+ * @base_dma: dma base address of controller registers
+ * @reg_read_dma: contains dma address for register read buffer
+ *
+ * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
+ * functions
+ * @max_cwperpage: maximum QPIC codewords required. calculated
+ * from all connected NAND devices pagesize
+ *
+ * @reg_read_pos: marker for data read in reg_read_buf
+ *
+ * @cmd1/vld: some fixed controller register values
+ *
+ * @exec_opwrite: flag to select the correct number of codewords
+ * while reading status
+ */
+struct qcom_nand_controller {
+ struct device *dev;
+
+ void __iomem *base;
+
+ struct clk *core_clk;
+ struct clk *aon_clk;
+
+ struct nandc_regs *regs;
+ struct bam_transaction *bam_txn;
+
+ const struct qcom_nandc_props *props;
+
+ struct nand_controller controller;
+ struct list_head host_list;
+
+ union {
+ /* will be used only by QPIC for BAM DMA */
+ struct {
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ struct dma_chan *cmd_chan;
+ };
+
+ /* will be used only by EBI2 for ADM DMA */
+ struct {
+ struct dma_chan *chan;
+ unsigned int cmd_crci;
+ unsigned int data_crci;
+ };
+ };
+
+ struct list_head desc_list;
+
+ u8 *data_buffer;
+ __le32 *reg_read_buf;
+
+ phys_addr_t base_phys;
+ dma_addr_t base_dma;
+ dma_addr_t reg_read_dma;
+
+ int buf_size;
+ int buf_count;
+ int buf_start;
+ unsigned int max_cwperpage;
+
+ int reg_read_pos;
+
+ u32 cmd1, vld;
+ bool exec_opwrite;
+};
+
+/*
+ * NAND special boot partitions
+ *
+ * @page_offset: offset of the partition where spare data is not protected
+ * by ECC (value in pages)
+ * @page_size: size of the partition where spare data is not protected
+ * by ECC (value in pages)
+ */
+struct qcom_nand_boot_partition {
+ u32 page_offset;
+ u32 page_size;
+};
+
+/*
+ * Qcom op for each exec_op transfer
+ *
+ * @data_instr: data instruction pointer
+ * @data_instr_idx: data instruction index
+ * @rdy_timeout_ms: wait ready timeout in ms
+ * @rdy_delay_ns: Additional delay in ns
+ * @addr1_reg: Address1 register value
+ * @addr2_reg: Address2 register value
+ * @cmd_reg: CMD register value
+ * @flag: flag for misc instruction
+ */
+struct qcom_op {
+ const struct nand_op_instr *data_instr;
+ unsigned int data_instr_idx;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+ u32 addr1_reg;
+ u32 addr2_reg;
+ u32 cmd_reg;
+ u8 flag;
+};
+
+/*
+ * NAND chip structure
+ *
+ * @boot_partitions: array of boot partitions where offset and size of the
+ * boot partitions are stored
+ *
+ * @chip: base NAND chip structure
+ * @node: list node to add itself to host_list in
+ * qcom_nand_controller
+ *
+ * @nr_boot_partitions: count of the boot partitions where spare data is not
+ * protected by ECC
+ *
+ * @cs: chip select value for this chip
+ * @cw_size: the number of bytes in a single step/codeword
+ * of a page, consisting of all data, ecc, spare
+ * and reserved bytes
+ * @cw_data: the number of bytes within a codeword protected
+ * by ECC
+ * @ecc_bytes_hw: ECC bytes used by controller hardware for this
+ * chip
+ *
+ * @last_command: keeps track of last command on this chip. used
+ * for reading correct status
+ *
+ * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
+ * ecc/non-ecc mode for the current nand flash
+ * device
+ *
+ * @status: value to be returned if NAND_CMD_STATUS command
+ * is executed
+ * @codeword_fixup: keep track of the current layout used by
+ * the driver for read/write operation.
+ * @use_ecc: request the controller to use ECC for the
+ * upcoming read/write
+ * @bch_enabled: flag to tell whether BCH ECC mode is used
+ */
+struct qcom_nand_host {
+ struct qcom_nand_boot_partition *boot_partitions;
+
+ struct nand_chip chip;
+ struct list_head node;
+
+ int nr_boot_partitions;
+
+ int cs;
+ int cw_size;
+ int cw_data;
+ int ecc_bytes_hw;
+ int spare_bytes;
+ int bbm_size;
+
+ int last_command;
+
+ u32 cfg0, cfg1;
+ u32 cfg0_raw, cfg1_raw;
+ u32 ecc_buf_cfg;
+ u32 ecc_bch_cfg;
+ u32 clrflashstatus;
+ u32 clrreadstatus;
+
+ u8 status;
+ bool codeword_fixup;
+ bool use_ecc;
+ bool bch_enabled;
+};
+
+/*
+ * This data type corresponds to the NAND controller properties, which vary
+ * among different NAND controllers.
+ * @ecc_modes - ecc mode for NAND
+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+ * @is_bam - whether NAND controller is using BAM
+ * @is_qpic - whether NAND CTRL is part of qpic IP
+ * @qpic_v2 - flag to indicate QPIC IP version 2
+ * @use_codeword_fixup - whether NAND has different layout for boot partitions
+ */
+struct qcom_nandc_props {
+ u32 ecc_modes;
+ u32 dev_cmd_reg_start;
+ bool is_bam;
+ bool is_qpic;
+ bool qpic_v2;
+ bool use_codeword_fixup;
+};
+
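+/*
+ * illustrative (not verbatim from this driver) example of a props
+ * definition for a BAM-based QPIC controller:
+ *
+ * static const struct qcom_nandc_props example_props = {
+ * .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+ * .dev_cmd_reg_start = 0x0,
+ * .is_bam = true,
+ * .is_qpic = true,
+ * };
+ */
+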
+/* Frees the BAM transaction memory */
+static void free_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ devm_kfree(nandc->dev, bam_txn);
+}
+
+/* Allocates and Initializes the BAM transaction */
+static struct bam_transaction *
+alloc_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn;
+ size_t bam_txn_size;
+ unsigned int num_cw = nandc->max_cwperpage;
+ void *bam_txn_buf;
+
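+ /*
+ * single allocation: the bam_transaction struct itself followed by
+ * per-codeword command elements, command sgls and data sgls; the
+ * sub-buffers are carved out of the one buffer below
+ */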
+ bam_txn_size =
+ sizeof(*bam_txn) + num_cw *
+ ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+ (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+ (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
+
+ bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
+ if (!bam_txn_buf)
+ return NULL;
+
+ bam_txn = bam_txn_buf;
+ bam_txn_buf += sizeof(*bam_txn);
+
+ bam_txn->bam_ce = bam_txn_buf;
+ bam_txn_buf +=
+ sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+
+ bam_txn->cmd_sgl = bam_txn_buf;
+ bam_txn_buf +=
+ sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
+
+ bam_txn->data_sgl = bam_txn_buf;
+
+ init_completion(&bam_txn->txn_done);
+
+ return bam_txn;
+}
+
+/* Clears the BAM transaction indexes */
+static void clear_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ if (!nandc->props->is_bam)
+ return;
+
+ bam_txn->bam_ce_pos = 0;
+ bam_txn->bam_ce_start = 0;
+ bam_txn->cmd_sgl_pos = 0;
+ bam_txn->cmd_sgl_start = 0;
+ bam_txn->tx_sgl_pos = 0;
+ bam_txn->tx_sgl_start = 0;
+ bam_txn->rx_sgl_pos = 0;
+ bam_txn->rx_sgl_start = 0;
+ bam_txn->last_data_desc = NULL;
+ bam_txn->wait_second_completion = false;
+
+ sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+ QPIC_PER_CW_CMD_SGL);
+ sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
+ QPIC_PER_CW_DATA_SGL);
+
+ reinit_completion(&bam_txn->txn_done);
+}
+
+/* Callback for DMA descriptor completion */
+static void qpic_bam_dma_done(void *data)
+{
+ struct bam_transaction *bam_txn = data;
+
+ /*
+ * In case of data transfer with NAND, 2 callbacks will be generated.
+ * One for command channel and another one for data channel.
+ * If current transaction has data descriptors
+ * (i.e. wait_second_completion is true), then set this to false
+ * and wait for second DMA descriptor completion.
+ */
+ if (bam_txn->wait_second_completion)
+ bam_txn->wait_second_completion = false;
+ else
+ complete(&bam_txn->txn_done);
+}
+
+static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+{
+ return container_of(chip, struct qcom_nand_host, chip);
+}
+
+static inline struct qcom_nand_controller *
+get_qcom_nand_controller(struct nand_chip *chip)
+{
+ return container_of(chip->controller, struct qcom_nand_controller,
+ controller);
+}
+
+static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+{
+ return ioread32(nandc->base + offset);
+}
+
+static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
+ u32 val)
+{
+ iowrite32(val, nandc->base + offset);
+}
+
+static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
+ bool is_cpu)
+{
+ if (!nandc->props->is_bam)
+ return;
+
+ if (is_cpu)
+ dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+ else
+ dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+}
+
+static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
+{
+ switch (offset) {
+ case NAND_FLASH_CMD:
+ return &regs->cmd;
+ case NAND_ADDR0:
+ return &regs->addr0;
+ case NAND_ADDR1:
+ return &regs->addr1;
+ case NAND_FLASH_CHIP_SELECT:
+ return &regs->chip_sel;
+ case NAND_EXEC_CMD:
+ return &regs->exec;
+ case NAND_FLASH_STATUS:
+ return &regs->clrflashstatus;
+ case NAND_DEV0_CFG0:
+ return &regs->cfg0;
+ case NAND_DEV0_CFG1:
+ return &regs->cfg1;
+ case NAND_DEV0_ECC_CFG:
+ return &regs->ecc_bch_cfg;
+ case NAND_READ_STATUS:
+ return &regs->clrreadstatus;
+ case NAND_DEV_CMD1:
+ return &regs->cmd1;
+ case NAND_DEV_CMD1_RESTORE:
+ return &regs->orig_cmd1;
+ case NAND_DEV_CMD_VLD:
+ return &regs->vld;
+ case NAND_DEV_CMD_VLD_RESTORE:
+ return &regs->orig_vld;
+ case NAND_EBI2_ECC_BUF_CFG:
+ return &regs->ecc_buf_cfg;
+ case NAND_READ_LOCATION_0:
+ return &regs->read_location0;
+ case NAND_READ_LOCATION_1:
+ return &regs->read_location1;
+ case NAND_READ_LOCATION_2:
+ return &regs->read_location2;
+ case NAND_READ_LOCATION_3:
+ return &regs->read_location3;
+ case NAND_READ_LOCATION_LAST_CW_0:
+ return &regs->read_location_last0;
+ case NAND_READ_LOCATION_LAST_CW_1:
+ return &regs->read_location_last1;
+ case NAND_READ_LOCATION_LAST_CW_2:
+ return &regs->read_location_last2;
+ case NAND_READ_LOCATION_LAST_CW_3:
+ return &regs->read_location_last3;
+ default:
+ return NULL;
+ }
+}
+
+static void nandc_set_reg(struct nand_chip *chip, int offset,
+ u32 val)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nandc_regs *regs = nandc->regs;
+ __le32 *reg;
+
+ reg = offset_to_nandc_reg(regs, offset);
+
+ if (reg)
+ *reg = cpu_to_le32(val);
+}
+
+/* Helper to check whether the given codeword is the last one in the page */
+static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
+{
+ return cw == (ecc->steps - 1);
+}
+
+/* helper to configure location register values */
+static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
+ int cw_offset, int read_size, int is_last_read_loc)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int reg_base = NAND_READ_LOCATION_0;
+
+ if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
+ reg_base = NAND_READ_LOCATION_LAST_CW_0;
+
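+ /* the NAND_READ_LOCATION_n registers are consecutive 32-bit registers */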
+ reg_base += reg * 4;
+
+ if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
+ return nandc_set_read_loc_last(chip, reg_base, cw_offset,
+ read_size, is_last_read_loc);
+ else
+ return nandc_set_read_loc_first(chip, reg_base, cw_offset,
+ read_size, is_last_read_loc);
+}
+
+/* helper to configure address register values */
+static void set_address(struct qcom_nand_host *host, u16 column, int page)
+{
+ struct nand_chip *chip = &host->chip;
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+
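+ /* ADDR0: page LSBs in bits [31:16], column in bits [15:0]; ADDR1: page MSBs */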
+ nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
+ nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
+}
+
+/*
+ * update_rw_regs: set up read/write register values; these will be
+ * written to the NAND controller registers via DMA
+ *
+ * @num_cw: number of steps for the read/write operation
+ * @read: read or write operation
+ * @cw: which codeword
+ */
+static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
+{
+ struct nand_chip *chip = &host->chip;
+ u32 cmd, cfg0, cfg1, ecc_bch_cfg;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ if (read) {
+ if (host->use_ecc)
+ cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
+ else
+ cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
+ } else {
+ cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
+ }
+
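+ /* the CW_PER_PAGE field holds (number of codewords - 1) */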
+ if (host->use_ecc) {
+ cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+
+ cfg1 = host->cfg1;
+ ecc_bch_cfg = host->ecc_bch_cfg;
+ } else {
+ cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+
+ cfg1 = host->cfg1_raw;
+ ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ }
+
+ nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
+ nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
+ nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
+ nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
+ if (!nandc->props->qpic_v2)
+ nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
+ nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
+ nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
+ nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+
+ if (read)
+ nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
+ host->cw_data : host->cw_size, 1);
+}
+
+/*
+ * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
+ * for BAM. This descriptor will be added to the NAND DMA descriptor queue,
+ * which will be submitted to the DMA engine.
+ */
+static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+ struct dma_chan *chan,
+ unsigned long flags)
+{
+ struct desc_info *desc;
+ struct scatterlist *sgl;
+ unsigned int sgl_cnt;
+ int ret;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ enum dma_transfer_direction dir_eng;
+ struct dma_async_tx_descriptor *dma_desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ if (chan == nandc->cmd_chan) {
+ sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
+ sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
+ bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ } else if (chan == nandc->tx_chan) {
+ sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
+ sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
+ bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ } else {
+ sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
+ sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
+ bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
+ dir_eng = DMA_DEV_TO_MEM;
+ desc->dir = DMA_FROM_DEVICE;
+ }
+
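+ /* mark the last consumed entry as the end of the sg list */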
+ sg_mark_end(sgl + sgl_cnt - 1);
+ ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+ if (ret == 0) {
+ dev_err(nandc->dev, "failure in mapping desc\n");
+ kfree(desc);
+ return -ENOMEM;
+ }
+
+ desc->sgl_cnt = sgl_cnt;
+ desc->bam_sgl = sgl;
+
+ dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
+ flags);
+
+ if (!dma_desc) {
+ dev_err(nandc->dev, "failure in prep desc\n");
+ dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+ kfree(desc);
+ return -EINVAL;
+ }
+
+ desc->dma_desc = dma_desc;
+
+ /* update last data/command descriptor */
+ if (chan == nandc->cmd_chan)
+ bam_txn->last_cmd_desc = dma_desc;
+ else
+ bam_txn->last_data_desc = dma_desc;
+
+ list_add_tail(&desc->node, &nandc->desc_list);
+
+ return 0;
+}
+
+/*
+ * Prepares the command descriptor for BAM DMA which will be used for NAND
+ * register reads and writes. The command descriptor requires the command
+ * to be formed in command element type so this function uses the command
+ * element from bam transaction ce array and fills the same with required
+ * data. A single SGL can contain multiple command elements, so
+ * NAND_BAM_NEXT_SGL will be used for starting a separate SGL
+ * after the current command element.
+ */
+static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
+{
+ int bam_ce_size;
+ int i, ret;
+ struct bam_cmd_element *bam_ce_buffer;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+
+ /* fill the command desc */
+ for (i = 0; i < size; i++) {
+ if (read)
+ bam_prep_ce(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_READ_COMMAND,
+ reg_buf_dma_addr(nandc,
+ (__le32 *)vaddr + i));
+ else
+ bam_prep_ce_le32(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_WRITE_COMMAND,
+ *((__le32 *)vaddr + i));
+ }
+
+ bam_txn->bam_ce_pos += size;
+
+ /* use the separate sgl after this command */
+ if (flags & NAND_BAM_NEXT_SGL) {
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+ bam_ce_size = (bam_txn->bam_ce_pos -
+ bam_txn->bam_ce_start) *
+ sizeof(struct bam_cmd_element);
+ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+ bam_ce_buffer, bam_ce_size);
+ bam_txn->cmd_sgl_pos++;
+ bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+
+ if (flags & NAND_BAM_NWD) {
+ ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_FENCE |
+ DMA_PREP_CMD);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Prepares the data descriptor for BAM DMA which will be used for NAND
+ * data reads and writes.
+ */
+static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+ const void *vaddr,
+ int size, unsigned int flags)
+{
+ int ret;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ if (read) {
+ sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
+ vaddr, size);
+ bam_txn->rx_sgl_pos++;
+ } else {
+ sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
+ vaddr, size);
+ bam_txn->tx_sgl_pos++;
+
+ /*
+ * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
+ * is not set, form the DMA descriptor
+ */
+ if (!(flags & NAND_BAM_NO_EOT)) {
+ ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr, int size,
+ bool flow_control)
+{
+ struct desc_info *desc;
+ struct dma_async_tx_descriptor *dma_desc;
+ struct scatterlist *sgl;
+ struct dma_slave_config slave_conf;
+ struct qcom_adm_peripheral_config periph_conf = {};
+ enum dma_transfer_direction dir_eng;
+ int ret;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ sgl = &desc->adm_sgl;
+
+ sg_init_one(sgl, vaddr, size);
+
+ if (read) {
+ dir_eng = DMA_DEV_TO_MEM;
+ desc->dir = DMA_FROM_DEVICE;
+ } else {
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ }
+
+ ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
+ if (ret == 0) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ memset(&slave_conf, 0x00, sizeof(slave_conf));
+
+ slave_conf.device_fc = flow_control;
+ if (read) {
+ slave_conf.src_maxburst = 16;
+ slave_conf.src_addr = nandc->base_dma + reg_off;
+ if (nandc->data_crci) {
+ periph_conf.crci = nandc->data_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
+ } else {
+ slave_conf.dst_maxburst = 16;
+ slave_conf.dst_addr = nandc->base_dma + reg_off;
+ if (nandc->cmd_crci) {
+ periph_conf.crci = nandc->cmd_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
+ }
+
+ ret = dmaengine_slave_config(nandc->chan, &slave_conf);
+ if (ret) {
+ dev_err(nandc->dev, "failed to configure dma channel\n");
+ goto err;
+ }
+
+ dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
+ if (!dma_desc) {
+ dev_err(nandc->dev, "failed to prepare desc\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ desc->dma_desc = dma_desc;
+
+ list_add_tail(&desc->node, &nandc->desc_list);
+
+ return 0;
+err:
+ kfree(desc);
+
+ return ret;
+}
+
+/*
+ * read_reg_dma: prepares a descriptor to read a given number of
+ * contiguous registers to the reg_read_buf pointer
+ *
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to read
+ * @flags: flags to control DMA descriptor preparation
+ */
+static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+ int num_regs, unsigned int flags)
+{
+ bool flow_control = false;
+ void *vaddr;
+
+ vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+ nandc->reg_read_pos += num_regs;
+
+ if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
+ first = dev_cmd_reg_addr(nandc, first);
+
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+ flow_control = true;
+
+ return prep_adm_dma_desc(nandc, true, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
+}
+
+/*
+ * write_reg_dma: prepares a descriptor to write a given number of
+ * contiguous registers
+ *
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to write
+ * @flags: flags to control DMA descriptor preparation
+ */
+static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
+ int num_regs, unsigned int flags)
+{
+ bool flow_control = false;
+ struct nandc_regs *regs = nandc->regs;
+ void *vaddr;
+
+ vaddr = offset_to_nandc_reg(regs, first);
+
+ if (first == NAND_ERASED_CW_DETECT_CFG) {
+ if (flags & NAND_ERASED_CW_SET)
+ vaddr = &regs->erased_cw_detect_cfg_set;
+ else
+ vaddr = &regs->erased_cw_detect_cfg_clr;
+ }
+
+ if (first == NAND_EXEC_CMD)
+ flags |= NAND_BAM_NWD;
+
+ if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
+
+ if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_FLASH_CMD)
+ flow_control = true;
+
+ return prep_adm_dma_desc(nandc, false, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
+}
+
+/*
+ * read_data_dma: prepares a DMA descriptor to transfer data from the
+ * controller's internal buffer to the buffer 'vaddr'
+ *
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ */
+static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
+{
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+
+ return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+}
+
+/*
+ * write_data_dma: prepares a DMA descriptor to transfer data from
+ * 'vaddr' to the controller's internal buffer
+ *
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to read from
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ */
+static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
+{
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+
+ return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+}
+
+/*
+ * Helper to prepare DMA descriptors for configuring registers
+ * before reading a NAND page.
+ */
+static void config_nand_page_read(struct nand_chip *chip)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ write_reg_dma(nandc, NAND_ADDR0, 2, 0);
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
+ if (!nandc->props->qpic_v2)
+ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+ write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+}
+
+/*
+ * Helper to prepare DMA descriptors for configuring registers
+ * before reading each codeword in a NAND page.
+ */
+static void
+config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ int reg = NAND_READ_LOCATION_0;
+
+ if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
+ reg = NAND_READ_LOCATION_LAST_CW_0;
+
+ if (nandc->props->is_bam)
+ write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ if (use_ecc) {
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+ read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
+ } else {
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ }
+}
+
+/*
+ * Helper to prepare DMA descriptors for configuring registers needed to read
+ * a single codeword in a page
+ */
+static void
+config_nand_single_cw_page_read(struct nand_chip *chip,
+ bool use_ecc, int cw)
+{
+ config_nand_page_read(chip);
+ config_nand_cw_read(chip, use_ecc, cw);
+}
+
+/*
+ * Helper to prepare DMA descriptors for configuring registers
+ * before writing a NAND page.
+ */
+static void config_nand_page_write(struct nand_chip *chip)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ write_reg_dma(nandc, NAND_ADDR0, 2, 0);
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
+ if (!nandc->props->qpic_v2)
+ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
+ NAND_BAM_NEXT_SGL);
+}
+
+/*
+ * Helper to prepare DMA descriptors for configuring registers
+ * before writing each codeword in a NAND page.
+ */
+static void config_nand_cw_write(struct nand_chip *chip)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
+ write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
+}
+
+/* helpers to submit/free our list of dma descriptors */
+static int submit_descs(struct qcom_nand_controller *nandc)
+{
+ struct desc_info *desc, *n;
+ dma_cookie_t cookie = 0;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ int ret = 0;
+
+ if (nandc->props->is_bam) {
+ if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+ ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+
+ if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
+ ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+
+ if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
+ ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_CMD);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+ }
+
+ list_for_each_entry(desc, &nandc->desc_list, node)
+ cookie = dmaengine_submit(desc->dma_desc);
+
+ if (nandc->props->is_bam) {
+ bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
+ bam_txn->last_cmd_desc->callback_param = bam_txn;
+ if (bam_txn->last_data_desc) {
+ bam_txn->last_data_desc->callback = qpic_bam_dma_done;
+ bam_txn->last_data_desc->callback_param = bam_txn;
+ bam_txn->wait_second_completion = true;
+ }
+
+ dma_async_issue_pending(nandc->tx_chan);
+ dma_async_issue_pending(nandc->rx_chan);
+ dma_async_issue_pending(nandc->cmd_chan);
+
+ if (!wait_for_completion_timeout(&bam_txn->txn_done,
+ QPIC_NAND_COMPLETION_TIMEOUT))
+ ret = -ETIMEDOUT;
+ } else {
+ if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
+ ret = -ETIMEDOUT;
+ }
+
+err_unmap_free_desc:
+ /*
+ * Unmap the dma sg_list and free the desc allocated by both
+ * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
+ */
+ list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+ list_del(&desc->node);
+
+ if (nandc->props->is_bam)
+ dma_unmap_sg(nandc->dev, desc->bam_sgl,
+ desc->sgl_cnt, desc->dir);
+ else
+ dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
+ desc->dir);
+
+ kfree(desc);
+ }
+
+ return ret;
+}
+
+/* reset the register read buffer for next NAND operation */
+static void clear_read_regs(struct qcom_nand_controller *nandc)
+{
+ nandc->reg_read_pos = 0;
+ nandc_read_buffer_sync(nandc, false);
+}
+
+/*
+ * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
+ * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
+ *
+ * when using RS ECC, the HW reports the same errors when reading an erased CW,
+ * but it notifies that it is an erased CW by placing special characters at
+ * certain offsets in the buffer.
+ *
+ * verify if the page is erased or not, and fix up the page for RS ECC by
+ * replacing the special characters with 0xff.
+ */
+static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
+{
+ u8 empty1, empty2;
+
+ /*
+ * an erased page flags an error in NAND_FLASH_STATUS; check if the
+ * page is erased by looking for 0x54s at offsets 3 and 175 from the
+ * beginning of each codeword
+ */
+
+ empty1 = data_buf[3];
+ empty2 = data_buf[175];
+
+ /*
+ * if the erased codeword markers exist, override them with
+ * 0xffs
+ */
+ if ((empty1 == 0x54 && empty2 == 0xff) ||
+ (empty1 == 0xff && empty2 == 0x54)) {
+ data_buf[3] = 0xff;
+ data_buf[175] = 0xff;
+ }
+
+ /*
+ * check if the entire chunk contains 0xffs or not. if it doesn't, then
+ * restore the original values at the special offsets
+ */
+ if (memchr_inv(data_buf, 0xff, data_len)) {
+ data_buf[3] = empty1;
+ data_buf[175] = empty2;
+
+ return false;
+ }
+
+ return true;
+}
+
+struct read_stats {
+ __le32 flash;
+ __le32 buffer;
+ __le32 erased_cw;
+};
+
+/* reads back FLASH_STATUS register set by the controller */
+static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int i;
+
+ nandc_read_buffer_sync(nandc, true);
+
+ for (i = 0; i < cw_cnt; i++) {
+ u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
+
+ if (flash & (FS_OP_ERR | FS_MPU_ERR))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* performs raw read for one codeword */
+static int
+qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *data_buf, u8 *oob_buf, int page, int cw)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
+ int raw_cw = cw;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ clear_read_regs(nandc);
+ host->use_ecc = false;
+
+ if (nandc->props->qpic_v2)
+ raw_cw = ecc->steps - 1;
+
+ clear_bam_transaction(nandc);
+ set_address(host, host->cw_size * cw, page);
+ update_rw_regs(host, 1, true, raw_cw);
+ config_nand_page_read(chip);
+
+ data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
+ oob_size1 = host->bbm_size;
+
+ if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
+ data_size2 = ecc->size - data_size1 -
+ ((ecc->steps - 1) * 4);
+ oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size2 = host->cw_data - data_size1;
+ oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+ if (nandc->props->is_bam) {
+ nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
+ read_loc += data_size1;
+
+ nandc_set_read_loc(chip, cw, 1, read_loc, oob_size1, 0);
+ read_loc += oob_size1;
+
+ nandc_set_read_loc(chip, cw, 2, read_loc, data_size2, 0);
+ read_loc += data_size2;
+
+ nandc_set_read_loc(chip, cw, 3, read_loc, oob_size2, 1);
+ }
+
+ config_nand_cw_read(chip, false, raw_cw);
+
+ read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+ reg_off += data_size1;
+
+ read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+ reg_off += oob_size1;
+
+ read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
+ reg_off += data_size2;
+
+ read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
+ return ret;
+ }
+
+ return check_flash_errors(host, 1);
+}
+
+/*
+ * Bitflips can happen in erased codewords as well, so this function counts
+ * the number of 0s in each CW for which the ECC engine returns an
+ * uncorrectable error. The page is assumed to be erased if this count is
+ * less than or equal to ecc->strength for each CW.
+ *
+ * 1. Both DATA and OOB need to be checked for the number of 0s. The
+ * top-level API can be called with only data buf or OOB buf so use
+ * chip->data_buf if data buf is null and chip->oob_poi if oob buf
+ * is null for copying the raw bytes.
+ * 2. Perform a raw read for all the CWs which have uncorrectable errors.
+ * 3. For each CW, check the number of 0s in cw_data and usable OOB bytes.
+ * Bitflips in the BBM and spare bytes don't affect the ECC so don't
+ * check the number of bitflips in this area.
+ */
+static int
+check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
+ u8 *oob_buf, unsigned long uncorrectable_cws,
+ int page, unsigned int max_bitflips)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *cw_data_buf, *cw_oob_buf;
+ int cw, data_size, oob_size, ret;
+
+ if (!data_buf)
+ data_buf = nand_get_data_buf(chip);
+
+ if (!oob_buf) {
+ nand_get_data_buf(chip);
+ oob_buf = chip->oob_poi;
+ }
+
+ for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
+ if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
+ data_size = ecc->size - ((ecc->steps - 1) * 4);
+ oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
+ } else {
+ data_size = host->cw_data;
+ oob_size = host->ecc_bytes_hw;
+ }
+
+ /* determine starting buffer address for current CW */
+ cw_data_buf = data_buf + (cw * host->cw_data);
+ cw_oob_buf = oob_buf + (cw * ecc->bytes);
+
+ ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
+ cw_oob_buf, page, cw);
+ if (ret)
+ return ret;
+
+ /*
+ * make sure it isn't an erased page reported
+ * as not-erased by HW because of a few bitflips
+ */
+ ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
+ cw_oob_buf + host->bbm_size,
+ oob_size, NULL,
+ 0, ecc->strength);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ }
+ }
+
+ return max_bitflips;
+}
+
+/*
+ * reads back status registers set by the controller to notify page read
+ * errors. this is equivalent to what 'ecc->correct()' would do.
+ */
+static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
+ u8 *oob_buf, int page)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ unsigned int max_bitflips = 0, uncorrectable_cws = 0;
+ struct read_stats *buf;
+ bool flash_op_err = false, erased;
+ int i;
+ u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
+
+ buf = (struct read_stats *)nandc->reg_read_buf;
+ nandc_read_buffer_sync(nandc, true);
+
+ for (i = 0; i < ecc->steps; i++, buf++) {
+ u32 flash, buffer, erased_cw;
+ int data_len, oob_len;
+
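+ /*
+ * the free OOB bytes live in the last codeword, so it holds
+ * less data (see the page layout description further below)
+ */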
+ if (qcom_nandc_is_last_cw(ecc, i)) {
+ data_len = ecc->size - ((ecc->steps - 1) << 2);
+ oob_len = ecc->steps << 2;
+ } else {
+ data_len = host->cw_data;
+ oob_len = 0;
+ }
+
+ flash = le32_to_cpu(buf->flash);
+ buffer = le32_to_cpu(buf->buffer);
+ erased_cw = le32_to_cpu(buf->erased_cw);
+
+ /*
+ * Check ECC failure for each codeword. ECC failure can
+ * happen in either of the following conditions
+ * 1. If the number of bitflips is greater than what the ECC
+ * engine can correct.
+ * 2. If this codeword contains all 0xff, for which the erased
+ * codeword detection check will be done.
+ */
+ if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
+ /*
+ * For BCH ECC, ignore erased codeword errors, if
+ * ERASED_CW bits are set.
+ */
+ if (host->bch_enabled) {
+ erased = (erased_cw & ERASED_CW) == ERASED_CW;
+ /*
+ * For RS ECC, HW reports the erased CW by placing
+ * special characters at certain offsets in the buffer.
+ * These special characters will be valid only if
+ * complete page is read i.e. data_buf is not NULL.
+ */
+ } else if (data_buf) {
+ erased = erased_chunk_check_and_fixup(data_buf,
+ data_len);
+ } else {
+ erased = false;
+ }
+
+ if (!erased)
+ uncorrectable_cws |= BIT(i);
+ /*
+ * Check if MPU or any other operational error (timeout,
+ * device failure, etc.) happened for this codeword and
+ * make flash_op_err true. If flash_op_err is set, then
+ * EIO will be returned for page read.
+ */
+ } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
+ flash_op_err = true;
+ /*
+ * No ECC or operational errors happened. Check the number of
+ * bits corrected and update the ecc_stats.corrected.
+ */
+ } else {
+ unsigned int stat;
+
+ stat = buffer & BS_CORRECTABLE_ERR_MSK;
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max(max_bitflips, stat);
+ }
+
+ if (data_buf)
+ data_buf += data_len;
+ if (oob_buf)
+ oob_buf += oob_len + ecc->bytes;
+ }
+
+ if (flash_op_err)
+ return -EIO;
+
+ if (!uncorrectable_cws)
+ return max_bitflips;
+
+ return check_for_erased_page(host, data_buf_start, oob_buf_start,
+ uncorrectable_cws, page,
+ max_bitflips);
+}
+
+/*
+ * helper to perform the actual page read operation, used by ecc->read_page(),
+ * ecc->read_oob()
+ */
+static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
+ u8 *oob_buf, int page)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
+ int i, ret;
+
+ config_nand_page_read(chip);
+
+ /* queue cmd descs for each codeword */
+ for (i = 0; i < ecc->steps; i++) {
+ int data_size, oob_size;
+
+ if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
+ data_size = ecc->size - ((ecc->steps - 1) << 2);
+ oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size = host->cw_data;
+ oob_size = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+ if (nandc->props->is_bam) {
+ if (data_buf && oob_buf) {
+ nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
+ nandc_set_read_loc(chip, i, 1, data_size,
+ oob_size, 1);
+ } else if (data_buf) {
+ nandc_set_read_loc(chip, i, 0, 0, data_size, 1);
+ } else {
+ nandc_set_read_loc(chip, i, 0, data_size,
+ oob_size, 1);
+ }
+ }
+
+ config_nand_cw_read(chip, true, i);
+
+ if (data_buf)
+ read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+ data_size, 0);
+
+ /*
+ * when ecc is enabled, the controller doesn't read the real
+ * or dummy bad block markers in each chunk. To maintain a
+ * consistent layout across RAW and ECC reads, we just
+ * leave the real/dummy BBM offsets empty (i.e., filled with
+ * 0xffs)
+ */
+ if (oob_buf) {
+ int j;
+
+ for (j = 0; j < host->bbm_size; j++)
+ *oob_buf++ = 0xff;
+
+ read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+
+ if (data_buf)
+ data_buf += data_size;
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure to read page/oob\n");
+ return ret;
+ }
+
+ return parse_read_errors(host, data_buf_start, oob_buf_start, page);
+}
+
+/*
+ * a helper that copies the last step/codeword of a page (containing free oob)
+ * into our local buffer
+ */
+static int copy_last_cw(struct qcom_nand_host *host, int page)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int size;
+ int ret;
+
+ clear_read_regs(nandc);
+
+ size = host->use_ecc ? host->cw_data : host->cw_size;
+
+ /* prepare a clean read buffer */
+ memset(nandc->data_buffer, 0xff, size);
+
+ set_address(host, host->cw_size * (ecc->steps - 1), page);
+ update_rw_regs(host, 1, true, ecc->steps - 1);
+
+ config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
+
+ read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
+
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev, "failed to copy last codeword\n");
+
+ return ret;
+}
+
+static bool qcom_nandc_is_boot_partition(struct qcom_nand_host *host, int page)
+{
+ struct qcom_nand_boot_partition *boot_partition;
+ u32 start, end;
+ int i;
+
+ /*
+ * Since the frequent access will be to the non-boot partitions like rootfs,
+ * optimize the page check by:
+ *
+ * 1. Checking if the page lies after the last boot partition.
+ * 2. Checking from the boot partition end.
+ */
+
+ /* First check the last boot partition */
+ boot_partition = &host->boot_partitions[host->nr_boot_partitions - 1];
+ start = boot_partition->page_offset;
+ end = start + boot_partition->page_size;
+
+ /* Page is after the last boot partition end. This is NOT a boot partition */
+ if (page > end)
+ return false;
+
+ /* Actually check if it's a boot partition */
+ if (page < end && page >= start)
+ return true;
+
+ /* Check the other boot partitions starting from the second-last partition */
+ for (i = host->nr_boot_partitions - 2; i >= 0; i--) {
+ boot_partition = &host->boot_partitions[i];
+ start = boot_partition->page_offset;
+ end = start + boot_partition->page_size;
+
+ if (page < end && page >= start)
+ return true;
+ }
+
+ return false;
+}
+
+static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
+{
+ bool codeword_fixup = qcom_nandc_is_boot_partition(host, page);
+
+ /* Skip conf write if we are already in the correct mode */
+ if (codeword_fixup == host->codeword_fixup)
+ return;
+
+ host->codeword_fixup = codeword_fixup;
+
+ host->cw_data = codeword_fixup ? 512 : 516;
+ host->spare_bytes = host->cw_size - host->ecc_bytes_hw -
+ host->bbm_size - host->cw_data;
+
+ host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK);
+ host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES |
+ host->cw_data << UD_SIZE_BYTES;
+
+ host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK;
+ host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES;
+ host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS;
+}
+
+/* implements ecc->read_page() */
+static int qcom_nandc_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *data_buf, *oob_buf = NULL;
+
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = true;
+ clear_read_regs(nandc);
+ set_address(host, 0, page);
+ update_rw_regs(host, ecc->steps, true, 0);
+
+ data_buf = buf;
+ oob_buf = oob_required ? chip->oob_poi : NULL;
+
+ clear_bam_transaction(nandc);
+
+ return read_page_ecc(host, data_buf, oob_buf, page);
+}
+
+/* implements ecc->read_page_raw() */
+static int qcom_nandc_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int cw, ret;
+ u8 *data_buf = buf, *oob_buf = chip->oob_poi;
+
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
+ for (cw = 0; cw < ecc->steps; cw++) {
+ ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
+ page, cw);
+ if (ret)
+ return ret;
+
+ data_buf += host->cw_data;
+ oob_buf += ecc->bytes;
+ }
+
+ return 0;
+}
+
+/* implements ecc->read_oob() */
+static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ host->use_ecc = true;
+ set_address(host, 0, page);
+ update_rw_regs(host, ecc->steps, true, 0);
+
+ return read_page_ecc(host, NULL, chip->oob_poi, page);
+}
+
+/* implements ecc->write_page() */
+static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *data_buf, *oob_buf;
+ int i, ret;
+
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ set_address(host, 0, page);
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ data_buf = (u8 *)buf;
+ oob_buf = chip->oob_poi;
+
+ host->use_ecc = true;
+ update_rw_regs(host, ecc->steps, false, 0);
+ config_nand_page_write(chip);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_size, oob_size;
+
+ if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
+ data_size = ecc->size - ((ecc->steps - 1) << 2);
+ oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size = host->cw_data;
+ oob_size = ecc->bytes;
+ }
+
+ write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
+ i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
+
+ /*
+ * when ECC is enabled, we don't really need to write anything
+ * to oob for the first n - 1 codewords since these oob regions
+ * just contain ECC bytes that's written by the controller
+ * itself. For the last codeword, we skip the bbm positions and
+ * write to the free oob area.
+ */
+ if (qcom_nandc_is_last_cw(ecc, i)) {
+ oob_buf += host->bbm_size;
+
+ write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+
+ config_nand_cw_write(chip);
+
+ data_buf += data_size;
+ oob_buf += oob_size;
+ }
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure to write page\n");
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+/* implements ecc->write_page_raw() */
+static int qcom_nandc_write_page_raw(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *data_buf, *oob_buf;
+ int i, ret;
+
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ data_buf = (u8 *)buf;
+ oob_buf = chip->oob_poi;
+
+ host->use_ecc = false;
+ update_rw_regs(host, ecc->steps, false, 0);
+ config_nand_page_write(chip);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int reg_off = FLASH_BUF_ACC;
+
+ data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
+ oob_size1 = host->bbm_size;
+
+ if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
+ data_size2 = ecc->size - data_size1 -
+ ((ecc->steps - 1) << 2);
+ oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size2 = host->cw_data - data_size1;
+ oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+ write_data_dma(nandc, reg_off, data_buf, data_size1,
+ NAND_BAM_NO_EOT);
+ reg_off += data_size1;
+ data_buf += data_size1;
+
+ write_data_dma(nandc, reg_off, oob_buf, oob_size1,
+ NAND_BAM_NO_EOT);
+ reg_off += oob_size1;
+ oob_buf += oob_size1;
+
+ write_data_dma(nandc, reg_off, data_buf, data_size2,
+ NAND_BAM_NO_EOT);
+ reg_off += data_size2;
+ data_buf += data_size2;
+
+ write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
+ oob_buf += oob_size2;
+
+ config_nand_cw_write(chip);
+ }
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure to write raw page\n");
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+/*
+ * implements ecc->write_oob()
+ *
+ * the NAND controller cannot write only data or only OOB within a codeword
+ * since ECC is calculated for the combined codeword. So update the OOB from
+ * chip->oob_poi, and pad the data area with 0xFF before writing.
+ */
+static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *oob = chip->oob_poi;
+ int data_size, oob_size;
+ int ret;
+
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
+ host->use_ecc = true;
+ clear_bam_transaction(nandc);
+
+ /* calculate the data and oob size for the last codeword/step */
+ data_size = ecc->size - ((ecc->steps - 1) << 2);
+ oob_size = mtd->oobavail;
+
+ memset(nandc->data_buffer, 0xff, host->cw_data);
+ /* write the new oob content into the last codeword */
+ mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
+ 0, mtd->oobavail);
+
+ set_address(host, host->cw_size * (ecc->steps - 1), page);
+ update_rw_regs(host, 1, false, 0);
+
+ config_nand_page_write(chip);
+ write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, data_size + oob_size, 0);
+ config_nand_cw_write(chip);
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure to write oob\n");
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int page, ret, bbpos, bad = 0;
+
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
+ /*
+ * configure registers for a raw sub-page read; the address is set to
+ * the beginning of the last codeword. we don't care about reading the
+ * ECC portion of the OOB, we just want the first few bytes of this
+ * codeword, which contain the BBM
+ */
+ host->use_ecc = false;
+
+ clear_bam_transaction(nandc);
+ ret = copy_last_cw(host, page);
+ if (ret)
+ goto err;
+
+ if (check_flash_errors(host, 1)) {
+ dev_warn(nandc->dev, "error when trying to read BBM\n");
+ goto err;
+ }
+
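+ /* the BBM sits right after the data bytes of the raw last codeword */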
+ bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
+
+ bad = nandc->data_buffer[bbpos] != 0xff;
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
+err:
+ return bad;
+}
+
+static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int page, ret;
+
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ /*
+ * to mark the block as bad, we flash the entire last codeword
+ * (which contains the BBM) with 0s. we don't care about the rest of
+ * the content in the codeword since we aren't going to use this
+ * block again
+ */
+ memset(nandc->data_buffer, 0x00, host->cw_size);
+
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
+ /* prepare write */
+ host->use_ecc = false;
+ set_address(host, host->cw_size * (ecc->steps - 1), page);
+ update_rw_regs(host, 1, false, ecc->steps - 1);
+
+ config_nand_page_write(chip);
+ write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, host->cw_size, 0);
+ config_nand_cw_write(chip);
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure to update BBM\n");
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+/*
+ * NAND controller page layout info
+ *
+ * Layout with ECC enabled:
+ *
+ * |----------------------| |---------------------------------|
+ * | xx.......yy| | *********xx.......yy|
+ * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy|
+ * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy|
+ * | xx.......yy| | *********xx.......yy|
+ * |----------------------| |---------------------------------|
+ * codeword 1,2..n-1 codeword n
+ * <---(528/532 Bytes)--> <-------(528/532 Bytes)--------->
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = Spare/free bytes
+ * x = Unused byte(s)
+ * y = Reserved byte(s)
+ *
+ * 2K page: n = 4, spare = 16 bytes
+ * 4K page: n = 8, spare = 32 bytes
+ * 8K page: n = 16, spare = 64 bytes
+ *
+ * the qcom nand controller operates at a sub page/codeword level. each
+ * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
+ * the number of ECC bytes varies based on the ECC strength and the bus width.
+ *
+ * the first n - 1 codewords contain 516 bytes of user data each; the
+ * remaining 12/16 bytes consist of ECC and reserved data. The nth codeword
+ * contains both user data and spare (oobavail) bytes that sum up to 516 bytes.
+ *
+ * When we access a page with ECC enabled, the reserved byte(s) are not
+ * accessible at all. When reading, we fill up these unreadable positions
+ * with 0xffs. When writing, the controller skips writing the inaccessible
+ * bytes.
+ *
+ * Layout with ECC disabled:
+ *
+ * |------------------------------| |---------------------------------------|
+ * | yy xx.......| | bb *********xx.......|
+ * | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..|
+ * | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......|
+ * | yy xx.......| | bb *********xx.......|
+ * |------------------------------| |---------------------------------------|
+ * codeword 1,2..n-1 codeword n
+ * <-------(528/532 Bytes)------> <-----------(528/532 Bytes)----------->
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = Spare/free bytes
+ * x = Unused byte(s)
+ * y = Dummy Bad Block byte(s)
+ * b = Real Bad Block byte(s)
+ * size1/size2 = function of codeword size and 'n'
+ *
+ * When the ECC block is disabled, one reserved byte (or two for 16-bit bus
+ * width) becomes accessible. For the first n - 1 codewords, these are dummy
+ * Bad Block Markers. In the last codeword, this position contains the real
+ * BBM.
+ *
+ * In order to have a consistent layout between RAW and ECC modes, we assume
+ * the following OOB layout arrangement:
+ *
+ * |-----------| |--------------------|
+ * |yyxx.......| |bb*********xx.......|
+ * |yyxx..ECC..| |bb*FREEOOB*xx..ECC..|
+ * |yyxx.......| |bb*********xx.......|
+ * |yyxx.......| |bb*********xx.......|
+ * |-----------| |--------------------|
+ * first n - 1 nth OOB region
+ * OOB regions
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = FREE OOB bytes
+ * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
+ * x = Unused byte(s)
+ * b = Real bad block byte(s) (inaccessible when ECC enabled)
+ *
+ * This layout is read as-is when ECC is disabled. When ECC is enabled, the
+ * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
+ * and assumed to be 0xff when we read a page/oob. The ECC, unused and
+ * dummy/real bad block bytes are grouped as ECC bytes (i.e., ecc->bytes is
+ * the sum of the three).
+ */
+static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section > 1)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
+ host->bbm_size;
+ oobregion->offset = 0;
+ } else {
+ oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+ }
+
+ return 0;
+}
+
+static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = ecc->steps * 4;
+ oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
+ .ecc = qcom_nand_ooblayout_ecc,
+ .free = qcom_nand_ooblayout_free,
+};
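+
+/*
+ * A worked example of the two callbacks above (illustrative values): for a
+ * 2K page on a x8 bus with 4-bit BCH ECC, ecc->steps = 4, ecc->bytes = 12
+ * (7 ECC + 4 spare + 1 BBM) and mtd->oobsize = 64, giving:
+ *
+ *   ECC section 0: offset 0,  length 12 * 3 + 1 = 37
+ *   free section:  offset 37, length 4 * 4      = 16
+ *   ECC section 1: offset 53, length 7 + 4      = 11
+ *
+ * 37 + 16 + 11 = 64, i.e. the three regions tile the whole OOB area.
+ */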
+
+static int
+qcom_nandc_calc_ecc_bytes(int step_size, int strength)
+{
+ return strength == 4 ? 12 : 16;
+}
+
+NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
+ NANDC_STEP_SIZE, 4, 8);
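+
+/*
+ * The controller thus advertises a single 512-byte ECC step with strengths
+ * 4 and 8; nand_ecc_choose_conf() in qcom_nand_attach_chip() below picks a
+ * configuration that fits in the available OOB.
+ */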
+
+static int qcom_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int cwperpage, bad_block_byte, ret;
+ bool wide_bus;
+ int ecc_mode = 1;
+
+	/* the controller only supports 512-byte data steps */
+ ecc->size = NANDC_STEP_SIZE;
+ wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
+ cwperpage = mtd->writesize / NANDC_STEP_SIZE;
+
+ /*
+	 * Each CW has 4 available OOB bytes which will be protected with ECC,
+	 * so the remaining OOB bytes can be used for ECC parity.
+ */
+ ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
+ mtd->oobsize - (cwperpage * 4));
+ if (ret) {
+ dev_err(nandc->dev, "No valid ECC settings possible\n");
+ return ret;
+ }
+
+ if (ecc->strength >= 8) {
+ /* 8 bit ECC defaults to BCH ECC on all platforms */
+ host->bch_enabled = true;
+ ecc_mode = 1;
+
+ if (wide_bus) {
+ host->ecc_bytes_hw = 14;
+ host->spare_bytes = 0;
+ host->bbm_size = 2;
+ } else {
+ host->ecc_bytes_hw = 13;
+ host->spare_bytes = 2;
+ host->bbm_size = 1;
+ }
+ } else {
+ /*
+	 * If the controller supports BCH for 4-bit ECC, it uses fewer
+	 * bytes for ECC. If RS is used, the ECC data is always 10 bytes.
+ */
+ if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
+ /* BCH */
+ host->bch_enabled = true;
+ ecc_mode = 0;
+
+ if (wide_bus) {
+ host->ecc_bytes_hw = 8;
+ host->spare_bytes = 2;
+ host->bbm_size = 2;
+ } else {
+ host->ecc_bytes_hw = 7;
+ host->spare_bytes = 4;
+ host->bbm_size = 1;
+ }
+ } else {
+ /* RS */
+ host->ecc_bytes_hw = 10;
+
+ if (wide_bus) {
+ host->spare_bytes = 0;
+ host->bbm_size = 2;
+ } else {
+ host->spare_bytes = 1;
+ host->bbm_size = 1;
+ }
+ }
+ }
+
+ /*
+	 * We consider ecc->bytes as the sum of all the non-data content in a
+	 * step. It gives us a clean representation of the OOB area (even if
+	 * all the bytes aren't used for ECC). It is always 16 bytes for 8-bit
+	 * ECC and 12 bytes for 4-bit ECC.
+ */
+ ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
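+	/*
+	 * For example (illustrative): on a x8 bus, 8-bit ECC gives
+	 * 13 + 2 + 1 = 16 bytes, while 4-bit BCH gives 7 + 4 + 1 = 12 bytes.
+	 */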
+
+ ecc->read_page = qcom_nandc_read_page;
+ ecc->read_page_raw = qcom_nandc_read_page_raw;
+ ecc->read_oob = qcom_nandc_read_oob;
+ ecc->write_page = qcom_nandc_write_page;
+ ecc->write_page_raw = qcom_nandc_write_page_raw;
+ ecc->write_oob = qcom_nandc_write_oob;
+
+ ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
+ /* Free the initially allocated BAM transaction for reading the ONFI params */
+ if (nandc->props->is_bam)
+ free_bam_transaction(nandc);
+
+ nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
+ cwperpage);
+
+ /* Now allocate the BAM transaction based on updated max_cwperpage */
+ if (nandc->props->is_bam) {
+ nandc->bam_txn = alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev,
+ "failed to allocate bam transaction\n");
+ return -ENOMEM;
+ }
+ }
+
+ /*
+ * DATA_UD_BYTES varies based on whether the read/write command protects
+ * spare data with ECC too. We protect spare data by default, so we set
+ * it to main + spare data, which are 512 and 4 bytes respectively.
+ */
+ host->cw_data = 516;
+
+ /*
+ * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
+ * for 8 bit ECC
+ */
+ host->cw_size = host->cw_data + ecc->bytes;
+ bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
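+	/*
+	 * Illustrative numbers: for a 2K page with 4-bit ECC, cw_size =
+	 * 516 + 12 = 528 and bad_block_byte = 2048 - 528 * 3 + 1 = 465.
+	 */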
+
+ host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
+ | host->cw_data << UD_SIZE_BYTES
+ | 0 << DISABLE_STATUS_AFTER_WRITE
+ | 5 << NUM_ADDR_CYCLES
+ | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
+ | 0 << STATUS_BFR_READ
+ | 1 << SET_RD_MODE_AFTER_STATUS
+ | host->spare_bytes << SPARE_SIZE_BYTES;
+
+ host->cfg1 = 7 << NAND_RECOVERY_CYCLES
+ | 0 << CS_ACTIVE_BSY
+ | bad_block_byte << BAD_BLOCK_BYTE_NUM
+ | 0 << BAD_BLOCK_IN_SPARE_AREA
+ | 2 << WR_RD_BSY_GAP
+ | wide_bus << WIDE_FLASH
+ | host->bch_enabled << ENABLE_BCH_ECC;
+
+ host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
+ | host->cw_size << UD_SIZE_BYTES
+ | 5 << NUM_ADDR_CYCLES
+ | 0 << SPARE_SIZE_BYTES;
+
+ host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
+ | 0 << CS_ACTIVE_BSY
+ | 17 << BAD_BLOCK_BYTE_NUM
+ | 1 << BAD_BLOCK_IN_SPARE_AREA
+ | 2 << WR_RD_BSY_GAP
+ | wide_bus << WIDE_FLASH
+ | 1 << DEV0_CFG1_ECC_DISABLE;
+
+ host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
+ | 0 << ECC_SW_RESET
+ | host->cw_data << ECC_NUM_DATA_BYTES
+ | 1 << ECC_FORCE_CLK_OPEN
+ | ecc_mode << ECC_MODE
+ | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
+
+ if (!nandc->props->qpic_v2)
+ host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+
+ host->clrflashstatus = FS_READY_BSY_N;
+ host->clrreadstatus = 0xc0;
+ nandc->regs->erased_cw_detect_cfg_clr =
+ cpu_to_le32(CLR_ERASED_PAGE_DET);
+ nandc->regs->erased_cw_detect_cfg_set =
+ cpu_to_le32(SET_ERASED_PAGE_DET);
+
+ dev_dbg(nandc->dev,
+ "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
+ host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
+ host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
+ cwperpage);
+
+ return 0;
+}
+
+static int qcom_op_cmd_mapping(struct nand_chip *chip, u8 opcode,
+ struct qcom_op *q_op)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ int cmd;
+
+ switch (opcode) {
+ case NAND_CMD_RESET:
+ cmd = OP_RESET_DEVICE;
+ break;
+ case NAND_CMD_READID:
+ cmd = OP_FETCH_ID;
+ break;
+ case NAND_CMD_PARAM:
+ if (nandc->props->qpic_v2)
+ cmd = OP_PAGE_READ_ONFI_READ;
+ else
+ cmd = OP_PAGE_READ;
+ break;
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ cmd = OP_BLOCK_ERASE;
+ break;
+ case NAND_CMD_STATUS:
+ cmd = OP_CHECK_STATUS;
+ break;
+ case NAND_CMD_PAGEPROG:
+ cmd = OP_PROGRAM_PAGE;
+ q_op->flag = OP_PROGRAM_PAGE;
+ nandc->exec_opwrite = true;
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_READSTART:
+ if (host->use_ecc)
+ cmd = OP_PAGE_READ_WITH_ECC;
+ else
+ cmd = OP_PAGE_READ;
+ break;
+ default:
+ dev_err(nandc->dev, "Opcode not supported: %u\n", opcode);
+ return -EOPNOTSUPP;
+ }
+
+ return cmd;
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static int qcom_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct qcom_op *q_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id;
+ int i, ret;
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ret = qcom_op_cmd_mapping(chip, instr->ctx.cmd.opcode, q_op);
+ if (ret < 0)
+ return ret;
+
+ q_op->cmd_reg = ret;
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ q_op->addr1_reg |= addrs[i] << (i * 8);
+
+ if (naddrs > 4)
+ q_op->addr2_reg |= addrs[4];
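+
+			/*
+			 * e.g. (illustrative) addrs = {0x04, 0x00, 0x23, 0x01,
+			 * 0x00} yields addr1_reg = 0x01230004, addr2_reg = 0x00.
+			 */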
+
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ q_op->data_instr = instr;
+ q_op->data_instr_idx = op_id;
+ q_op->rdy_delay_ns = instr->delay_ns;
+ fallthrough;
+ case NAND_OP_DATA_OUT_INSTR:
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ q_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void qcom_delay_ns(unsigned int ns)
+{
+ if (!ns)
+ return;
+
+ if (ns < 10000)
+ ndelay(ns);
+ else
+ udelay(DIV_ROUND_UP(ns, 1000));
+}
+
+static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ unsigned long start = jiffies + msecs_to_jiffies(time_ms);
+ u32 flash;
+
+ nandc_read_buffer_sync(nandc, true);
+
+ do {
+ flash = le32_to_cpu(nandc->reg_read_buf[0]);
+ if (flash & FS_READY_BSY_N)
+ return 0;
+ cpu_relax();
+ } while (time_after(start, jiffies));
+
+	dev_err(nandc->dev, "Timeout waiting for device to be ready: 0x%08x\n", flash);
+
+ return -ETIMEDOUT;
+}
+
+static int qcom_read_status_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct qcom_op q_op = {};
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id = 0;
+ unsigned int len = 0;
+ int ret, num_cw, i;
+ u32 flash_status;
+
+ host->status = NAND_STATUS_READY | NAND_STATUS_WP;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+ return ret;
+
+ num_cw = nandc->exec_opwrite ? ecc->steps : 1;
+ nandc->exec_opwrite = false;
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+ nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting status descriptor\n");
+ goto err_out;
+ }
+
+ nandc_read_buffer_sync(nandc, true);
+
+ for (i = 0; i < num_cw; i++) {
+ flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+
+ if (flash_status & FS_MPU_ERR)
+ host->status &= ~NAND_STATUS_WP;
+
+ if (flash_status & FS_OP_ERR ||
+ (i == (num_cw - 1) && (flash_status & FS_DEVICE_STS_ERR)))
+ host->status |= NAND_STATUS_FAIL;
+ }
+
+ flash_status = host->status;
+ instr = q_op.data_instr;
+ op_id = q_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+ memcpy(instr->ctx.data.buf.in, &flash_status, len);
+
+err_out:
+ return ret;
+}
+
+static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_op q_op = {};
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id = 0;
+ unsigned int len = 0;
+ int ret;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+ return ret;
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+ nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
+ nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
+ nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
+ nandc->props->is_bam ? 0 : DM_EN);
+
+ nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting read id descriptor\n");
+ goto err_out;
+ }
+
+ instr = q_op.data_instr;
+ op_id = q_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+
+ nandc_read_buffer_sync(nandc, true);
+ memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
+
+err_out:
+ return ret;
+}
+
+static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_op q_op = {};
+ int ret;
+ int instrs = 1;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+ return ret;
+
+ if (q_op.flag == OP_PROGRAM_PAGE) {
+ goto wait_rdy;
+ } else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
+ q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
+ nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
+ nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
+ nandc_set_reg(chip, NAND_DEV0_CFG0,
+ host->cfg0_raw & ~(7 << CW_PER_PAGE));
+ nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
+ instrs = 3;
+ } else {
+ return 0;
+ }
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+ nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+	if (q_op.cmd_reg == OP_BLOCK_ERASE)
+		write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+	else
+		read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting misc descriptor\n");
+ goto err_out;
+ }
+
+wait_rdy:
+ qcom_delay_ns(q_op.rdy_delay_ns);
+ ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms);
+
+err_out:
+ return ret;
+}
+
+static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_op q_op = {};
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id = 0;
+ unsigned int len = 0;
+ int ret;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+ return ret;
+
+ q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+
+ nandc_set_reg(chip, NAND_ADDR0, 0);
+ nandc_set_reg(chip, NAND_ADDR1, 0);
+ nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
+ | 512 << UD_SIZE_BYTES
+ | 5 << NUM_ADDR_CYCLES
+ | 0 << SPARE_SIZE_BYTES);
+ nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
+ | 0 << CS_ACTIVE_BSY
+ | 17 << BAD_BLOCK_BYTE_NUM
+ | 1 << BAD_BLOCK_IN_SPARE_AREA
+ | 2 << WR_RD_BSY_GAP
+ | 0 << WIDE_FLASH
+ | 1 << DEV0_CFG1_ECC_DISABLE);
+ if (!nandc->props->qpic_v2)
+ nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
+
+ /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
+ if (!nandc->props->qpic_v2) {
+ nandc_set_reg(chip, NAND_DEV_CMD_VLD,
+ (nandc->vld & ~READ_START_VLD));
+ nandc_set_reg(chip, NAND_DEV_CMD1,
+ (nandc->cmd1 & ~(0xFF << READ_ADDR))
+ | NAND_CMD_PARAM << READ_ADDR);
+ }
+
+ nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+
+ if (!nandc->props->qpic_v2) {
+ nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
+ nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
+ }
+
+ instr = q_op.data_instr;
+ op_id = q_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+
+ nandc_set_read_loc(chip, 0, 0, 0, len, 1);
+
+ if (!nandc->props->qpic_v2) {
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
+ write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ }
+
+ nandc->buf_count = len;
+ memset(nandc->data_buffer, 0xff, nandc->buf_count);
+
+ config_nand_single_cw_page_read(chip, false, 0);
+
+ read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ nandc->buf_count, 0);
+
+ /* restore CMD1 and VLD regs */
+ if (!nandc->props->qpic_v2) {
+ write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
+ }
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting param page descriptor\n");
+ goto err_out;
+ }
+
+ ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms);
+ if (ret)
+ goto err_out;
+
+ memcpy(instr->ctx.data.buf.in, nandc->data_buffer, len);
+
+err_out:
+ return ret;
+}
+
+static const struct nand_op_parser qcom_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ qcom_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ qcom_read_status_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ qcom_param_page_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 512)),
+ NAND_OP_PARSER_PATTERN(
+ qcom_misc_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYCLE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
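+
+/*
+ * Each pattern above maps a framework-level operation (READID, STATUS,
+ * PARAM page read, and misc commands such as reset/erase/program) onto the
+ * matching controller-specific exec helper.
+ */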
+
+static int qcom_check_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr;
+ int op_id;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (instr->ctx.cmd.opcode != NAND_CMD_RESET &&
+ instr->ctx.cmd.opcode != NAND_CMD_READID &&
+ instr->ctx.cmd.opcode != NAND_CMD_PARAM &&
+ instr->ctx.cmd.opcode != NAND_CMD_ERASE1 &&
+ instr->ctx.cmd.opcode != NAND_CMD_ERASE2 &&
+ instr->ctx.cmd.opcode != NAND_CMD_STATUS &&
+ instr->ctx.cmd.opcode != NAND_CMD_PAGEPROG &&
+ instr->ctx.cmd.opcode != NAND_CMD_READ0 &&
+ instr->ctx.cmd.opcode != NAND_CMD_READSTART)
+ return -EOPNOTSUPP;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ if (check_only)
+ return qcom_check_op(chip, op);
+
+ return nand_op_parser_exec_op(chip, &qcom_op_parser, op, check_only);
+}
+
+static const struct nand_controller_ops qcom_nandc_ops = {
+ .attach_chip = qcom_nand_attach_chip,
+ .exec_op = qcom_nand_exec_op,
+};
+
+static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+{
+ if (nandc->props->is_bam) {
+ if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+ dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+
+ if (nandc->tx_chan)
+ dma_release_channel(nandc->tx_chan);
+
+ if (nandc->rx_chan)
+ dma_release_channel(nandc->rx_chan);
+
+ if (nandc->cmd_chan)
+ dma_release_channel(nandc->cmd_chan);
+ } else {
+ if (nandc->chan)
+ dma_release_channel(nandc->chan);
+ }
+}
+
+static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+{
+ int ret;
+
+ ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(nandc->dev, "failed to set DMA mask\n");
+ return ret;
+ }
+
+ /*
+ * we use the internal buffer for reading ONFI params, reading small
+	 * data like ID and status, and performing read-copy-write operations
+ * when writing to a codeword partially. 532 is the maximum possible
+ * size of a codeword for our nand controller
+ */
+ nandc->buf_size = 532;
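+	/* i.e. 516 bytes of data plus 16 ECC/spare/BBM bytes (8-bit ECC case) */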
+
+ nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
+ if (!nandc->data_buffer)
+ return -ENOMEM;
+
+ nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
+ if (!nandc->regs)
+ return -ENOMEM;
+
+ nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
+ sizeof(*nandc->reg_read_buf),
+ GFP_KERNEL);
+ if (!nandc->reg_read_buf)
+ return -ENOMEM;
+
+ if (nandc->props->is_bam) {
+ nandc->reg_read_dma =
+ dma_map_single(nandc->dev, nandc->reg_read_buf,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
+ dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
+ return -EIO;
+ }
+
+ nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
+ if (IS_ERR(nandc->tx_chan)) {
+ ret = PTR_ERR(nandc->tx_chan);
+ nandc->tx_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "tx DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
+ if (IS_ERR(nandc->rx_chan)) {
+ ret = PTR_ERR(nandc->rx_chan);
+ nandc->rx_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "rx DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
+ if (IS_ERR(nandc->cmd_chan)) {
+ ret = PTR_ERR(nandc->cmd_chan);
+ nandc->cmd_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "cmd DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ /*
+ * Initially allocate BAM transaction to read ONFI param page.
+ * After detecting all the devices, this BAM transaction will
+ * be freed and the next BAM transaction will be allocated with
+ * maximum codeword size
+ */
+ nandc->max_cwperpage = 1;
+ nandc->bam_txn = alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev,
+ "failed to allocate bam transaction\n");
+ ret = -ENOMEM;
+ goto unalloc;
+ }
+ } else {
+ nandc->chan = dma_request_chan(nandc->dev, "rxtx");
+ if (IS_ERR(nandc->chan)) {
+ ret = PTR_ERR(nandc->chan);
+ nandc->chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "rxtx DMA channel request failed\n");
+ return ret;
+ }
+ }
+
+ INIT_LIST_HEAD(&nandc->desc_list);
+ INIT_LIST_HEAD(&nandc->host_list);
+
+ nand_controller_init(&nandc->controller);
+ nandc->controller.ops = &qcom_nandc_ops;
+
+ return 0;
+unalloc:
+ qcom_nandc_unalloc(nandc);
+ return ret;
+}
+
+/* one time setup of a few nand controller registers */
+static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+{
+ u32 nand_ctrl;
+
+ /* kill onenand */
+ if (!nandc->props->is_qpic)
+ nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+
+ if (!nandc->props->qpic_v2)
+ nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
+ NAND_DEV_CMD_VLD_VAL);
+
+ /* enable ADM or BAM DMA */
+ if (nandc->props->is_bam) {
+ nand_ctrl = nandc_read(nandc, NAND_CTRL);
+
+ /*
+		 * NAND_CTRL is an operational register, and CPU
+		 * access to operational registers is read-only
+		 * in BAM mode. So update the NAND_CTRL register
+		 * only if we are not already in BAM mode. In most
+		 * cases BAM mode will have been enabled by the
+		 * bootloader.
+ */
+ if (!(nand_ctrl & BAM_MODE_EN))
+ nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
+ } else {
+ nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
+ }
+
+ /* save the original values of these registers */
+ if (!nandc->props->qpic_v2) {
+ nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
+ nandc->vld = NAND_DEV_CMD_VLD_VAL;
+ }
+
+ return 0;
+}
+
+static const char * const probes[] = { "cmdlinepart", "ofpart", "qcomsmem", NULL };
+
+static int qcom_nand_host_parse_boot_partitions(struct qcom_nand_controller *nandc,
+ struct qcom_nand_host *host,
+ struct device_node *dn)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_boot_partition *boot_partition;
+ struct device *dev = nandc->dev;
+ int partitions_count, i, j, ret;
+
+ if (!of_property_present(dn, "qcom,boot-partitions"))
+ return 0;
+
+ partitions_count = of_property_count_u32_elems(dn, "qcom,boot-partitions");
+ if (partitions_count <= 0) {
+ dev_err(dev, "Error parsing boot partition\n");
+ return partitions_count ? partitions_count : -EINVAL;
+ }
+
+ host->nr_boot_partitions = partitions_count / 2;
+ host->boot_partitions = devm_kcalloc(dev, host->nr_boot_partitions,
+ sizeof(*host->boot_partitions), GFP_KERNEL);
+ if (!host->boot_partitions) {
+ host->nr_boot_partitions = 0;
+ return -ENOMEM;
+ }
+
+ for (i = 0, j = 0; i < host->nr_boot_partitions; i++, j += 2) {
+ boot_partition = &host->boot_partitions[i];
+
+ ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j,
+ &boot_partition->page_offset);
+ if (ret) {
+ dev_err(dev, "Error parsing boot partition offset at index %d\n", i);
+ host->nr_boot_partitions = 0;
+ return ret;
+ }
+
+ if (boot_partition->page_offset % mtd->writesize) {
+ dev_err(dev, "Boot partition offset not multiple of writesize at index %i\n",
+ i);
+ host->nr_boot_partitions = 0;
+ return -EINVAL;
+ }
+ /* Convert offset to nand pages */
+ boot_partition->page_offset /= mtd->writesize;
+
+ ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j + 1,
+ &boot_partition->page_size);
+ if (ret) {
+ dev_err(dev, "Error parsing boot partition size at index %d\n", i);
+ host->nr_boot_partitions = 0;
+ return ret;
+ }
+
+ if (boot_partition->page_size % mtd->writesize) {
+ dev_err(dev, "Boot partition size not multiple of writesize at index %i\n",
+ i);
+ host->nr_boot_partitions = 0;
+ return -EINVAL;
+ }
+ /* Convert size to nand pages */
+ boot_partition->page_size /= mtd->writesize;
+ }
+
+ return 0;
+}
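+
+/*
+ * Illustrative (hypothetical values) device tree usage of the property
+ * parsed above - pairs of <offset size>, both in bytes and multiples of
+ * the page size:
+ *
+ *   qcom,boot-partitions = <0x0 0x00400000 0x00400000 0x00200000>;
+ *
+ * i.e. a 4 MiB boot partition at offset 0 followed by a 2 MiB one.
+ */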
+
+static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
+ struct qcom_nand_host *host,
+ struct device_node *dn)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = nandc->dev;
+ int ret;
+
+ ret = of_property_read_u32(dn, "reg", &host->cs);
+ if (ret) {
+ dev_err(dev, "can't get chip-select\n");
+ return -ENXIO;
+ }
+
+ nand_set_flash_node(chip, dn);
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
+ if (!mtd->name)
+ return -ENOMEM;
+
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+
+ /*
+	 * The bad block marker is readable only when we read the last codeword
+	 * of a page with ECC disabled. Currently, the nand_base and nand_bbt
+	 * helpers don't allow us to read the BBM from a NAND chip with ECC
+	 * disabled (MTD_OPS_PLACE_OOB is set by default). Use the block_bad
+	 * and block_markbad helpers until we permanently switch to using
+	 * MTD_OPS_RAW for all drivers (with the help of badblockbits).
+ */
+ chip->legacy.block_bad = qcom_nandc_block_bad;
+ chip->legacy.block_markbad = qcom_nandc_block_markbad;
+
+ chip->controller = &nandc->controller;
+ chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
+ NAND_SKIP_BBTSCAN;
+
+ /* set up initial status value */
+ host->status = NAND_STATUS_READY | NAND_STATUS_WP;
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
+ if (ret)
+ goto err;
+
+ if (nandc->props->use_codeword_fixup) {
+ ret = qcom_nand_host_parse_boot_partitions(nandc, host, dn);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ nand_cleanup(chip);
+ return ret;
+}
+
+static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
+{
+ struct device *dev = nandc->dev;
+ struct device_node *dn = dev->of_node, *child;
+ struct qcom_nand_host *host;
+ int ret = -ENODEV;
+
+ for_each_available_child_of_node(dn, child) {
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ of_node_put(child);
+ return -ENOMEM;
+ }
+
+ ret = qcom_nand_host_init_and_register(nandc, host, child);
+ if (ret) {
+ devm_kfree(dev, host);
+ continue;
+ }
+
+ list_add_tail(&host->node, &nandc->host_list);
+ }
+
+ return ret;
+}
+
+/* parse custom DT properties here */
+static int qcom_nandc_parse_dt(struct platform_device *pdev)
+{
+ struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
+ struct device_node *np = nandc->dev->of_node;
+ int ret;
+
+ if (!nandc->props->is_bam) {
+ ret = of_property_read_u32(np, "qcom,cmd-crci",
+ &nandc->cmd_crci);
+ if (ret) {
+ dev_err(nandc->dev, "command CRCI unspecified\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "qcom,data-crci",
+ &nandc->data_crci);
+ if (ret) {
+ dev_err(nandc->dev, "data CRCI unspecified\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_nandc_probe(struct platform_device *pdev)
+{
+ struct qcom_nand_controller *nandc;
+ const void *dev_data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret;
+
+ nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
+ if (!nandc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, nandc);
+ nandc->dev = dev;
+
+ dev_data = of_device_get_match_data(dev);
+ if (!dev_data) {
+ dev_err(&pdev->dev, "failed to get device data\n");
+ return -ENODEV;
+ }
+
+ nandc->props = dev_data;
+
+ nandc->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(nandc->core_clk))
+ return PTR_ERR(nandc->core_clk);
+
+ nandc->aon_clk = devm_clk_get(dev, "aon");
+ if (IS_ERR(nandc->aon_clk))
+ return PTR_ERR(nandc->aon_clk);
+
+ ret = qcom_nandc_parse_dt(pdev);
+ if (ret)
+ return ret;
+
+ nandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(nandc->base))
+ return PTR_ERR(nandc->base);
+
+ nandc->base_phys = res->start;
+ nandc->base_dma = dma_map_resource(dev, res->start,
+ resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(dev, nandc->base_dma))
+ return -ENXIO;
+
+ ret = clk_prepare_enable(nandc->core_clk);
+ if (ret)
+ goto err_core_clk;
+
+ ret = clk_prepare_enable(nandc->aon_clk);
+ if (ret)
+ goto err_aon_clk;
+
+ ret = qcom_nandc_alloc(nandc);
+ if (ret)
+ goto err_nandc_alloc;
+
+ ret = qcom_nandc_setup(nandc);
+ if (ret)
+ goto err_setup;
+
+ ret = qcom_probe_nand_devices(nandc);
+ if (ret)
+ goto err_setup;
+
+ return 0;
+
+err_setup:
+ qcom_nandc_unalloc(nandc);
+err_nandc_alloc:
+ clk_disable_unprepare(nandc->aon_clk);
+err_aon_clk:
+ clk_disable_unprepare(nandc->core_clk);
+err_core_clk:
+ dma_unmap_resource(dev, nandc->base_dma, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ return ret;
+}
+
+static void qcom_nandc_remove(struct platform_device *pdev)
+{
+ struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct qcom_nand_host *host;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry(host, &nandc->host_list, node) {
+ chip = &host->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ }
+
+ qcom_nandc_unalloc(nandc);
+
+ clk_disable_unprepare(nandc->aon_clk);
+ clk_disable_unprepare(nandc->core_clk);
+
+ dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+}
+
+static const struct qcom_nandc_props ipq806x_nandc_props = {
+ .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
+ .is_bam = false,
+ .use_codeword_fixup = true,
+ .dev_cmd_reg_start = 0x0,
+};
+
+static const struct qcom_nandc_props ipq4019_nandc_props = {
+ .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+ .is_bam = true,
+ .is_qpic = true,
+ .dev_cmd_reg_start = 0x0,
+};
+
+static const struct qcom_nandc_props ipq8074_nandc_props = {
+ .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+ .is_bam = true,
+ .is_qpic = true,
+ .dev_cmd_reg_start = 0x7000,
+};
+
+static const struct qcom_nandc_props sdx55_nandc_props = {
+ .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+ .is_bam = true,
+ .is_qpic = true,
+ .qpic_v2 = true,
+ .dev_cmd_reg_start = 0x7000,
+};
+
+/*
+ * data will hold a struct pointer containing more differences once we support
+ * more controller variants
+ */
+static const struct of_device_id qcom_nandc_of_match[] = {
+ {
+ .compatible = "qcom,ipq806x-nand",
+ .data = &ipq806x_nandc_props,
+ },
+ {
+ .compatible = "qcom,ipq4019-nand",
+ .data = &ipq4019_nandc_props,
+ },
+ {
+ .compatible = "qcom,ipq6018-nand",
+ .data = &ipq8074_nandc_props,
+ },
+ {
+ .compatible = "qcom,ipq8074-nand",
+ .data = &ipq8074_nandc_props,
+ },
+ {
+ .compatible = "qcom,sdx55-nand",
+ .data = &sdx55_nandc_props,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
+
+static struct platform_driver qcom_nandc_driver = {
+ .driver = {
+ .name = "qcom-nandc",
+ .of_match_table = qcom_nandc_of_match,
+ },
+ .probe = qcom_nandc_probe,
+ .remove_new = qcom_nandc_remove,
+};
+module_platform_driver(qcom_nandc_driver);
+
+MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
new file mode 100644
index 0000000000..ed0cf732d2
--- /dev/null
+++ b/drivers/mtd/nand/raw/r852.c
@@ -0,0 +1,1091 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ */
+
+#define DRV_NAME "r852"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <linux/sched.h>
+#include "sm_common.h"
+#include "r852.h"
+
+
+static bool r852_enable_dma = 1;
+module_param(r852_enable_dma, bool, S_IRUGO);
+MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
+
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+/* read register */
+static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
+{
+ uint8_t reg = readb(dev->mmio + address);
+ return reg;
+}
+
+/* write register */
+static inline void r852_write_reg(struct r852_device *dev,
+ int address, uint8_t value)
+{
+ writeb(value, dev->mmio + address);
+}
+
+
+/* read dword sized register */
+static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
+{
+ uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
+ return reg;
+}
+
+/* write dword sized register */
+static inline void r852_write_reg_dword(struct r852_device *dev,
+ int address, uint32_t value)
+{
+ writel(cpu_to_le32(value), dev->mmio + address);
+}
+
+/* returns pointer to our private structure */
+static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ return nand_get_controller_data(chip);
+}
+
+
+/* check if controller supports dma */
+static void r852_dma_test(struct r852_device *dev)
+{
+ dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
+ (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
+
+ if (!dev->dma_usable)
+ message("Non dma capable device detected, dma disabled");
+
+ if (!r852_enable_dma) {
+ message("disabling dma on user request");
+ dev->dma_usable = 0;
+ }
+}
+
+/*
+ * Enable DMA. Enables either the first or second stage of the DMA;
+ * expects dev->dma_dir and dev->dma_state to be set.
+ */
+static void r852_dma_enable(struct r852_device *dev)
+{
+ uint8_t dma_reg, dma_irq_reg;
+
+ /* Set up dma settings */
+ dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
+ dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);
+
+ if (dev->dma_dir)
+ dma_reg |= R852_DMA_READ;
+
+ if (dev->dma_state == DMA_INTERNAL) {
+ dma_reg |= R852_DMA_INTERNAL;
+		/*
+		 * Precaution to make sure HW doesn't write to random
+		 * kernel memory
+		 */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ } else {
+ dma_reg |= R852_DMA_MEMORY;
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_dma_addr));
+ }
+
+ /* Precaution: make sure write reached the device */
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);
+
+ /* Set dma irq */
+ dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ dma_irq_reg |
+ R852_DMA_IRQ_INTERNAL |
+ R852_DMA_IRQ_ERROR |
+ R852_DMA_IRQ_MEMORY);
+}
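+
+/*
+ * Summary of the two-stage DMA dance (as implemented by r852_dma_enable()
+ * above and the interrupt handler below): a read first moves the sector
+ * from the card into the controller's internal buffer (DMA_INTERNAL), then
+ * from the internal buffer into system memory (DMA_MEMORY); a write runs
+ * the two stages in the opposite order.
+ */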
+
+/*
+ * Disable dma, called from the interrupt handler, which specifies
+ * success of the operation via 'error' argument
+ */
+static void r852_dma_done(struct r852_device *dev, int error)
+{
+ WARN_ON(dev->dma_stage == 0);
+
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
+ r852_read_reg_dword(dev, R852_DMA_IRQ_STA));
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);
+
+ /* Precaution to make sure HW doesn't write to random kernel memory */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ dev->dma_error = error;
+ dev->dma_stage = 0;
+
+ if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
+ dma_unmap_single(&dev->pci_dev->dev, dev->phys_dma_addr,
+ R852_DMA_LEN,
+ dev->dma_dir ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+}
+
+/*
+ * Wait till the DMA is done; this covers both phases of the transfer.
+ */
+static int r852_dma_wait(struct r852_device *dev)
+{
+ long timeout = wait_for_completion_timeout(&dev->dma_done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ dbg("timeout waiting for DMA interrupt");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Read/write one sector (512 bytes) using DMA; only whole sectors can
+ * be transferred this way.
+ */
+static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
+{
+ int bounce = 0;
+ unsigned long flags;
+ int error;
+
+ dev->dma_error = 0;
+
+ /* Set dma direction */
+ dev->dma_dir = do_read;
+ dev->dma_stage = 1;
+ reinit_completion(&dev->dma_done);
+
+ dbg_verbose("doing dma %s ", do_read ? "read" : "write");
+
+	/*
+	 * Set the initial DMA state: for reads, first fill the on-board
+	 * buffer from the device; for writes, first fill the buffer from
+	 * memory.
+	 */
+ dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;
+
+	/* if the incoming buffer is not 512-byte aligned, use the bounce buffer */
+ if ((unsigned long)buf & (R852_DMA_LEN-1))
+ bounce = 1;
+
+ if (!bounce) {
+ dev->phys_dma_addr = dma_map_single(&dev->pci_dev->dev, buf,
+ R852_DMA_LEN,
+ do_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pci_dev->dev, dev->phys_dma_addr))
+ bounce = 1;
+ }
+
+ if (bounce) {
+ dbg_verbose("dma: using bounce buffer");
+ dev->phys_dma_addr = dev->phys_bounce_buffer;
+ if (!do_read)
+ memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
+ }
+
+ /* Enable DMA */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ r852_dma_enable(dev);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ /* Wait till complete */
+ error = r852_dma_wait(dev);
+
+ if (error) {
+ r852_dma_done(dev, error);
+ return;
+ }
+
+ if (do_read && bounce)
+ memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
+}
+
+/*
+ * Program data lines of the nand chip to send data to it
+ */
+static void r852_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+ uint32_t reg;
+
+ /* Don't allow any access to hardware if we suspect card removal */
+ if (dev->card_unstable)
+ return;
+
+	/* Special case for a whole-sector write */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, (uint8_t *)buf, 0);
+ return;
+ }
+
+	/* write in DWORD-sized chunks - faster */
+ while (len >= 4) {
+ reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
+ r852_write_reg_dword(dev, R852_DATALINE, reg);
+ buf += 4;
+ len -= 4;
+
+ }
+
+ /* write rest */
+ while (len > 0) {
+ r852_write_reg(dev, R852_DATALINE, *buf++);
+ len--;
+ }
+}
+
+/*
+ * Read data lines of the nand chip to retrieve data
+ */
+static void r852_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+ uint32_t reg;
+
+ if (dev->card_unstable) {
+		/*
+		 * since we can't signal an error here, at least return a
+		 * predictable buffer
+		 */
+ memset(buf, 0, len);
+ return;
+ }
+
+ /* special case for whole sector read */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, buf, 1);
+ return;
+ }
+
+ /* read in dword sized chunks */
+ while (len >= 4) {
+
+ reg = r852_read_reg_dword(dev, R852_DATALINE);
+ *buf++ = reg & 0xFF;
+ *buf++ = (reg >> 8) & 0xFF;
+ *buf++ = (reg >> 16) & 0xFF;
+ *buf++ = (reg >> 24) & 0xFF;
+ len -= 4;
+ }
+
+	/* read the rest byte by byte */
+ while (len--)
+ *buf++ = r852_read_reg(dev, R852_DATALINE);
+}
+
+/*
+ * Read one byte from nand chip
+ */
+static uint8_t r852_read_byte(struct nand_chip *chip)
+{
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+
+ /* Same problem as in r852_read_buf.... */
+ if (dev->card_unstable)
+ return 0;
+
+ return r852_read_reg(dev, R852_DATALINE);
+}
+
+/*
+ * Control several chip lines & send commands
+ */
+static void r852_cmdctl(struct nand_chip *chip, int dat, unsigned int ctrl)
+{
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+
+ if (dev->card_unstable)
+ return;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+
+ dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
+ R852_CTL_ON | R852_CTL_CARDENABLE);
+
+ if (ctrl & NAND_ALE)
+ dev->ctlreg |= R852_CTL_DATA;
+
+ if (ctrl & NAND_CLE)
+ dev->ctlreg |= R852_CTL_COMMAND;
+
+ if (ctrl & NAND_NCE)
+ dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
+ else
+ dev->ctlreg &= ~R852_CTL_WRITE;
+
+		/* when a write is started, enable write access */
+ if (dat == NAND_CMD_ERASE1)
+ dev->ctlreg |= R852_CTL_WRITE;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+	/*
+	 * HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we
+	 * need to set write mode
+	 */
+ if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
+ dev->ctlreg |= R852_CTL_WRITE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+ if (dat != NAND_CMD_NONE)
+ r852_write_reg(dev, R852_DATALINE, dat);
+}
+
+/*
+ * Wait till the card is ready.
+ * Based on nand_wait, but also reports DMA errors.
+ */
+static int r852_wait(struct nand_chip *chip)
+{
+ struct r852_device *dev = nand_get_controller_data(chip);
+
+ unsigned long timeout;
+ u8 status;
+
+ timeout = jiffies + msecs_to_jiffies(400);
+
+ while (time_before(jiffies, timeout))
+ if (chip->legacy.dev_ready(chip))
+ break;
+
+ nand_status_op(chip, &status);
+
+	/* Unfortunately, there is no way to send a detailed error status... */
+ if (dev->dma_error) {
+ status |= NAND_STATUS_FAIL;
+ dev->dma_error = 0;
+ }
+ return status;
+}
+
+/*
+ * Check if card is ready
+ */
+
+static int r852_ready(struct nand_chip *chip)
+{
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+ return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
+}
+
+
+/*
+ * Set ECC engine mode
+ */
+
+static void r852_ecc_hwctl(struct nand_chip *chip, int mode)
+{
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+
+ if (dev->card_unstable)
+ return;
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ /* enable ecc generation/check*/
+ dev->ctlreg |= R852_CTL_ECC_ENABLE;
+
+ /* flush ecc buffer */
+ r852_write_reg(dev, R852_CTL,
+ dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ r852_read_reg_dword(dev, R852_DATALINE);
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return;
+
+ case NAND_ECC_READSYN:
+ /* disable ecc generation */
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+}
+
+/*
+ * Calculate ECC, only used for writes
+ */
+
+static int r852_ecc_calculate(struct nand_chip *chip, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+ struct sm_oob *oob = (struct sm_oob *)ecc_code;
+ uint32_t ecc1, ecc2;
+
+ if (dev->card_unstable)
+ return 0;
+
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
+ ecc2 = r852_read_reg_dword(dev, R852_DATALINE);
+
+ oob->ecc1[0] = (ecc1) & 0xFF;
+ oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
+ oob->ecc1[2] = (ecc1 >> 16) & 0xFF;
+
+ oob->ecc2[0] = (ecc2) & 0xFF;
+ oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
+ oob->ecc2[2] = (ecc2 >> 16) & 0xFF;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return 0;
+}
+
+/*
+ * Correct the data using ECC, hw did almost everything for us
+ */
+
+static int r852_ecc_correct(struct nand_chip *chip, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ uint32_t ecc_reg;
+ uint8_t ecc_status, err_byte;
+ int i, error = 0;
+
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+
+ if (dev->card_unstable)
+ return 0;
+
+ if (dev->dma_error) {
+ dev->dma_error = 0;
+ return -EIO;
+ }
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+ ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
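+
+	/*
+	 * As decoded by the loop below, ecc_reg packs both 256-byte halves:
+	 * bits 0-7 hold the error byte and bits 8-15 the status for the
+	 * first half, with the same pair in bits 16-31 for the second half.
+	 */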
+
+ for (i = 0 ; i <= 1 ; i++) {
+
+ ecc_status = (ecc_reg >> 8) & 0xFF;
+
+ /* ecc uncorrectable error */
+ if (ecc_status & R852_ECC_FAIL) {
+ dbg("ecc: unrecoverable error, in half %d", i);
+ error = -EBADMSG;
+ goto exit;
+ }
+
+ /* correctable error */
+ if (ecc_status & R852_ECC_CORRECTABLE) {
+
+ err_byte = ecc_reg & 0xFF;
+ dbg("ecc: recoverable error, "
+ "in half %d, byte %d, bit %d", i,
+ err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);
+
+ dat[err_byte] ^=
+ 1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
+ error++;
+ }
+
+ dat += 256;
+ ecc_reg >>= 16;
+ }
+exit:
+ return error;
+}
+
+/*
+ * This is a copy of nand_read_oob_std;
+ * nand_read_oob_syndrome assumes we can send a column address - we can't.
+ */
+static int r852_read_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+}
+
+/*
+ * Start the nand engine
+ */
+
+static void r852_engine_enable(struct r852_device *dev)
+{
+ if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ } else {
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ }
+ msleep(300);
+ r852_write_reg(dev, R852_CTL, 0);
+}
+
+
+/*
+ * Stop the nand engine
+ */
+
+static void r852_engine_disable(struct r852_device *dev)
+{
+ r852_write_reg_dword(dev, R852_HW, 0);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
+}
+
+/*
+ * Test if card is present
+ */
+
+static void r852_card_update_present(struct r852_device *dev)
+{
+ unsigned long flags;
+ uint8_t reg;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ reg = r852_read_reg(dev, R852_CARD_STA);
+ dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Update card detection IRQ state according to current card state
+ * which is read in r852_card_update_present
+ */
+static void r852_update_card_detect(struct r852_device *dev)
+{
+ int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ dev->card_unstable = 0;
+
+ card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
+ card_detect_reg |= R852_CARD_IRQ_GENABLE;
+
+ card_detect_reg |= dev->card_detected ?
+ R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
+
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
+}
+
+static ssize_t media_type_show(struct device *sys_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
+ struct r852_device *dev = r852_get_dev(mtd);
+ char *data = dev->sm ? "smartmedia" : "xd";
+
+ strcpy(buf, data);
+ return strlen(data);
+}
+static DEVICE_ATTR_RO(media_type);
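+
+/*
+ * Illustrative usage (the mtd index is an assumption):
+ *   cat /sys/class/mtd/mtd0/media_type
+ * prints either "smartmedia" or "xd".
+ */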
+
+
+/* Detect properties of card in slot */
+static void r852_update_media_status(struct r852_device *dev)
+{
+ uint8_t reg;
+ unsigned long flags;
+ int readonly;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (!dev->card_detected) {
+ message("card removed");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+		return;
+ }
+
+ readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
+ reg = r852_read_reg(dev, R852_DMA_CAP);
+ dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);
+
+ message("detected %s %s card in slot",
+ dev->sm ? "SmartMedia" : "xD",
+ readonly ? "readonly" : "writeable");
+
+ dev->readonly = readonly;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Register the nand device
+ * Called when the card is detected
+ */
+static int r852_register_nand_device(struct r852_device *dev)
+{
+ struct mtd_info *mtd = nand_to_mtd(dev->chip);
+
+ WARN_ON(dev->card_registered);
+
+ mtd->dev.parent = &dev->pci_dev->dev;
+
+ if (dev->readonly)
+ dev->chip->options |= NAND_ROM;
+
+ r852_engine_enable(dev);
+
+ if (sm_register_device(mtd, dev->sm))
+ goto error1;
+
+ if (device_create_file(&mtd->dev, &dev_attr_media_type)) {
+ message("can't create media type sysfs attribute");
+ goto error3;
+ }
+
+ dev->card_registered = 1;
+ return 0;
+error3:
+ WARN_ON(mtd_device_unregister(nand_to_mtd(dev->chip)));
+ nand_cleanup(dev->chip);
+error1:
+ /* Force card redetect */
+ dev->card_detected = 0;
+ return -1;
+}
+
+/*
+ * Unregister the card
+ */
+
+static void r852_unregister_nand_device(struct r852_device *dev)
+{
+ struct mtd_info *mtd = nand_to_mtd(dev->chip);
+
+ if (!dev->card_registered)
+ return;
+
+ device_remove_file(&mtd->dev, &dev_attr_media_type);
+ WARN_ON(mtd_device_unregister(mtd));
+ nand_cleanup(dev->chip);
+ r852_engine_disable(dev);
+ dev->card_registered = 0;
+}
+
+/* Card state updater */
+static void r852_card_detect_work(struct work_struct *work)
+{
+ struct r852_device *dev =
+ container_of(work, struct r852_device, card_detect_work.work);
+
+ r852_card_update_present(dev);
+ r852_update_card_detect(dev);
+ dev->card_unstable = 0;
+
+ /* False alarm */
+ if (dev->card_detected == dev->card_registered)
+ goto exit;
+
+ /* Read media properties */
+ r852_update_media_status(dev);
+
+ /* Register the card */
+ if (dev->card_detected)
+ r852_register_nand_device(dev);
+ else
+ r852_unregister_nand_device(dev);
+exit:
+ r852_update_card_detect(dev);
+}
+
+/* Ack + disable IRQ generation */
+static void r852_disable_irqs(struct r852_device *dev)
+{
+ uint8_t reg;
+ reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);
+
+ reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ reg & ~R852_DMA_IRQ_MASK);
+
+ r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
+}
+
+/* Interrupt handler */
+static irqreturn_t r852_irq(int irq, void *data)
+{
+ struct r852_device *dev = (struct r852_device *)data;
+
+ uint8_t card_status, dma_status;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&dev->irqlock);
+
+ /* handle card detection interrupts first */
+ card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
+ r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);
+
+ if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {
+
+ ret = IRQ_HANDLED;
+ dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);
+
+		/*
+		 * we shouldn't receive any interrupts while we wait for the
+		 * card to settle
+		 */
+ WARN_ON(dev->card_unstable);
+
+		/* disable irqs while the card is unstable */
+		/* this will time out an active DMA, but better that than garbage */
+ r852_disable_irqs(dev);
+
+ if (dev->card_unstable)
+ goto out;
+
+		/* let the card state settle a bit, and then do the work */
+ dev->card_unstable = 1;
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, msecs_to_jiffies(100));
+ goto out;
+ }
+
+
+ /* Handle dma interrupts */
+ dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);
+
+ if (dma_status & R852_DMA_IRQ_MASK) {
+
+ ret = IRQ_HANDLED;
+
+ if (dma_status & R852_DMA_IRQ_ERROR) {
+ dbg("received dma error IRQ");
+ r852_dma_done(dev, -EIO);
+ complete(&dev->dma_done);
+ goto out;
+ }
+
+ /* received DMA interrupt out of nowhere? */
+ WARN_ON_ONCE(dev->dma_stage == 0);
+
+ if (dev->dma_stage == 0)
+ goto out;
+
+ /* done device access */
+ if (dev->dma_state == DMA_INTERNAL &&
+ (dma_status & R852_DMA_IRQ_INTERNAL)) {
+
+ dev->dma_state = DMA_MEMORY;
+ dev->dma_stage++;
+ }
+
+ /* done memory DMA */
+ if (dev->dma_state == DMA_MEMORY &&
+ (dma_status & R852_DMA_IRQ_MEMORY)) {
+ dev->dma_state = DMA_INTERNAL;
+ dev->dma_stage++;
+ }
+
+ /* Enable 2nd half of dma dance */
+ if (dev->dma_stage == 2)
+ r852_dma_enable(dev);
+
+ /* Operation done */
+ if (dev->dma_stage == 3) {
+ r852_dma_done(dev, 0);
+ complete(&dev->dma_done);
+ }
+ goto out;
+ }
+
+ /* Handle unknown interrupts */
+ if (dma_status)
+ dbg("bad dma IRQ status = %x", dma_status);
+
+ if (card_status & ~R852_CARD_STA_CD)
+ dbg("strange card status = %x", card_status);
+
+out:
+ spin_unlock(&dev->irqlock);
+ return ret;
+}
+
+static int r852_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
+ chip->ecc.size = R852_DMA_LEN;
+ chip->ecc.bytes = SM_OOB_SIZE;
+ chip->ecc.strength = 2;
+ chip->ecc.hwctl = r852_ecc_hwctl;
+ chip->ecc.calculate = r852_ecc_calculate;
+ chip->ecc.correct = r852_ecc_correct;
+
+ /* TODO: hack */
+ chip->ecc.read_oob = r852_read_oob;
+
+ return 0;
+}
+
+static const struct nand_controller_ops r852_ops = {
+ .attach_chip = r852_attach_chip,
+};
+
+static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+ int error;
+ struct nand_chip *chip;
+ struct r852_device *dev;
+
+ /* pci initialization */
+ error = pci_enable_device(pci_dev);
+
+ if (error)
+ goto error1;
+
+ pci_set_master(pci_dev);
+
+ error = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
+ if (error)
+ goto error2;
+
+ error = pci_request_regions(pci_dev, DRV_NAME);
+
+ if (error)
+ goto error3;
+
+ error = -ENOMEM;
+
+ /* init nand chip, but register it only on card insert */
+ chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
+
+ if (!chip)
+ goto error4;
+
+ /* commands */
+ chip->legacy.cmd_ctrl = r852_cmdctl;
+ chip->legacy.waitfunc = r852_wait;
+ chip->legacy.dev_ready = r852_ready;
+
+ /* I/O */
+ chip->legacy.read_byte = r852_read_byte;
+ chip->legacy.read_buf = r852_read_buf;
+ chip->legacy.write_buf = r852_write_buf;
+
+ /* init our device structure */
+ dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
+
+ if (!dev)
+ goto error5;
+
+ nand_set_controller_data(chip, dev);
+ dev->chip = chip;
+ dev->pci_dev = pci_dev;
+ pci_set_drvdata(pci_dev, dev);
+
+ nand_controller_init(&dev->controller);
+ dev->controller.ops = &r852_ops;
+ chip->controller = &dev->controller;
+
+ dev->bounce_buffer = dma_alloc_coherent(&pci_dev->dev, R852_DMA_LEN,
+ &dev->phys_bounce_buffer, GFP_KERNEL);
+
+ if (!dev->bounce_buffer)
+ goto error6;
+
+
+ error = -ENODEV;
+ dev->mmio = pci_ioremap_bar(pci_dev, 0);
+
+ if (!dev->mmio)
+ goto error7;
+
+ error = -ENOMEM;
+ dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
+
+ if (!dev->tmp_buffer)
+ goto error8;
+
+ init_completion(&dev->dma_done);
+
+ dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
+
+ if (!dev->card_workqueue)
+ goto error9;
+
+ INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
+
+	/* shut down everything - precaution */
+ r852_engine_disable(dev);
+ r852_disable_irqs(dev);
+
+ r852_dma_test(dev);
+
+ dev->irq = pci_dev->irq;
+ spin_lock_init(&dev->irqlock);
+
+ dev->card_detected = 0;
+ r852_card_update_present(dev);
+
+	/* register the IRQ handler */
+ error = -ENODEV;
+ if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
+ DRV_NAME, dev))
+ goto error10;
+
+ /* kick initial present test */
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, 0);
+
+
+ pr_notice("driver loaded successfully\n");
+ return 0;
+
+error10:
+ destroy_workqueue(dev->card_workqueue);
+error9:
+ kfree(dev->tmp_buffer);
+error8:
+ pci_iounmap(pci_dev, dev->mmio);
+error7:
+ dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
+ dev->phys_bounce_buffer);
+error6:
+ kfree(dev);
+error5:
+ kfree(chip);
+error4:
+ pci_release_regions(pci_dev);
+error3:
+error2:
+ pci_disable_device(pci_dev);
+error1:
+ return error;
+}
+
+static void r852_remove(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+	/* Stop the card detect workqueue -
+	   we are going to unregister the device anyway */
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ destroy_workqueue(dev->card_workqueue);
+
+	/* Unregister the device; this might trigger more IO */
+ r852_unregister_nand_device(dev);
+
+ /* Stop interrupts */
+ r852_disable_irqs(dev);
+ free_irq(dev->irq, dev);
+
+ /* Cleanup */
+ kfree(dev->tmp_buffer);
+ pci_iounmap(pci_dev, dev->mmio);
+ dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
+ dev->phys_bounce_buffer);
+
+ kfree(dev->chip);
+ kfree(dev);
+
+ /* Shutdown the PCI device */
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+}
+
+static void r852_shutdown(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ r852_disable_irqs(dev);
+ synchronize_irq(dev->irq);
+ pci_disable_device(pci_dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int r852_suspend(struct device *device)
+{
+ struct r852_device *dev = dev_get_drvdata(device);
+
+ if (dev->ctlreg & R852_CTL_CARDENABLE)
+ return -EBUSY;
+
+ /* First make sure the detect work is gone */
+ cancel_delayed_work_sync(&dev->card_detect_work);
+
+ /* Turn off the interrupts and stop the device */
+ r852_disable_irqs(dev);
+ r852_engine_disable(dev);
+
+	/* If the card was pulled out just during suspend, which is very
+	   unlikely, we will remove it on resume - it is too late now
+	   anyway... */
+ dev->card_unstable = 0;
+ return 0;
+}
+
+static int r852_resume(struct device *device)
+{
+ struct r852_device *dev = dev_get_drvdata(device);
+
+ r852_disable_irqs(dev);
+ r852_card_update_present(dev);
+ r852_engine_disable(dev);
+
+
+ /* If card status changed, just do the work */
+ if (dev->card_detected != dev->card_registered) {
+ dbg("card was %s during low power state",
+ dev->card_detected ? "added" : "removed");
+
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, msecs_to_jiffies(1000));
+ return 0;
+ }
+
+ /* Otherwise, initialize the card */
+ if (dev->card_registered) {
+ r852_engine_enable(dev);
+ nand_select_target(dev->chip, 0);
+ nand_reset_op(dev->chip);
+ nand_deselect_target(dev->chip);
+ }
+
+ /* Program card detection IRQ */
+ r852_update_card_detect(dev);
+ return 0;
+}
+#endif
+
+static const struct pci_device_id r852_pci_id_tbl[] = {
+
+ { PCI_VDEVICE(RICOH, 0x0852), },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
+
+static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
+
+static struct pci_driver r852_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = r852_pci_id_tbl,
+ .probe = r852_probe,
+ .remove = r852_remove,
+ .shutdown = r852_shutdown,
+ .driver.pm = &r852_pm_ops,
+};
+
+module_pci_driver(r852_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
diff --git a/drivers/mtd/nand/raw/r852.h b/drivers/mtd/nand/raw/r852.h
new file mode 100644
index 0000000000..96fe301d15
--- /dev/null
+++ b/drivers/mtd/nand/raw/r852.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ */
+
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/spinlock.h>
+
+
+/* NAND interface + ECC.
+   A byte write/read does one cycle on the NAND data lines.
+   A dword write/read does 4 cycles.
+   If R852_CTL_ECC_ACCESS is set in R852_CTL, a dword read returns the
+   results of ECC correction, if a DMA read was done before.
+   If a write was done, two dword reads return the generated ECC checksums.
+*/
+#define R852_DATALINE 0x00
+
+/* control register */
+#define R852_CTL 0x04
+#define R852_CTL_COMMAND 0x01 /* send command (#CLE)*/
+#define R852_CTL_DATA 0x02 /* read/write data (#ALE)*/
+#define R852_CTL_ON		0x04 /* only seems to control the HD LED, */
+					/* but has to be set on start... */
+#define R852_CTL_RESET		0x08 /* unknown, set only once on start */
+#define R852_CTL_CARDENABLE 0x10 /* probably (#CE) - always set*/
+#define R852_CTL_ECC_ENABLE 0x20 /* enable ecc engine */
+#define R852_CTL_ECC_ACCESS 0x40 /* read/write ecc via reg #0*/
+#define R852_CTL_WRITE 0x80 /* set when performing writes (#WP) */
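+
+/* Illustrative sketch, not part of the driver: with R852_CTL_ECC_ACCESS set,
+   the ECC correction results could be fetched through reg #0 roughly like
+   this (helper names follow the register accessors used in r852.c; treat
+   this as a hedged example, not the driver's actual code):
+
+	r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+	ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
+	r852_write_reg(dev, R852_CTL, dev->ctlreg);
+*/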
+
+/* card detection status */
+#define R852_CARD_STA 0x05
+
+#define R852_CARD_STA_CD 0x01 /* state of #CD line, same as 0x04 */
+#define R852_CARD_STA_RO 0x02 /* card is readonly */
+#define R852_CARD_STA_PRESENT 0x04 /* card is present (#CD) */
+#define R852_CARD_STA_ABSENT 0x08 /* card is absent */
+#define R852_CARD_STA_BUSY 0x80 /* card is busy - (#R/B) */
+
+/* card detection irq status & enable*/
+#define R852_CARD_IRQ_STA 0x06 /* IRQ status */
+#define R852_CARD_IRQ_ENABLE 0x07 /* IRQ enable */
+
+#define R852_CARD_IRQ_CD 0x01 /* fire when #CD lights, same as 0x04*/
+#define R852_CARD_IRQ_REMOVE 0x04 /* detect card removal */
+#define R852_CARD_IRQ_INSERT 0x08 /* detect card insert */
+#define R852_CARD_IRQ_UNK1 0x10 /* unknown */
+#define R852_CARD_IRQ_GENABLE 0x80 /* general enable */
+#define R852_CARD_IRQ_MASK 0x1D
+
+
+
+/* hardware enable */
+#define R852_HW 0x08
+#define R852_HW_ENABLED 0x01 /* hw enabled */
+#define R852_HW_UNKNOWN 0x80
+
+
+/* dma capabilities */
+#define R852_DMA_CAP 0x09
+#define R852_SMBIT 0x20 /* if set with bit #6 or bit #7, then */
+ /* hw is smartmedia */
+#define R852_DMA1 0x40 /* if set w/bit #7, dma is supported */
+#define R852_DMA2 0x80 /* if set w/bit #6, dma is supported */
+
+
+/* physical DMA address - 32 bit value*/
+#define R852_DMA_ADDR 0x0C
+
+
+/* dma settings */
+#define R852_DMA_SETTINGS 0x10
+#define R852_DMA_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_READ 0x02 /* 0 = write, 1 = read */
+#define R852_DMA_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+
+/* dma IRQ status */
+#define R852_DMA_IRQ_STA 0x14
+
+/* dma IRQ enable */
+#define R852_DMA_IRQ_ENABLE 0x18
+
+#define R852_DMA_IRQ_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_IRQ_ERROR	0x02	/* an error occurred */
+#define R852_DMA_IRQ_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+#define R852_DMA_IRQ_MASK 0x07 /* mask of all IRQ bits */
+
+
+/* ECC syndrome format - a read from reg #0 returns two copies of these,
+   one for each half of the page.
+   The first byte is the error byte location; the second is the bit
+   location plus flags */
+#define R852_ECC_ERR_BIT_MSK 0x07 /* error bit location */
+#define R852_ECC_CORRECT 0x10 /* no errors - (guessed) */
+#define R852_ECC_CORRECTABLE	0x20 	/* a correctable error exists */
+#define R852_ECC_FAIL		0x40 	/* uncorrectable error detected */
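+
+/* Hedged usage sketch (illustration only, not the driver's code): acting on
+   one syndrome pair, where syndrome[0] is the error byte location and
+   syndrome[1] carries the bit location and the flags above:
+
+	if (syndrome[1] & R852_ECC_FAIL)
+		return -EBADMSG;
+	if (syndrome[1] & R852_ECC_CORRECTABLE)
+		buf[syndrome[0]] ^= BIT(syndrome[1] & R852_ECC_ERR_BIT_MSK);
+*/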
+
+#define R852_DMA_LEN 512
+
+#define DMA_INTERNAL 0
+#define DMA_MEMORY 1
+
+struct r852_device {
+ struct nand_controller controller;
+	void __iomem *mmio;		/* memory-mapped registers */
+ struct nand_chip *chip; /* nand chip backpointer */
+ struct pci_dev *pci_dev; /* pci backpointer */
+
+ /* dma area */
+ dma_addr_t phys_dma_addr; /* bus address of buffer*/
+ struct completion dma_done; /* data transfer done */
+
+ dma_addr_t phys_bounce_buffer; /* bus address of bounce buffer */
+ uint8_t *bounce_buffer; /* virtual address of bounce buffer */
+
+ int dma_dir; /* 1 = read, 0 = write */
+	int dma_stage;			/* 0 - idle, 1 - first step,
+					   2 - second step, 3 - done */
+
+ int dma_state; /* 0 = internal, 1 = memory */
+ int dma_error; /* dma errors */
+ int dma_usable; /* is it possible to use dma */
+
+ /* card status area */
+ struct delayed_work card_detect_work;
+ struct workqueue_struct *card_workqueue;
+ int card_registered; /* card registered with mtd */
+ int card_detected; /* card detected in slot */
+	int card_unstable;		/* card was just inserted and its
+					   state is not known yet */
+ int readonly; /* card is readonly */
+ int sm; /* Is card smartmedia */
+
+ /* interrupt handling */
+ spinlock_t irqlock; /* IRQ protecting lock */
+ int irq; /* irq num */
+ /* misc */
+ void *tmp_buffer; /* temporary buffer */
+ uint8_t ctlreg; /* cached contents of control reg */
+};
+
+#define dbg(format, ...) \
+ if (debug) \
+ pr_debug(format "\n", ## __VA_ARGS__)
+
+#define dbg_verbose(format, ...) \
+ if (debug > 1) \
+ pr_debug(format "\n", ## __VA_ARGS__)
+
+
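+/* Usage note (illustration): with the driver's 'debug' variable set to 1,
+   a call such as
+	dbg("bad dma IRQ status = %x", dma_status);
+   expands to pr_debug() with a trailing newline appended; dbg_verbose()
+   additionally requires debug > 1, and message() always prints via
+   pr_info(). */
+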
+#define message(format, ...) \
+ pr_info(format "\n", ## __VA_ARGS__)
diff --git a/drivers/mtd/nand/raw/renesas-nand-controller.c b/drivers/mtd/nand/raw/renesas-nand-controller.c
new file mode 100644
index 0000000000..589021ea9e
--- /dev/null
+++ b/drivers/mtd/nand/raw/renesas-nand-controller.c
@@ -0,0 +1,1417 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Evatronix/Renesas R-Car Gen3, RZ/N1D, RZ/N1S, RZ/N1L NAND controller driver
+ *
+ * Copyright (C) 2021 Schneider Electric
+ * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#define COMMAND_REG 0x00
+#define COMMAND_SEQ(x) FIELD_PREP(GENMASK(5, 0), (x))
+#define COMMAND_SEQ_10 COMMAND_SEQ(0x2A)
+#define COMMAND_SEQ_12 COMMAND_SEQ(0x0C)
+#define COMMAND_SEQ_18 COMMAND_SEQ(0x32)
+#define COMMAND_SEQ_19 COMMAND_SEQ(0x13)
+#define COMMAND_SEQ_GEN_IN COMMAND_SEQ_18
+#define COMMAND_SEQ_GEN_OUT COMMAND_SEQ_19
+#define COMMAND_SEQ_READ_PAGE COMMAND_SEQ_10
+#define COMMAND_SEQ_WRITE_PAGE COMMAND_SEQ_12
+#define COMMAND_INPUT_SEL_AHBS 0
+#define COMMAND_INPUT_SEL_DMA BIT(6)
+#define COMMAND_FIFO_SEL 0
+#define COMMAND_DATA_SEL BIT(7)
+#define COMMAND_0(x) FIELD_PREP(GENMASK(15, 8), (x))
+#define COMMAND_1(x) FIELD_PREP(GENMASK(23, 16), (x))
+#define COMMAND_2(x) FIELD_PREP(GENMASK(31, 24), (x))
+
+#define CONTROL_REG 0x04
+#define CONTROL_CHECK_RB_LINE 0
+#define CONTROL_ECC_BLOCK_SIZE(x) FIELD_PREP(GENMASK(2, 1), (x))
+#define CONTROL_ECC_BLOCK_SIZE_256 CONTROL_ECC_BLOCK_SIZE(0)
+#define CONTROL_ECC_BLOCK_SIZE_512 CONTROL_ECC_BLOCK_SIZE(1)
+#define CONTROL_ECC_BLOCK_SIZE_1024 CONTROL_ECC_BLOCK_SIZE(2)
+#define CONTROL_INT_EN BIT(4)
+#define CONTROL_ECC_EN BIT(5)
+#define CONTROL_BLOCK_SIZE(x) FIELD_PREP(GENMASK(7, 6), (x))
+#define CONTROL_BLOCK_SIZE_32P CONTROL_BLOCK_SIZE(0)
+#define CONTROL_BLOCK_SIZE_64P CONTROL_BLOCK_SIZE(1)
+#define CONTROL_BLOCK_SIZE_128P CONTROL_BLOCK_SIZE(2)
+#define CONTROL_BLOCK_SIZE_256P CONTROL_BLOCK_SIZE(3)
+
+#define STATUS_REG 0x8
+#define MEM_RDY(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
+#define CTRL_RDY(reg) (FIELD_GET(BIT(8), (reg)) == 0)
+
+#define ECC_CTRL_REG 0x18
+#define ECC_CTRL_CAP(x) FIELD_PREP(GENMASK(2, 0), (x))
+#define ECC_CTRL_CAP_2B ECC_CTRL_CAP(0)
+#define ECC_CTRL_CAP_4B ECC_CTRL_CAP(1)
+#define ECC_CTRL_CAP_8B ECC_CTRL_CAP(2)
+#define ECC_CTRL_CAP_16B ECC_CTRL_CAP(3)
+#define ECC_CTRL_CAP_24B ECC_CTRL_CAP(4)
+#define ECC_CTRL_CAP_32B ECC_CTRL_CAP(5)
+#define ECC_CTRL_ERR_THRESHOLD(x) FIELD_PREP(GENMASK(13, 8), (x))
+
+#define INT_MASK_REG 0x10
+#define INT_STATUS_REG 0x14
+#define INT_CMD_END BIT(1)
+#define INT_DMA_END BIT(3)
+#define INT_MEM_RDY(cs) FIELD_PREP(GENMASK(11, 8), BIT(cs))
+#define INT_DMA_ENDED BIT(3)
+#define MEM_IS_RDY(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))
+#define DMA_HAS_ENDED(reg) FIELD_GET(BIT(3), (reg))
+
+#define ECC_OFFSET_REG 0x1C
+#define ECC_OFFSET(x) FIELD_PREP(GENMASK(15, 0), (x))
+
+#define ECC_STAT_REG 0x20
+#define ECC_STAT_CORRECTABLE(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
+#define ECC_STAT_UNCORRECTABLE(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))
+
+#define ADDR0_COL_REG 0x24
+#define ADDR0_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
+
+#define ADDR0_ROW_REG 0x28
+#define ADDR0_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))
+
+#define ADDR1_COL_REG 0x2C
+#define ADDR1_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
+
+#define ADDR1_ROW_REG 0x30
+#define ADDR1_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))
+
+#define FIFO_DATA_REG 0x38
+
+#define DATA_REG 0x3C
+
+#define DATA_REG_SIZE_REG 0x40
+
+#define DMA_ADDR_LOW_REG 0x64
+
+#define DMA_ADDR_HIGH_REG 0x68
+
+#define DMA_CNT_REG 0x6C
+
+#define DMA_CTRL_REG 0x70
+#define DMA_CTRL_INCREMENT_BURST_4 0
+#define DMA_CTRL_REGISTER_MANAGED_MODE 0
+#define DMA_CTRL_START BIT(7)
+
+#define MEM_CTRL_REG 0x80
+#define MEM_CTRL_CS(cs) FIELD_PREP(GENMASK(1, 0), (cs))
+#define MEM_CTRL_DIS_WP(cs) FIELD_PREP(GENMASK(11, 8), BIT((cs)))
+
+#define DATA_SIZE_REG 0x84
+#define DATA_SIZE(x) FIELD_PREP(GENMASK(14, 0), (x))
+
+#define TIMINGS_ASYN_REG 0x88
+#define TIMINGS_ASYN_TRWP(x) FIELD_PREP(GENMASK(3, 0), max((x), 1U) - 1)
+#define TIMINGS_ASYN_TRWH(x) FIELD_PREP(GENMASK(7, 4), max((x), 1U) - 1)
+
+#define TIM_SEQ0_REG 0x90
+#define TIM_SEQ0_TCCS(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+#define TIM_SEQ0_TADL(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
+#define TIM_SEQ0_TRHW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
+#define TIM_SEQ0_TWHR(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
+
+#define TIM_SEQ1_REG 0x94
+#define TIM_SEQ1_TWB(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+#define TIM_SEQ1_TRR(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
+#define TIM_SEQ1_TWW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
+
+#define TIM_GEN_SEQ0_REG 0x98
+#define TIM_GEN_SEQ0_D0(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+#define TIM_GEN_SEQ0_D1(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
+#define TIM_GEN_SEQ0_D2(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
+#define TIM_GEN_SEQ0_D3(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
+
+#define TIM_GEN_SEQ1_REG 0x9c
+#define TIM_GEN_SEQ1_D4(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+#define TIM_GEN_SEQ1_D5(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
+#define TIM_GEN_SEQ1_D6(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
+#define TIM_GEN_SEQ1_D7(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
+
+#define TIM_GEN_SEQ2_REG 0xA0
+#define TIM_GEN_SEQ2_D8(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+#define TIM_GEN_SEQ2_D9(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
+#define TIM_GEN_SEQ2_D10(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
+#define TIM_GEN_SEQ2_D11(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
+
+#define FIFO_INIT_REG 0xB4
+#define FIFO_INIT BIT(0)
+
+#define FIFO_STATE_REG 0xB4
+#define FIFO_STATE_R_EMPTY(reg) FIELD_GET(BIT(0), (reg))
+#define FIFO_STATE_W_FULL(reg) FIELD_GET(BIT(1), (reg))
+#define FIFO_STATE_C_EMPTY(reg) FIELD_GET(BIT(2), (reg))
+#define FIFO_STATE_R_FULL(reg) FIELD_GET(BIT(6), (reg))
+#define FIFO_STATE_W_EMPTY(reg) FIELD_GET(BIT(7), (reg))
+
+#define GEN_SEQ_CTRL_REG 0xB8
+#define GEN_SEQ_CMD0_EN BIT(0)
+#define GEN_SEQ_CMD1_EN BIT(1)
+#define GEN_SEQ_CMD2_EN BIT(2)
+#define GEN_SEQ_CMD3_EN BIT(3)
+#define GEN_SEQ_COL_A0(x) FIELD_PREP(GENMASK(5, 4), min((x), 2U))
+#define GEN_SEQ_COL_A1(x) FIELD_PREP(GENMASK(7, 6), min((x), 2U))
+#define GEN_SEQ_ROW_A0(x) FIELD_PREP(GENMASK(9, 8), min((x), 3U))
+#define GEN_SEQ_ROW_A1(x) FIELD_PREP(GENMASK(11, 10), min((x), 3U))
+#define GEN_SEQ_DATA_EN BIT(12)
+#define GEN_SEQ_DELAY_EN(x) FIELD_PREP(GENMASK(14, 13), (x))
+#define GEN_SEQ_DELAY0_EN GEN_SEQ_DELAY_EN(1)
+#define GEN_SEQ_DELAY1_EN GEN_SEQ_DELAY_EN(2)
+#define GEN_SEQ_IMD_SEQ BIT(15)
+#define GEN_SEQ_COMMAND_3(x) FIELD_PREP(GENMASK(26, 16), (x))
+
+#define DMA_TLVL_REG 0x114
+#define DMA_TLVL(x) FIELD_PREP(GENMASK(7, 0), (x))
+#define DMA_TLVL_MAX DMA_TLVL(0xFF)
+
+#define TIM_GEN_SEQ3_REG 0x134
+#define TIM_GEN_SEQ3_D12(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+
+#define ECC_CNT_REG 0x14C
+#define ECC_CNT(cs, reg) FIELD_GET(GENMASK(5, 0), (reg) >> ((cs) * 8))
+
+#define RNANDC_CS_NUM 4
+
+#define TO_CYCLES64(ps, period_ns) ((unsigned int)DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
+ period_ns))
+
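+/*
+ * Worked example (illustration): with a 100 MHz external bus clock,
+ * period_ns = 10, so a minimum timing of 25000 ps (25 ns) gives
+ * TO_CYCLES64(25000, 10) = DIV_ROUND_UP(25, 10) = 3 cycles.
+ */
+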
+struct rnand_chip_sel {
+ unsigned int cs;
+};
+
+struct rnand_chip {
+ struct nand_chip chip;
+ struct list_head node;
+ int selected_die;
+ u32 ctrl;
+ unsigned int nsels;
+ u32 control;
+ u32 ecc_ctrl;
+ u32 timings_asyn;
+ u32 tim_seq0;
+ u32 tim_seq1;
+ u32 tim_gen_seq0;
+ u32 tim_gen_seq1;
+ u32 tim_gen_seq2;
+ u32 tim_gen_seq3;
+ struct rnand_chip_sel sels[];
+};
+
+struct rnandc {
+ struct nand_controller controller;
+ struct device *dev;
+ void __iomem *regs;
+ unsigned long ext_clk_rate;
+ unsigned long assigned_cs;
+ struct list_head chips;
+ struct nand_chip *selected_chip;
+ struct completion complete;
+ bool use_polling;
+ u8 *buf;
+ unsigned int buf_sz;
+};
+
+struct rnandc_op {
+ u32 command;
+ u32 addr0_col;
+ u32 addr0_row;
+ u32 addr1_col;
+ u32 addr1_row;
+ u32 data_size;
+ u32 ecc_offset;
+ u32 gen_seq_ctrl;
+ u8 *buf;
+ bool read;
+ unsigned int len;
+};
+
+static inline struct rnandc *to_rnandc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct rnandc, controller);
+}
+
+static inline struct rnand_chip *to_rnand(struct nand_chip *chip)
+{
+ return container_of(chip, struct rnand_chip, chip);
+}
+
+static inline unsigned int to_rnandc_cs(struct rnand_chip *nand)
+{
+ return nand->sels[nand->selected_die].cs;
+}
+
+static void rnandc_dis_correction(struct rnandc *rnandc)
+{
+ u32 control;
+
+ control = readl_relaxed(rnandc->regs + CONTROL_REG);
+ control &= ~CONTROL_ECC_EN;
+ writel_relaxed(control, rnandc->regs + CONTROL_REG);
+}
+
+static void rnandc_en_correction(struct rnandc *rnandc)
+{
+ u32 control;
+
+ control = readl_relaxed(rnandc->regs + CONTROL_REG);
+ control |= CONTROL_ECC_EN;
+ writel_relaxed(control, rnandc->regs + CONTROL_REG);
+}
+
+static void rnandc_clear_status(struct rnandc *rnandc)
+{
+ writel_relaxed(0, rnandc->regs + INT_STATUS_REG);
+ writel_relaxed(0, rnandc->regs + ECC_STAT_REG);
+ writel_relaxed(0, rnandc->regs + ECC_CNT_REG);
+}
+
+static void rnandc_dis_interrupts(struct rnandc *rnandc)
+{
+ writel_relaxed(0, rnandc->regs + INT_MASK_REG);
+}
+
+static void rnandc_en_interrupts(struct rnandc *rnandc, u32 val)
+{
+ if (!rnandc->use_polling)
+ writel_relaxed(val, rnandc->regs + INT_MASK_REG);
+}
+
+static void rnandc_clear_fifo(struct rnandc *rnandc)
+{
+ writel_relaxed(FIFO_INIT, rnandc->regs + FIFO_INIT_REG);
+}
+
+static void rnandc_select_target(struct nand_chip *chip, int die_nr)
+{
+ struct rnand_chip *rnand = to_rnand(chip);
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ unsigned int cs = rnand->sels[die_nr].cs;
+
+ if (chip == rnandc->selected_chip && die_nr == rnand->selected_die)
+ return;
+
+ rnandc_clear_status(rnandc);
+ writel_relaxed(MEM_CTRL_CS(cs) | MEM_CTRL_DIS_WP(cs), rnandc->regs + MEM_CTRL_REG);
+ writel_relaxed(rnand->control, rnandc->regs + CONTROL_REG);
+ writel_relaxed(rnand->ecc_ctrl, rnandc->regs + ECC_CTRL_REG);
+ writel_relaxed(rnand->timings_asyn, rnandc->regs + TIMINGS_ASYN_REG);
+ writel_relaxed(rnand->tim_seq0, rnandc->regs + TIM_SEQ0_REG);
+ writel_relaxed(rnand->tim_seq1, rnandc->regs + TIM_SEQ1_REG);
+ writel_relaxed(rnand->tim_gen_seq0, rnandc->regs + TIM_GEN_SEQ0_REG);
+ writel_relaxed(rnand->tim_gen_seq1, rnandc->regs + TIM_GEN_SEQ1_REG);
+ writel_relaxed(rnand->tim_gen_seq2, rnandc->regs + TIM_GEN_SEQ2_REG);
+ writel_relaxed(rnand->tim_gen_seq3, rnandc->regs + TIM_GEN_SEQ3_REG);
+
+ rnandc->selected_chip = chip;
+ rnand->selected_die = die_nr;
+}
+
+static void rnandc_trigger_op(struct rnandc *rnandc, struct rnandc_op *rop)
+{
+ writel_relaxed(rop->addr0_col, rnandc->regs + ADDR0_COL_REG);
+ writel_relaxed(rop->addr0_row, rnandc->regs + ADDR0_ROW_REG);
+ writel_relaxed(rop->addr1_col, rnandc->regs + ADDR1_COL_REG);
+ writel_relaxed(rop->addr1_row, rnandc->regs + ADDR1_ROW_REG);
+ writel_relaxed(rop->ecc_offset, rnandc->regs + ECC_OFFSET_REG);
+ writel_relaxed(rop->gen_seq_ctrl, rnandc->regs + GEN_SEQ_CTRL_REG);
+ writel_relaxed(DATA_SIZE(rop->len), rnandc->regs + DATA_SIZE_REG);
+ writel_relaxed(rop->command, rnandc->regs + COMMAND_REG);
+}
+
+static void rnandc_trigger_dma(struct rnandc *rnandc)
+{
+ writel_relaxed(DMA_CTRL_INCREMENT_BURST_4 |
+ DMA_CTRL_REGISTER_MANAGED_MODE |
+ DMA_CTRL_START, rnandc->regs + DMA_CTRL_REG);
+}
+
+static irqreturn_t rnandc_irq_handler(int irq, void *private)
+{
+ struct rnandc *rnandc = private;
+
+ rnandc_dis_interrupts(rnandc);
+ complete(&rnandc->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int rnandc_wait_end_of_op(struct rnandc *rnandc,
+ struct nand_chip *chip)
+{
+ struct rnand_chip *rnand = to_rnand(chip);
+ unsigned int cs = to_rnandc_cs(rnand);
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(rnandc->regs + STATUS_REG, status,
+ MEM_RDY(cs, status) && CTRL_RDY(status),
+ 1, 100000);
+ if (ret)
+ dev_err(rnandc->dev, "Operation timed out, status: 0x%08x\n",
+ status);
+
+ return ret;
+}
+
+static int rnandc_wait_end_of_io(struct rnandc *rnandc,
+ struct nand_chip *chip)
+{
+ int timeout_ms = 1000;
+ int ret;
+
+ if (rnandc->use_polling) {
+ struct rnand_chip *rnand = to_rnand(chip);
+ unsigned int cs = to_rnandc_cs(rnand);
+ u32 status;
+
+ ret = readl_poll_timeout(rnandc->regs + INT_STATUS_REG, status,
+ MEM_IS_RDY(cs, status) &
+ DMA_HAS_ENDED(status),
+ 0, timeout_ms * 1000);
+ } else {
+ ret = wait_for_completion_timeout(&rnandc->complete,
+ msecs_to_jiffies(timeout_ms));
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int rnandc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rnand_chip *rnand = to_rnand(chip);
+ unsigned int cs = to_rnandc_cs(rnand);
+ struct rnandc_op rop = {
+ .command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_READ0) |
+ COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
+ COMMAND_SEQ_READ_PAGE,
+ .addr0_row = page,
+ .len = mtd->writesize,
+ .ecc_offset = ECC_OFFSET(mtd->writesize + 2),
+ };
+ unsigned int max_bitflips = 0;
+ dma_addr_t dma_addr;
+ u32 ecc_stat;
+ int bf, ret, i;
+
+ /* Prepare controller */
+ rnandc_select_target(chip, chip->cur_cs);
+ rnandc_clear_status(rnandc);
+ reinit_completion(&rnandc->complete);
+ rnandc_en_interrupts(rnandc, INT_DMA_ENDED);
+ rnandc_en_correction(rnandc);
+
+ /* Configure DMA */
+ dma_addr = dma_map_single(rnandc->dev, rnandc->buf, mtd->writesize,
+ DMA_FROM_DEVICE);
+ writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
+ writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
+ writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
+
+ rnandc_trigger_op(rnandc, &rop);
+ rnandc_trigger_dma(rnandc);
+
+ ret = rnandc_wait_end_of_io(rnandc, chip);
+ dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_FROM_DEVICE);
+ rnandc_dis_correction(rnandc);
+ if (ret) {
+		dev_err(rnandc->dev, "Read page operation timed out\n");
+ return ret;
+ }
+
+ ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);
+
+ if (oob_required || ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
+ for (i = 0; i < chip->ecc.steps; i++) {
+ unsigned int off = i * chip->ecc.size;
+ unsigned int eccoff = i * chip->ecc.bytes;
+
+ bf = nand_check_erased_ecc_chunk(rnandc->buf + off,
+ chip->ecc.size,
+ chip->oob_poi + 2 + eccoff,
+ chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+ if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += bf;
+ max_bitflips = max_t(unsigned int, max_bitflips, bf);
+ }
+ }
+ } else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
+ bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
+ /*
+ * The number of bitflips is an approximation given the fact
+ * that this controller does not provide per-chunk details but
+ * only gives statistics on the entire page.
+ */
+ mtd->ecc_stats.corrected += bf;
+ }
+
+ memcpy(buf, rnandc->buf, mtd->writesize);
+
+ return 0;
+}
+
+static int rnandc_read_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
+ u32 req_len, u8 *bufpoi, int page)
+{
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rnand_chip *rnand = to_rnand(chip);
+ unsigned int cs = to_rnandc_cs(rnand);
+ unsigned int page_off = round_down(req_offset, chip->ecc.size);
+ unsigned int real_len = round_up(req_offset + req_len - page_off,
+ chip->ecc.size);
+ unsigned int start_chunk = page_off / chip->ecc.size;
+ unsigned int nchunks = real_len / chip->ecc.size;
+ unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
+ struct rnandc_op rop = {
+ .command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_READ0) |
+ COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
+ COMMAND_SEQ_READ_PAGE,
+ .addr0_row = page,
+ .addr0_col = page_off,
+ .len = real_len,
+ .ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
+ };
+ unsigned int max_bitflips = 0, i;
+ u32 ecc_stat;
+ int bf, ret;
+
+ /* Prepare controller */
+ rnandc_select_target(chip, chip->cur_cs);
+ rnandc_clear_status(rnandc);
+ rnandc_en_correction(rnandc);
+ rnandc_trigger_op(rnandc, &rop);
+
+ while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ ioread32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
+ real_len / 4);
+
+ if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
+ dev_err(rnandc->dev, "Clearing residual data in the read FIFO\n");
+ rnandc_clear_fifo(rnandc);
+ }
+
+ ret = rnandc_wait_end_of_op(rnandc, chip);
+ rnandc_dis_correction(rnandc);
+ if (ret) {
+		dev_err(rnandc->dev, "Read subpage operation timed out\n");
+ return ret;
+ }
+
+ ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);
+
+ if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+
+ for (i = start_chunk; i < nchunks; i++) {
+ unsigned int dataoff = i * chip->ecc.size;
+ unsigned int eccoff = 2 + (i * chip->ecc.bytes);
+
+ bf = nand_check_erased_ecc_chunk(bufpoi + dataoff,
+ chip->ecc.size,
+ chip->oob_poi + eccoff,
+ chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+ if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += bf;
+ max_bitflips = max_t(unsigned int, max_bitflips, bf);
+ }
+ }
+ } else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
+ bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
+ /*
+ * The number of bitflips is an approximation given the fact
+ * that this controller does not provide per-chunk details but
+ * only gives statistics on the entire page.
+ */
+ mtd->ecc_stats.corrected += bf;
+ }
+
+ return 0;
+}
+
+static int rnandc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rnand_chip *rnand = to_rnand(chip);
+ unsigned int cs = to_rnandc_cs(rnand);
+ struct rnandc_op rop = {
+ .command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_SEQIN) |
+ COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
+ COMMAND_SEQ_WRITE_PAGE,
+ .addr0_row = page,
+ .len = mtd->writesize,
+ .ecc_offset = ECC_OFFSET(mtd->writesize + 2),
+ };
+ dma_addr_t dma_addr;
+ int ret;
+
+ memcpy(rnandc->buf, buf, mtd->writesize);
+
+ /* Prepare controller */
+ rnandc_select_target(chip, chip->cur_cs);
+ rnandc_clear_status(rnandc);
+ reinit_completion(&rnandc->complete);
+ rnandc_en_interrupts(rnandc, INT_MEM_RDY(cs));
+ rnandc_en_correction(rnandc);
+
+ /* Configure DMA */
+ dma_addr = dma_map_single(rnandc->dev, (void *)rnandc->buf, mtd->writesize,
+ DMA_TO_DEVICE);
+ writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
+ writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
+ writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
+
+ rnandc_trigger_op(rnandc, &rop);
+ rnandc_trigger_dma(rnandc);
+
+ ret = rnandc_wait_end_of_io(rnandc, chip);
+ dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_TO_DEVICE);
+ rnandc_dis_correction(rnandc);
+ if (ret) {
+		dev_err(rnandc->dev, "Write page operation timed out\n");
+ return ret;
+ }
+
+ if (!oob_required)
+ return 0;
+
+ return nand_change_write_column_op(chip, mtd->writesize, chip->oob_poi,
+ mtd->oobsize, false);
+}
+
+static int rnandc_write_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
+ u32 req_len, const u8 *bufpoi,
+ int oob_required, int page)
+{
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int page_off = round_down(req_offset, chip->ecc.size);
+ unsigned int real_len = round_up(req_offset + req_len - page_off,
+ chip->ecc.size);
+ unsigned int start_chunk = page_off / chip->ecc.size;
+ unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
+ struct rnandc_op rop = {
+ .command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_SEQIN) |
+ COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
+ COMMAND_SEQ_WRITE_PAGE,
+ .addr0_row = page,
+ .addr0_col = page_off,
+ .len = real_len,
+ .ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
+ };
+ int ret;
+
+ /* Prepare controller */
+ rnandc_select_target(chip, chip->cur_cs);
+ rnandc_clear_status(rnandc);
+ rnandc_en_correction(rnandc);
+ rnandc_trigger_op(rnandc, &rop);
+
+ while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ iowrite32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
+ real_len / 4);
+
+ while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ ret = rnandc_wait_end_of_op(rnandc, chip);
+ rnandc_dis_correction(rnandc);
+ if (ret) {
+		dev_err(rnandc->dev, "Write subpage operation timed out\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * This controller is simple enough that it does not need the parser
+ * provided by the core; instead, every situation is handled here.
+ */
+static int rnandc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ const struct nand_op_instr *instr = NULL;
+ struct rnandc_op rop = {
+ .command = COMMAND_INPUT_SEL_AHBS,
+ .gen_seq_ctrl = GEN_SEQ_IMD_SEQ,
+ };
+ unsigned int cmd_phase = 0, addr_phase = 0, data_phase = 0,
+ delay_phase = 0, delays = 0;
+ unsigned int op_id, col_addrs, row_addrs, naddrs, remainder, words, i;
+ const u8 *addrs;
+ u32 last_bytes;
+ int ret;
+
+ if (!check_only)
+ rnandc_select_target(chip, op->cs);
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ nand_op_trace(" ", instr);
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ switch (cmd_phase++) {
+ case 0:
+ rop.command |= COMMAND_0(instr->ctx.cmd.opcode);
+ rop.gen_seq_ctrl |= GEN_SEQ_CMD0_EN;
+ break;
+ case 1:
+ rop.gen_seq_ctrl |= GEN_SEQ_COMMAND_3(instr->ctx.cmd.opcode);
+ rop.gen_seq_ctrl |= GEN_SEQ_CMD3_EN;
+ if (addr_phase == 0)
+ addr_phase = 1;
+ break;
+ case 2:
+ rop.command |= COMMAND_2(instr->ctx.cmd.opcode);
+ rop.gen_seq_ctrl |= GEN_SEQ_CMD2_EN;
+ if (addr_phase <= 1)
+ addr_phase = 2;
+ break;
+ case 3:
+ rop.command |= COMMAND_1(instr->ctx.cmd.opcode);
+ rop.gen_seq_ctrl |= GEN_SEQ_CMD1_EN;
+ if (addr_phase <= 1)
+ addr_phase = 2;
+ if (delay_phase == 0)
+ delay_phase = 1;
+ if (data_phase == 0)
+ data_phase = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ addrs = instr->ctx.addr.addrs;
+ naddrs = instr->ctx.addr.naddrs;
+ if (naddrs > 5)
+ return -EOPNOTSUPP;
+
+ col_addrs = min(2U, naddrs);
+ row_addrs = naddrs > 2 ? naddrs - col_addrs : 0;
+
+ switch (addr_phase++) {
+ case 0:
+ for (i = 0; i < col_addrs; i++)
+ rop.addr0_col |= addrs[i] << (i * 8);
+ rop.gen_seq_ctrl |= GEN_SEQ_COL_A0(col_addrs);
+
+ for (i = 0; i < row_addrs; i++)
+ rop.addr0_row |= addrs[2 + i] << (i * 8);
+ rop.gen_seq_ctrl |= GEN_SEQ_ROW_A0(row_addrs);
+
+ if (cmd_phase == 0)
+ cmd_phase = 1;
+ break;
+ case 1:
+ for (i = 0; i < col_addrs; i++)
+ rop.addr1_col |= addrs[i] << (i * 8);
+ rop.gen_seq_ctrl |= GEN_SEQ_COL_A1(col_addrs);
+
+ for (i = 0; i < row_addrs; i++)
+ rop.addr1_row |= addrs[2 + i] << (i * 8);
+ rop.gen_seq_ctrl |= GEN_SEQ_ROW_A1(row_addrs);
+
+ if (cmd_phase <= 1)
+ cmd_phase = 2;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ rop.read = true;
+ fallthrough;
+ case NAND_OP_DATA_OUT_INSTR:
+ rop.gen_seq_ctrl |= GEN_SEQ_DATA_EN;
+ rop.buf = instr->ctx.data.buf.in;
+ rop.len = instr->ctx.data.len;
+ rop.command |= COMMAND_FIFO_SEL;
+
+ switch (data_phase++) {
+ case 0:
+ if (cmd_phase <= 2)
+ cmd_phase = 3;
+ if (addr_phase <= 1)
+ addr_phase = 2;
+ if (delay_phase == 0)
+ delay_phase = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ switch (delay_phase++) {
+ case 0:
+ rop.gen_seq_ctrl |= GEN_SEQ_DELAY0_EN;
+
+ if (cmd_phase <= 2)
+ cmd_phase = 3;
+ break;
+ case 1:
+ rop.gen_seq_ctrl |= GEN_SEQ_DELAY1_EN;
+
+ if (cmd_phase <= 3)
+ cmd_phase = 4;
+ if (data_phase == 0)
+ data_phase = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ }
+ }
+
+ /*
+ * Sequence 19 is generic and dedicated to write operations.
+ * Sequence 18 is also generic and works for all other operations.
+ */
+ if (rop.buf && !rop.read)
+ rop.command |= COMMAND_SEQ_GEN_OUT;
+ else
+ rop.command |= COMMAND_SEQ_GEN_IN;
+
+ if (delays > 1) {
+ dev_err(rnandc->dev, "Cannot handle more than one wait delay\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (check_only)
+ return 0;
+
+ rnandc_trigger_op(rnandc, &rop);
+
+ words = rop.len / sizeof(u32);
+ remainder = rop.len % sizeof(u32);
+ if (rop.buf && rop.read) {
+ while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ ioread32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf, words);
+ if (remainder) {
+ last_bytes = readl_relaxed(rnandc->regs + FIFO_DATA_REG);
+ memcpy(rop.buf + (words * sizeof(u32)), &last_bytes,
+ remainder);
+ }
+
+ if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
+ dev_warn(rnandc->dev,
+ "Clearing residual data in the read FIFO\n");
+ rnandc_clear_fifo(rnandc);
+ }
+ } else if (rop.len && !rop.read) {
+ while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ iowrite32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf,
+ DIV_ROUND_UP(rop.len, 4));
+
+ if (remainder) {
+ last_bytes = 0;
+ memcpy(&last_bytes, rop.buf + (words * sizeof(u32)), remainder);
+ writel_relaxed(last_bytes, rnandc->regs + FIFO_DATA_REG);
+ }
+
+ while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+ }
+
+ ret = rnandc_wait_end_of_op(rnandc, chip);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rnandc_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ struct rnand_chip *rnand = to_rnand(chip);
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ unsigned int period_ns = 1000000000 / rnandc->ext_clk_rate;
+ const struct nand_sdr_timings *sdr;
+ unsigned int cyc, cle, ale, bef_dly, ca_to_data;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ if (sdr->tRP_min != sdr->tWP_min || sdr->tREH_min != sdr->tWH_min) {
+ dev_err(rnandc->dev, "Read and write hold times must be identical\n");
+ return -EINVAL;
+ }
+
+ if (chipnr < 0)
+ return 0;
+
+ rnand->timings_asyn =
+ TIMINGS_ASYN_TRWP(TO_CYCLES64(sdr->tRP_min, period_ns)) |
+ TIMINGS_ASYN_TRWH(TO_CYCLES64(sdr->tREH_min, period_ns));
+ rnand->tim_seq0 =
+ TIM_SEQ0_TCCS(TO_CYCLES64(sdr->tCCS_min, period_ns)) |
+ TIM_SEQ0_TADL(TO_CYCLES64(sdr->tADL_min, period_ns)) |
+ TIM_SEQ0_TRHW(TO_CYCLES64(sdr->tRHW_min, period_ns)) |
+ TIM_SEQ0_TWHR(TO_CYCLES64(sdr->tWHR_min, period_ns));
+ rnand->tim_seq1 =
+ TIM_SEQ1_TWB(TO_CYCLES64(sdr->tWB_max, period_ns)) |
+ TIM_SEQ1_TRR(TO_CYCLES64(sdr->tRR_min, period_ns)) |
+ TIM_SEQ1_TWW(TO_CYCLES64(sdr->tWW_min, period_ns));
+
+ cyc = sdr->tDS_min + sdr->tDH_min;
+ cle = sdr->tCLH_min + sdr->tCLS_min;
+ ale = sdr->tALH_min + sdr->tALS_min;
+ bef_dly = sdr->tWB_max - sdr->tDH_min;
+ ca_to_data = sdr->tWHR_min + sdr->tREA_max - sdr->tDH_min;
+
+ /*
+ * D0 = CMD -> ADDR = tCLH + tCLS - 1 cycle
+ * D1 = CMD -> CMD = tCLH + tCLS - 1 cycle
+ * D2 = CMD -> DLY = tWB - tDH
+ * D3 = CMD -> DATA = tWHR + tREA - tDH
+ */
+ rnand->tim_gen_seq0 =
+ TIM_GEN_SEQ0_D0(TO_CYCLES64(cle - cyc, period_ns)) |
+ TIM_GEN_SEQ0_D1(TO_CYCLES64(cle - cyc, period_ns)) |
+ TIM_GEN_SEQ0_D2(TO_CYCLES64(bef_dly, period_ns)) |
+ TIM_GEN_SEQ0_D3(TO_CYCLES64(ca_to_data, period_ns));
+
+ /*
+	 * D4 = ADDR -> CMD = tALH + tALS - 1 cycle
+	 * D5 = ADDR -> ADDR = tALH + tALS - 1 cycle
+ * D6 = ADDR -> DLY = tWB - tDH
+ * D7 = ADDR -> DATA = tWHR + tREA - tDH
+ */
+ rnand->tim_gen_seq1 =
+ TIM_GEN_SEQ1_D4(TO_CYCLES64(ale - cyc, period_ns)) |
+ TIM_GEN_SEQ1_D5(TO_CYCLES64(ale - cyc, period_ns)) |
+ TIM_GEN_SEQ1_D6(TO_CYCLES64(bef_dly, period_ns)) |
+ TIM_GEN_SEQ1_D7(TO_CYCLES64(ca_to_data, period_ns));
+
+ /*
+ * D8 = DLY -> DATA = tRR + tREA
+ * D9 = DLY -> CMD = tRR
+ * D10 = DATA -> CMD = tCLH + tCLS - 1 cycle
+ * D11 = DATA -> DLY = tWB - tDH
+ */
+ rnand->tim_gen_seq2 =
+ TIM_GEN_SEQ2_D8(TO_CYCLES64(sdr->tRR_min + sdr->tREA_max, period_ns)) |
+ TIM_GEN_SEQ2_D9(TO_CYCLES64(sdr->tRR_min, period_ns)) |
+ TIM_GEN_SEQ2_D10(TO_CYCLES64(cle - cyc, period_ns)) |
+ TIM_GEN_SEQ2_D11(TO_CYCLES64(bef_dly, period_ns));
+
+ /* D12 = DATA -> END = tCLH - tDH */
+ rnand->tim_gen_seq3 =
+ TIM_GEN_SEQ3_D12(TO_CYCLES64(sdr->tCLH_min - sdr->tDH_min, period_ns));
+
+ return 0;
+}
+
+static int rnandc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2;
+ oobregion->length = eccbytes;
+
+ return 0;
+}
+
+static int rnandc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2 + eccbytes;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops rnandc_ooblayout_ops = {
+ .ecc = rnandc_ooblayout_ecc,
+ .free = rnandc_ooblayout_free,
+};
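+
+/*
+ * Worked example (illustration, assumed geometry): with 512-byte ECC steps
+ * at strength 8 (14 ECC bytes per step, rounded up to 16) on a 4096-byte
+ * page there are 8 steps, so the ECC region covers OOB bytes [2, 130) and
+ * the free region runs from byte 130 to the end of the OOB area.
+ */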
+
+static int rnandc_hw_ecc_controller_init(struct nand_chip *chip)
+{
+ struct rnand_chip *rnand = to_rnand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+
+ if (mtd->writesize > SZ_16K) {
+ dev_err(rnandc->dev, "Unsupported page size\n");
+ return -EINVAL;
+ }
+
+ switch (chip->ecc.size) {
+ case SZ_256:
+ rnand->control |= CONTROL_ECC_BLOCK_SIZE_256;
+ break;
+ case SZ_512:
+ rnand->control |= CONTROL_ECC_BLOCK_SIZE_512;
+ break;
+ case SZ_1K:
+ rnand->control |= CONTROL_ECC_BLOCK_SIZE_1024;
+ break;
+ default:
+ dev_err(rnandc->dev, "Unsupported ECC chunk size\n");
+ return -EINVAL;
+ }
+
+ switch (chip->ecc.strength) {
+ case 2:
+ chip->ecc.bytes = 4;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_2B;
+ break;
+ case 4:
+ chip->ecc.bytes = 7;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_4B;
+ break;
+ case 8:
+ chip->ecc.bytes = 14;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_8B;
+ break;
+ case 16:
+ chip->ecc.bytes = 28;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_16B;
+ break;
+ case 24:
+ chip->ecc.bytes = 42;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_24B;
+ break;
+ case 32:
+ chip->ecc.bytes = 56;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_32B;
+ break;
+ default:
+ dev_err(rnandc->dev, "Unsupported ECC strength\n");
+ return -EINVAL;
+ }
+
+ rnand->ecc_ctrl |= ECC_CTRL_ERR_THRESHOLD(chip->ecc.strength);
+
+ mtd_set_ooblayout(mtd, &rnandc_ooblayout_ops);
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ chip->ecc.read_page = rnandc_read_page_hw_ecc;
+ chip->ecc.read_subpage = rnandc_read_subpage_hw_ecc;
+ chip->ecc.write_page = rnandc_write_page_hw_ecc;
+ chip->ecc.write_subpage = rnandc_write_subpage_hw_ecc;
+
+ return 0;
+}
+
+static int rnandc_ecc_init(struct nand_chip *chip)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ int ret;
+
+ if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
+ (!ecc->size || !ecc->strength)) {
+ if (requirements->step_size && requirements->strength) {
+ ecc->size = requirements->step_size;
+ ecc->strength = requirements->strength;
+ } else {
+ dev_err(rnandc->dev, "No minimum ECC strength\n");
+ return -EINVAL;
+ }
+ }
+
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = rnandc_hw_ecc_controller_init(chip);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rnandc_attach_chip(struct nand_chip *chip)
+{
+ struct rnand_chip *rnand = to_rnand(chip);
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg = nanddev_get_memorg(&chip->base);
+ int ret;
+
+ /* Do not store BBT bits in the OOB section as it is not protected */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (mtd->writesize <= 512) {
+ dev_err(rnandc->dev, "Small page devices not supported\n");
+ return -EINVAL;
+ }
+
+ rnand->control |= CONTROL_CHECK_RB_LINE | CONTROL_INT_EN;
+
+ switch (memorg->pages_per_eraseblock) {
+ case 32:
+ rnand->control |= CONTROL_BLOCK_SIZE_32P;
+ break;
+ case 64:
+ rnand->control |= CONTROL_BLOCK_SIZE_64P;
+ break;
+ case 128:
+ rnand->control |= CONTROL_BLOCK_SIZE_128P;
+ break;
+ case 256:
+ rnand->control |= CONTROL_BLOCK_SIZE_256P;
+ break;
+ default:
+ dev_err(rnandc->dev, "Unsupported memory organization\n");
+ return -EINVAL;
+ }
+
+ chip->options |= NAND_SUBPAGE_READ;
+
+ ret = rnandc_ecc_init(chip);
+ if (ret) {
+ dev_err(rnandc->dev, "ECC initialization failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* Force an update of the configuration registers */
+ rnand->selected_die = -1;
+
+ return 0;
+}
+
+static const struct nand_controller_ops rnandc_ops = {
+ .attach_chip = rnandc_attach_chip,
+ .exec_op = rnandc_exec_op,
+ .setup_interface = rnandc_setup_interface,
+};
+
+static int rnandc_alloc_dma_buf(struct rnandc *rnandc,
+ struct mtd_info *new_mtd)
+{
+ unsigned int max_len = new_mtd->writesize + new_mtd->oobsize;
+ struct rnand_chip *entry, *temp;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+
+ list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
+ chip = &entry->chip;
+ mtd = nand_to_mtd(chip);
+ max_len = max(max_len, mtd->writesize + mtd->oobsize);
+ }
+
+ if (rnandc->buf && rnandc->buf_sz < max_len) {
+ devm_kfree(rnandc->dev, rnandc->buf);
+ rnandc->buf = NULL;
+ }
+
+ if (!rnandc->buf) {
+ rnandc->buf_sz = max_len;
+ rnandc->buf = devm_kmalloc(rnandc->dev, max_len,
+ GFP_KERNEL | GFP_DMA);
+ if (!rnandc->buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int rnandc_chip_init(struct rnandc *rnandc, struct device_node *np)
+{
+ struct rnand_chip *rnand;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int nsels, ret, i;
+ u32 cs;
+
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (nsels <= 0) {
+ ret = (nsels < 0) ? nsels : -EINVAL;
+ dev_err(rnandc->dev, "Invalid reg property (%d)\n", ret);
+ return ret;
+ }
+
+ /* Alloc the driver's NAND chip structure */
+ rnand = devm_kzalloc(rnandc->dev, struct_size(rnand, sels, nsels),
+ GFP_KERNEL);
+ if (!rnand)
+ return -ENOMEM;
+
+ rnand->nsels = nsels;
+ rnand->selected_die = -1;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(rnandc->dev, "Incomplete reg property (%d)\n", ret);
+ return ret;
+ }
+
+ if (cs >= RNANDC_CS_NUM) {
+ dev_err(rnandc->dev, "Invalid reg property (%d)\n", cs);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &rnandc->assigned_cs)) {
+ dev_err(rnandc->dev, "CS %d already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ /*
+ * No need to check for RB or WP properties, there is a 1:1
+ * mandatory mapping with the CS.
+ */
+ rnand->sels[i].cs = cs;
+ }
+
+ chip = &rnand->chip;
+ chip->controller = &rnandc->controller;
+ nand_set_flash_node(chip, np);
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = rnandc->dev;
+ if (!mtd->name) {
+ dev_err(rnandc->dev, "Missing MTD label\n");
+ return -EINVAL;
+ }
+
+ ret = nand_scan(chip, rnand->nsels);
+ if (ret) {
+ dev_err(rnandc->dev, "Failed to scan the NAND chip (%d)\n", ret);
+ return ret;
+ }
+
+ ret = rnandc_alloc_dma_buf(rnandc, mtd);
+ if (ret)
+ goto cleanup_nand;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(rnandc->dev, "Failed to register MTD device (%d)\n", ret);
+ goto cleanup_nand;
+ }
+
+ list_add_tail(&rnand->node, &rnandc->chips);
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(chip);
+
+ return ret;
+}
+
+static void rnandc_chips_cleanup(struct rnandc *rnandc)
+{
+ struct rnand_chip *entry, *temp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
+ chip = &entry->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&entry->node);
+ }
+}
+
+static int rnandc_chips_init(struct rnandc *rnandc)
+{
+ struct device_node *np;
+ int ret;
+
+ for_each_child_of_node(rnandc->dev->of_node, np) {
+ ret = rnandc_chip_init(rnandc, np);
+ if (ret) {
+ of_node_put(np);
+ goto cleanup_chips;
+ }
+ }
+
+ return 0;
+
+cleanup_chips:
+ rnandc_chips_cleanup(rnandc);
+
+ return ret;
+}
+
+static int rnandc_probe(struct platform_device *pdev)
+{
+ struct rnandc *rnandc;
+ struct clk *eclk;
+ int irq, ret;
+
+ rnandc = devm_kzalloc(&pdev->dev, sizeof(*rnandc), GFP_KERNEL);
+ if (!rnandc)
+ return -ENOMEM;
+
+ rnandc->dev = &pdev->dev;
+ nand_controller_init(&rnandc->controller);
+ rnandc->controller.ops = &rnandc_ops;
+ INIT_LIST_HEAD(&rnandc->chips);
+ init_completion(&rnandc->complete);
+
+ rnandc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rnandc->regs))
+ return PTR_ERR(rnandc->regs);
+
+ devm_pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ /* The external NAND bus clock rate is needed for computing timings */
+ eclk = clk_get(&pdev->dev, "eclk");
+ if (IS_ERR(eclk)) {
+ ret = PTR_ERR(eclk);
+ goto dis_runtime_pm;
+ }
+
+ rnandc->ext_clk_rate = clk_get_rate(eclk);
+ clk_put(eclk);
+
+ rnandc_dis_interrupts(rnandc);
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq == -EPROBE_DEFER) {
+ ret = irq;
+ goto dis_runtime_pm;
+ } else if (irq < 0) {
+ dev_info(&pdev->dev, "No IRQ found, fallback to polling\n");
+ rnandc->use_polling = true;
+ } else {
+ ret = devm_request_irq(&pdev->dev, irq, rnandc_irq_handler, 0,
+ "renesas-nand-controller", rnandc);
+ if (ret < 0)
+ goto dis_runtime_pm;
+ }
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto dis_runtime_pm;
+
+ rnandc_clear_fifo(rnandc);
+
+ platform_set_drvdata(pdev, rnandc);
+
+ ret = rnandc_chips_init(rnandc);
+ if (ret)
+ goto dis_runtime_pm;
+
+ return 0;
+
+dis_runtime_pm:
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static void rnandc_remove(struct platform_device *pdev)
+{
+ struct rnandc *rnandc = platform_get_drvdata(pdev);
+
+ rnandc_chips_cleanup(rnandc);
+
+ pm_runtime_put(&pdev->dev);
+}
+
+static const struct of_device_id rnandc_id_table[] = {
+ { .compatible = "renesas,rcar-gen3-nandc" },
+ { .compatible = "renesas,rzn1-nandc" },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, rnandc_id_table);
+
+static struct platform_driver rnandc_driver = {
+ .driver = {
+ .name = "renesas-nandc",
+ .of_match_table = rnandc_id_table,
+ },
+ .probe = rnandc_probe,
+ .remove_new = rnandc_remove,
+};
+module_platform_driver(rnandc_driver);
+
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Renesas R-Car Gen3 & RZ/N1 NAND controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
new file mode 100644
index 0000000000..5bc90ffa72
--- /dev/null
+++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
@@ -0,0 +1,1494 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Rockchip NAND Flash controller driver.
+ * Copyright (C) 2020 Rockchip Inc.
+ * Author: Yifeng Zhao <yifeng.zhao@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * NFC page data layout:
+ * 1024 bytes data + 4 bytes sys data + 28~124 bytes ECC data +
+ * 1024 bytes data + 4 bytes sys data + 28~124 bytes ECC data +
+ * ......
+ * NAND page data layout:
+ * 1024 * n bytes data + m bytes OOB
+ * Original bad block mask location:
+ * first byte of the OOB (spare) area.
+ * nand_chip->oob_poi data layout:
+ * 4 bytes sys data + .... + 4 bytes sys data + ECC data.
+ */
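+
+/*
+ * Worked example (illustration): on a 2048-byte page there are two
+ * 1024-byte chunks, so nand_chip->oob_poi starts with 2 * 4 = 8 bytes of
+ * sys data followed by both chunks' ECC bytes, while the factory bad block
+ * marker remains the first OOB byte of the NAND layout.
+ */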
+
+/* NAND controller register definition */
+#define NFC_READ (0)
+#define NFC_WRITE (1)
+
+#define NFC_FMCTL (0x00)
+#define FMCTL_CE_SEL_M 0xFF
+#define FMCTL_CE_SEL(x) (1 << (x))
+#define FMCTL_WP BIT(8)
+#define FMCTL_RDY BIT(9)
+
+#define NFC_FMWAIT (0x04)
+#define FLCTL_RST BIT(0)
+#define FLCTL_WR (1) /* 0: read, 1: write */
+#define FLCTL_XFER_ST BIT(2)
+#define FLCTL_XFER_EN BIT(3)
+#define FLCTL_ACORRECT BIT(10) /* Auto correct error bits. */
+#define FLCTL_XFER_READY BIT(20)
+#define FLCTL_XFER_SECTOR (22)
+#define FLCTL_TOG_FIX BIT(29)
+
+#define BCHCTL_BANK_M (7 << 5)
+#define BCHCTL_BANK (5)
+
+#define DMA_ST BIT(0)
+#define DMA_WR (1) /* 0: write, 1: read */
+#define DMA_EN BIT(2)
+#define DMA_AHB_SIZE (3) /* 0: 1, 1: 2, 2: 4 */
+#define DMA_BURST_SIZE (6) /* 0: 1, 3: 4, 5: 8, 7: 16 */
+#define DMA_INC_NUM (9) /* 1 - 16 */
+
+#define ECC_ERR_CNT(x, e) ((((x) >> (e).low) & (e).low_mask) |\
+ (((x) >> (e).high) & (e).high_mask) << (e).low_bn)
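+/*
+ * Illustration of ECC_ERR_CNT() above (hypothetical field values): with the
+ * low 5 bits of the count at bit 3 (low = 3, low_mask = 0x1F, low_bn = 5)
+ * and the high 2 bits at bit 27 (high = 27, high_mask = 0x3), the macro
+ * rebuilds the 7-bit count as low_bits | (high_bits << 5).
+ */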
+#define INT_DMA BIT(0)
+#define NFC_BANK (0x800)
+#define NFC_BANK_STEP (0x100)
+#define BANK_DATA (0x00)
+#define BANK_ADDR (0x04)
+#define BANK_CMD (0x08)
+#define NFC_SRAM0 (0x1000)
+#define NFC_SRAM1 (0x1400)
+#define NFC_SRAM_SIZE (0x400)
+#define NFC_TIMEOUT (500000)
+#define NFC_MAX_OOB_PER_STEP 128
+#define NFC_MIN_OOB_PER_STEP 64
+#define MAX_DATA_SIZE 0xFFFC
+#define MAX_ADDRESS_CYC 6
+#define NFC_ECC_MAX_MODES 4
+#define NFC_MAX_NSELS		(8) /* Some SoCs only have 1 or 2 CSs. */
+#define NFC_SYS_DATA_SIZE	(4) /* 4 bytes sys data in OOB per 1024 bytes data. */
+#define RK_DEFAULT_CLOCK_RATE	(150 * 1000 * 1000) /* 150 MHz */
+#define ACCTIMING(csrw, rwpw, rwcs) ((csrw) << 12 | (rwpw) << 5 | (rwcs))
+
+enum nfc_type {
+ NFC_V6,
+ NFC_V8,
+ NFC_V9,
+};
+
+/**
+ * struct ecc_cnt_status - ECC count/status field description.
+ * @err_flag_bit: error flag bit index in the register.
+ * @low: ECC count low bits index in the register.
+ * @low_mask: low bits mask.
+ * @low_bn: number of ECC count low bits.
+ * @high: ECC count high bits index in the register.
+ * @high_mask: high bits mask.
+ */
+struct ecc_cnt_status {
+ u8 err_flag_bit;
+ u8 low;
+ u8 low_mask;
+ u8 low_bn;
+ u8 high;
+ u8 high_mask;
+};
+
+/**
+ * struct nfc_cfg - NAND flash controller configuration.
+ * @type: NFC version
+ * @ecc_strengths: ECC strengths
+ * @ecc_cfgs: ECC config values
+ * @flctl_off: FLCTL register offset
+ * @bchctl_off: BCHCTL register offset
+ * @dma_data_buf_off: DMA_DATA_BUF register offset
+ * @dma_oob_buf_off: DMA_OOB_BUF register offset
+ * @dma_cfg_off: DMA_CFG register offset
+ * @dma_st_off: DMA_ST register offset
+ * @bch_st_off: BCG_ST register offset
+ * @randmz_off: RANDMZ register offset
+ * @int_en_off: interrupt enable register offset
+ * @int_clr_off: interrupt clean register offset
+ * @int_st_off: interrupt status register offset
+ * @oob0_off: oob0 register offset
+ * @oob1_off: oob1 register offset
+ * @ecc0: represent ECC0 status data
+ * @ecc1: represent ECC1 status data
+ */
+struct nfc_cfg {
+ enum nfc_type type;
+ u8 ecc_strengths[NFC_ECC_MAX_MODES];
+ u32 ecc_cfgs[NFC_ECC_MAX_MODES];
+ u32 flctl_off;
+ u32 bchctl_off;
+ u32 dma_cfg_off;
+ u32 dma_data_buf_off;
+ u32 dma_oob_buf_off;
+ u32 dma_st_off;
+ u32 bch_st_off;
+ u32 randmz_off;
+ u32 int_en_off;
+ u32 int_clr_off;
+ u32 int_st_off;
+ u32 oob0_off;
+ u32 oob1_off;
+ struct ecc_cnt_status ecc0;
+ struct ecc_cnt_status ecc1;
+};
+
+struct rk_nfc_nand_chip {
+ struct list_head node;
+ struct nand_chip chip;
+
+ u16 boot_blks;
+ u16 metadata_size;
+ u32 boot_ecc;
+ u32 timing;
+
+ u8 nsels;
+ u8 sels[];
+ /* Nothing after this field. */
+};
+
+struct rk_nfc {
+ struct nand_controller controller;
+ const struct nfc_cfg *cfg;
+ struct device *dev;
+
+ struct clk *nfc_clk;
+ struct clk *ahb_clk;
+ void __iomem *regs;
+
+ u32 selected_bank;
+ u32 band_offset;
+ u32 cur_ecc;
+ u32 cur_timing;
+
+ struct completion done;
+ struct list_head chips;
+
+ u8 *page_buf;
+ u32 *oob_buf;
+ u32 page_buf_size;
+ u32 oob_buf_size;
+
+ unsigned long assigned_cs;
+};
+
+static inline struct rk_nfc_nand_chip *rk_nfc_to_rknand(struct nand_chip *chip)
+{
+ return container_of(chip, struct rk_nfc_nand_chip, chip);
+}
+
+static inline u8 *rk_nfc_buf_to_data_ptr(struct nand_chip *chip, const u8 *p, int i)
+{
+ return (u8 *)p + i * chip->ecc.size;
+}
+
+static inline u8 *rk_nfc_buf_to_oob_ptr(struct nand_chip *chip, int i)
+{
+ u8 *poi;
+
+ poi = chip->oob_poi + i * NFC_SYS_DATA_SIZE;
+
+ return poi;
+}
+
+static inline u8 *rk_nfc_buf_to_oob_ecc_ptr(struct nand_chip *chip, int i)
+{
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ u8 *poi;
+
+ poi = chip->oob_poi + rknand->metadata_size + chip->ecc.bytes * i;
+
+ return poi;
+}
+
+static inline int rk_nfc_data_len(struct nand_chip *chip)
+{
+ return chip->ecc.size + chip->ecc.bytes + NFC_SYS_DATA_SIZE;
+}
+
+static inline u8 *rk_nfc_data_ptr(struct nand_chip *chip, int i)
+{
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->page_buf + i * rk_nfc_data_len(chip);
+}
+
+static inline u8 *rk_nfc_oob_ptr(struct nand_chip *chip, int i)
+{
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->page_buf + i * rk_nfc_data_len(chip) + chip->ecc.size;
+}
+
+static int rk_nfc_hw_ecc_setup(struct nand_chip *chip, u32 strength)
+{
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ u32 reg, i;
+
+ for (i = 0; i < NFC_ECC_MAX_MODES; i++) {
+ if (strength == nfc->cfg->ecc_strengths[i]) {
+ reg = nfc->cfg->ecc_cfgs[i];
+ break;
+ }
+ }
+
+ if (i >= NFC_ECC_MAX_MODES)
+ return -EINVAL;
+
+ writel(reg, nfc->regs + nfc->cfg->bchctl_off);
+
+ /* Save chip ECC setting */
+ nfc->cur_ecc = strength;
+
+ return 0;
+}
+
+static void rk_nfc_select_chip(struct nand_chip *chip, int cs)
+{
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u32 val;
+
+ if (cs < 0) {
+ nfc->selected_bank = -1;
+ /* Deselect the currently selected target. */
+ val = readl_relaxed(nfc->regs + NFC_FMCTL);
+ val &= ~FMCTL_CE_SEL_M;
+ writel(val, nfc->regs + NFC_FMCTL);
+ return;
+ }
+
+ nfc->selected_bank = rknand->sels[cs];
+ nfc->band_offset = NFC_BANK + nfc->selected_bank * NFC_BANK_STEP;
+
+ val = readl_relaxed(nfc->regs + NFC_FMCTL);
+ val &= ~FMCTL_CE_SEL_M;
+ val |= FMCTL_CE_SEL(nfc->selected_bank);
+
+ writel(val, nfc->regs + NFC_FMCTL);
+
+ /*
+ * Compare current chip timing with selected chip timing and
+ * change if needed.
+ */
+ if (nfc->cur_timing != rknand->timing) {
+ writel(rknand->timing, nfc->regs + NFC_FMWAIT);
+ nfc->cur_timing = rknand->timing;
+ }
+
+ /*
+ * Compare current chip ECC setting with selected chip ECC setting and
+ * change if needed.
+ */
+ if (nfc->cur_ecc != ecc->strength)
+ rk_nfc_hw_ecc_setup(chip, ecc->strength);
+}
+
+static inline int rk_nfc_wait_ioready(struct rk_nfc *nfc)
+{
+ int rc;
+ u32 val;
+
+ rc = readl_relaxed_poll_timeout(nfc->regs + NFC_FMCTL, val,
+ val & FMCTL_RDY, 10, NFC_TIMEOUT);
+
+ return rc;
+}
+
+static void rk_nfc_read_buf(struct rk_nfc *nfc, u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = readb_relaxed(nfc->regs + nfc->band_offset +
+ BANK_DATA);
+}
+
+static void rk_nfc_write_buf(struct rk_nfc *nfc, const u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ writeb(buf[i], nfc->regs + nfc->band_offset + BANK_DATA);
+}
+
+static int rk_nfc_cmd(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ unsigned int i, j, remaining, start;
+ int reg_offset = nfc->band_offset;
+ u8 *inbuf = NULL;
+ const u8 *outbuf;
+ u32 cnt = 0;
+ int ret = 0;
+
+ for (i = 0; i < subop->ninstrs; i++) {
+ const struct nand_op_instr *instr = &subop->instrs[i];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb(instr->ctx.cmd.opcode,
+ nfc->regs + reg_offset + BANK_CMD);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ remaining = nand_subop_get_num_addr_cyc(subop, i);
+ start = nand_subop_get_addr_start_off(subop, i);
+
+ for (j = 0; j < 8 && j + start < remaining; j++)
+ writeb(instr->ctx.addr.addrs[j + start],
+ nfc->regs + reg_offset + BANK_ADDR);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ start = nand_subop_get_data_start_off(subop, i);
+ cnt = nand_subop_get_data_len(subop, i);
+
+ if (instr->type == NAND_OP_DATA_OUT_INSTR) {
+ outbuf = instr->ctx.data.buf.out + start;
+ rk_nfc_write_buf(nfc, outbuf, cnt);
+ } else {
+ inbuf = instr->ctx.data.buf.in + start;
+ rk_nfc_read_buf(nfc, inbuf, cnt);
+ }
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ if (rk_nfc_wait_ioready(nfc) < 0) {
+ ret = -ETIMEDOUT;
+ dev_err(nfc->dev, "IO not ready\n");
+ }
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static const struct nand_op_parser rk_nfc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ rk_nfc_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, MAX_DATA_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ rk_nfc_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, MAX_DATA_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+);
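+
+/*
+ * The two patterns above cover the usual NAND sequences: the first
+ * matches read-style operations (command, address, optional second
+ * command, wait for R/B, data input), the second matches program-style
+ * operations (command, address, data output, confirm command, wait
+ * for R/B).
+ */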
+
+static int rk_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ if (!check_only)
+ rk_nfc_select_chip(chip, op->cs);
+
+ return nand_op_parser_exec_op(chip, &rk_nfc_op_parser, op,
+ check_only);
+}
+
+static int rk_nfc_setup_interface(struct nand_chip *chip, int target,
+ const struct nand_interface_config *conf)
+{
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ const struct nand_sdr_timings *timings;
+ u32 rate, tc2rw, trwpw, trw2c;
+ u32 temp;
+
+ if (target < 0)
+ return 0;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -EOPNOTSUPP;
+
+ if (IS_ERR(nfc->nfc_clk))
+ rate = clk_get_rate(nfc->ahb_clk);
+ else
+ rate = clk_get_rate(nfc->nfc_clk);
+
+ /* Turn clock rate into kHz. */
+ rate /= 1000;
+
+ tc2rw = 1;
+ trw2c = 1;
+
+ trwpw = max(timings->tWC_min, timings->tRC_min) / 1000;
+ trwpw = DIV_ROUND_UP(trwpw * rate, 1000000);
+
+ temp = timings->tREA_max / 1000;
+ temp = DIV_ROUND_UP(temp * rate, 1000000);
+
+ if (trwpw < temp)
+ trwpw = temp;
+
+ /*
+ * ACCON: access timing control register
+ * -------------------------------------
+ * 31:18: reserved
+ * 17:12: csrw, clock cycles from the falling edge of CSn to the
+ * falling edge of RDn or WRn
+ * 11:11: reserved
+ * 10:05: rwpw, the width of RDn or WRn in processor clock cycles
+ * 04:00: rwcs, clock cycles from the rising edge of RDn or WRn to the
+ * rising edge of CSn
+ */
+
+ /* Save chip timing */
+ rknand->timing = ACCTIMING(tc2rw, trwpw, trw2c);
+
+ return 0;
+}
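+
+/*
+ * Worked example for the conversion above (illustrative numbers): with
+ * tWC_min = tRC_min = 25000 ps and a 150 MHz NFC clock (rate = 150000
+ * kHz), trwpw = 25 ns and DIV_ROUND_UP(25 * 150000, 1000000) = 4, i.e.
+ * the RDn/WRn pulse is stretched to four controller clock cycles.
+ */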
+
+static void rk_nfc_xfer_start(struct rk_nfc *nfc, u8 rw, u8 n_KB,
+ dma_addr_t dma_data, dma_addr_t dma_oob)
+{
+ u32 dma_reg, fl_reg, bch_reg;
+
+ dma_reg = DMA_ST | ((!rw) << DMA_WR) | DMA_EN | (2 << DMA_AHB_SIZE) |
+ (7 << DMA_BURST_SIZE) | (16 << DMA_INC_NUM);
+
+ fl_reg = (rw << FLCTL_WR) | FLCTL_XFER_EN | FLCTL_ACORRECT |
+ (n_KB << FLCTL_XFER_SECTOR) | FLCTL_TOG_FIX;
+
+ if (nfc->cfg->type == NFC_V6 || nfc->cfg->type == NFC_V8) {
+ bch_reg = readl_relaxed(nfc->regs + nfc->cfg->bchctl_off);
+ bch_reg = (bch_reg & (~BCHCTL_BANK_M)) |
+ (nfc->selected_bank << BCHCTL_BANK);
+ writel(bch_reg, nfc->regs + nfc->cfg->bchctl_off);
+ }
+
+ writel(dma_reg, nfc->regs + nfc->cfg->dma_cfg_off);
+ writel((u32)dma_data, nfc->regs + nfc->cfg->dma_data_buf_off);
+ writel((u32)dma_oob, nfc->regs + nfc->cfg->dma_oob_buf_off);
+ writel(fl_reg, nfc->regs + nfc->cfg->flctl_off);
+ fl_reg |= FLCTL_XFER_ST;
+ writel(fl_reg, nfc->regs + nfc->cfg->flctl_off);
+}
+
+static int rk_nfc_wait_for_xfer_done(struct rk_nfc *nfc)
+{
+ void __iomem *ptr;
+ u32 reg;
+
+ ptr = nfc->regs + nfc->cfg->flctl_off;
+
+ return readl_relaxed_poll_timeout(ptr, reg,
+ reg & FLCTL_XFER_READY,
+ 10, NFC_TIMEOUT);
+}
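+
+/*
+ * A page transfer is started by rk_nfc_xfer_start(), its DMA part is
+ * signalled through the INT_DMA interrupt (see rk_nfc_irq()), and
+ * rk_nfc_wait_for_xfer_done() then polls FLCTL_XFER_READY to make sure
+ * the flash-side transfer has finished as well.
+ */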
+
+static int rk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_on, int page)
+{
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int i, pages_per_blk;
+
+ pages_per_blk = mtd->erasesize / mtd->writesize;
+ if ((chip->options & NAND_IS_BOOT_MEDIUM) &&
+ (page < (pages_per_blk * rknand->boot_blks)) &&
+ rknand->boot_ecc != ecc->strength) {
+ /*
+ * There's currently no method to notify the MTD framework that
+ * a different ECC strength is in use for the boot blocks.
+ */
+ return -EIO;
+ }
+
+ if (!buf)
+ memset(nfc->page_buf, 0xff, mtd->writesize + mtd->oobsize);
+
+ for (i = 0; i < ecc->steps; i++) {
+ /* Copy data to the NFC buffer. */
+ if (buf)
+ memcpy(rk_nfc_data_ptr(chip, i),
+ rk_nfc_buf_to_data_ptr(chip, buf, i),
+ ecc->size);
+ /*
+ * The first four bytes of OOB are reserved for the
+ * boot ROM. In some debugging cases, such as with a
+ * read, erase and write back test, these 4 bytes stored
+ * in OOB also need to be written back.
+ *
+ * The function nand_block_bad detects bad blocks like:
+ *
+ * bad = chip->oob_poi[chip->badblockpos];
+ *
+ * chip->badblockpos == 0 for a large page NAND Flash,
+ * so chip->oob_poi[0] is the bad block mask (BBM).
+ *
+ * The OOB data layout on the NFC is:
+ *
+ * PA0 PA1 PA2 PA3 | BBM OOB1 OOB2 OOB3 | ...
+ *
+ * or
+ *
+ * 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ...
+ *
+ * The code here just swaps the first 4 bytes with the last
+ * 4 bytes without losing any data.
+ *
+ * The chip->oob_poi data layout:
+ *
+ * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3
+ *
+ * The rk_nfc_ooblayout_free() function has already reserved
+ * these 4 bytes, together with 2 bytes for the BBM,
+ * by reducing its length:
+ *
+ * oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2;
+ */
+ if (!i)
+ memcpy(rk_nfc_oob_ptr(chip, i),
+ rk_nfc_buf_to_oob_ptr(chip, ecc->steps - 1),
+ NFC_SYS_DATA_SIZE);
+ else
+ memcpy(rk_nfc_oob_ptr(chip, i),
+ rk_nfc_buf_to_oob_ptr(chip, i - 1),
+ NFC_SYS_DATA_SIZE);
+ /* Copy ECC data to the NFC buffer. */
+ memcpy(rk_nfc_oob_ptr(chip, i) + NFC_SYS_DATA_SIZE,
+ rk_nfc_buf_to_oob_ecc_ptr(chip, i),
+ ecc->bytes);
+ }
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ /* Push the prepared page buffer, not the caller's buffer. */
+ rk_nfc_write_buf(nfc, nfc->page_buf, mtd->writesize + mtd->oobsize);
+ return nand_prog_page_end_op(chip);
+}
+
+static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ int oob_on, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int oob_step = (ecc->bytes > 60) ? NFC_MAX_OOB_PER_STEP :
+ NFC_MIN_OOB_PER_STEP;
+ int pages_per_blk = mtd->erasesize / mtd->writesize;
+ int ret = 0, i, boot_rom_mode = 0;
+ dma_addr_t dma_data, dma_oob;
+ u32 tmp;
+ u8 *oob;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ if (buf)
+ memcpy(nfc->page_buf, buf, mtd->writesize);
+ else
+ memset(nfc->page_buf, 0xFF, mtd->writesize);
+
+ /*
+ * The first blocks (4, 8 or 16 depending on the device) are used
+ * by the boot ROM and the first 32 bits of OOB need to link to
+ * the next page address in the same block. We can't directly copy
+ * OOB data from the MTD framework, because this page address
+ * would conflict, for example, with the bad block marker (BBM),
+ * so we shift all OOB data, including the BBM, by 4 byte positions.
+ * As a consequence, the OOB size available to the MTD framework is
+ * also reduced by 4 bytes.
+ *
+ * PA0 PA1 PA2 PA3 | BBM OOB1 OOB2 OOB3 | ...
+ *
+ * If a NAND is not a boot medium or the page is not a boot block,
+ * the first 4 bytes are left untouched by writing 0xFF to them.
+ *
+ * 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ...
+ *
+ * The code here just swaps the first 4 bytes with the last
+ * 4 bytes without losing any data.
+ *
+ * The chip->oob_poi data layout:
+ *
+ * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3
+ *
+ * Configure the ECC algorithm supported by the boot ROM.
+ */
+ if ((page < (pages_per_blk * rknand->boot_blks)) &&
+ (chip->options & NAND_IS_BOOT_MEDIUM)) {
+ boot_rom_mode = 1;
+ if (rknand->boot_ecc != ecc->strength)
+ rk_nfc_hw_ecc_setup(chip, rknand->boot_ecc);
+ }
+
+ for (i = 0; i < ecc->steps; i++) {
+ if (!i)
+ oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE;
+ else
+ oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
+
+ tmp = oob[0] | oob[1] << 8 | oob[2] << 16 | oob[3] << 24;
+
+ if (nfc->cfg->type == NFC_V9)
+ nfc->oob_buf[i] = tmp;
+ else
+ nfc->oob_buf[i * (oob_step / 4)] = tmp;
+ }
+
+ dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf,
+ mtd->writesize, DMA_TO_DEVICE);
+ dma_oob = dma_map_single(nfc->dev, nfc->oob_buf,
+ ecc->steps * oob_step,
+ DMA_TO_DEVICE);
+
+ reinit_completion(&nfc->done);
+ writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off);
+
+ rk_nfc_xfer_start(nfc, NFC_WRITE, ecc->steps, dma_data,
+ dma_oob);
+ ret = wait_for_completion_timeout(&nfc->done,
+ msecs_to_jiffies(100));
+ if (!ret)
+ dev_warn(nfc->dev, "write: wait dma done timeout.\n");
+ /*
+ * Whether or not the DMA transfer timed out, the driver still
+ * needs to check the NFC's status register to see if the data
+ * transfer completed.
+ */
+ ret = rk_nfc_wait_for_xfer_done(nfc);
+
+ dma_unmap_single(nfc->dev, dma_data, mtd->writesize,
+ DMA_TO_DEVICE);
+ dma_unmap_single(nfc->dev, dma_oob, ecc->steps * oob_step,
+ DMA_TO_DEVICE);
+
+ if (boot_rom_mode && rknand->boot_ecc != ecc->strength)
+ rk_nfc_hw_ecc_setup(chip, ecc->strength);
+
+ if (ret) {
+ dev_err(nfc->dev, "write: wait transfer done timeout.\n");
+ return -ETIMEDOUT;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int rk_nfc_write_oob(struct nand_chip *chip, int page)
+{
+ return rk_nfc_write_page_hwecc(chip, NULL, 1, page);
+}
+
+static int rk_nfc_read_page_raw(struct nand_chip *chip, u8 *buf, int oob_on,
+ int page)
+{
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int i, pages_per_blk;
+
+ pages_per_blk = mtd->erasesize / mtd->writesize;
+ if ((chip->options & NAND_IS_BOOT_MEDIUM) &&
+ (page < (pages_per_blk * rknand->boot_blks)) &&
+ rknand->boot_ecc != ecc->strength) {
+ /*
+ * There's currently no method to notify the MTD framework that
+ * a different ECC strength is in use for the boot blocks.
+ */
+ return -EIO;
+ }
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ rk_nfc_read_buf(nfc, nfc->page_buf, mtd->writesize + mtd->oobsize);
+ for (i = 0; i < ecc->steps; i++) {
+ /*
+ * The first four bytes of OOB are reserved for the
+ * boot ROM. In some debugging cases, such as with a read,
+ * erase and write back test, these 4 bytes also must be
+ * saved somewhere, otherwise this information will be
+ * lost during a write back.
+ */
+ if (!i)
+ memcpy(rk_nfc_buf_to_oob_ptr(chip, ecc->steps - 1),
+ rk_nfc_oob_ptr(chip, i),
+ NFC_SYS_DATA_SIZE);
+ else
+ memcpy(rk_nfc_buf_to_oob_ptr(chip, i - 1),
+ rk_nfc_oob_ptr(chip, i),
+ NFC_SYS_DATA_SIZE);
+
+ /* Copy ECC data from the NFC buffer. */
+ memcpy(rk_nfc_buf_to_oob_ecc_ptr(chip, i),
+ rk_nfc_oob_ptr(chip, i) + NFC_SYS_DATA_SIZE,
+ ecc->bytes);
+
+ /* Copy data from the NFC buffer. */
+ if (buf)
+ memcpy(rk_nfc_buf_to_data_ptr(chip, buf, i),
+ rk_nfc_data_ptr(chip, i),
+ ecc->size);
+ }
+
+ return 0;
+}
+
+static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int oob_step = (ecc->bytes > 60) ? NFC_MAX_OOB_PER_STEP :
+ NFC_MIN_OOB_PER_STEP;
+ int pages_per_blk = mtd->erasesize / mtd->writesize;
+ dma_addr_t dma_data, dma_oob;
+ int ret = 0, i, cnt, boot_rom_mode = 0;
+ int max_bitflips = 0, bch_st, ecc_fail = 0;
+ u8 *oob;
+ u32 tmp;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ dma_data = dma_map_single(nfc->dev, nfc->page_buf,
+ mtd->writesize,
+ DMA_FROM_DEVICE);
+ dma_oob = dma_map_single(nfc->dev, nfc->oob_buf,
+ ecc->steps * oob_step,
+ DMA_FROM_DEVICE);
+
+ /*
+ * The first blocks (4, 8 or 16 depending on the device)
+ * are used by the boot ROM.
+ * Configure the ECC algorithm supported by the boot ROM.
+ */
+ if ((page < (pages_per_blk * rknand->boot_blks)) &&
+ (chip->options & NAND_IS_BOOT_MEDIUM)) {
+ boot_rom_mode = 1;
+ if (rknand->boot_ecc != ecc->strength)
+ rk_nfc_hw_ecc_setup(chip, rknand->boot_ecc);
+ }
+
+ reinit_completion(&nfc->done);
+ writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off);
+ rk_nfc_xfer_start(nfc, NFC_READ, ecc->steps, dma_data,
+ dma_oob);
+ ret = wait_for_completion_timeout(&nfc->done,
+ msecs_to_jiffies(100));
+ if (!ret)
+ dev_warn(nfc->dev, "read: wait dma done timeout.\n");
+ /*
+ * Whether or not the DMA transfer timed out, the driver still
+ * needs to check the NFC's status register to see if the data
+ * transfer completed.
+ */
+ ret = rk_nfc_wait_for_xfer_done(nfc);
+
+ dma_unmap_single(nfc->dev, dma_data, mtd->writesize,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(nfc->dev, dma_oob, ecc->steps * oob_step,
+ DMA_FROM_DEVICE);
+
+ if (ret) {
+ ret = -ETIMEDOUT;
+ dev_err(nfc->dev, "read: wait transfer done timeout.\n");
+ goto timeout_err;
+ }
+
+ for (i = 0; i < ecc->steps; i++) {
+ if (!i)
+ oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE;
+ else
+ oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
+
+ if (nfc->cfg->type == NFC_V9)
+ tmp = nfc->oob_buf[i];
+ else
+ tmp = nfc->oob_buf[i * (oob_step / 4)];
+
+ *oob++ = (u8)tmp;
+ *oob++ = (u8)(tmp >> 8);
+ *oob++ = (u8)(tmp >> 16);
+ *oob++ = (u8)(tmp >> 24);
+ }
+
+ for (i = 0; i < (ecc->steps / 2); i++) {
+ bch_st = readl_relaxed(nfc->regs +
+ nfc->cfg->bch_st_off + i * 4);
+ if (bch_st & BIT(nfc->cfg->ecc0.err_flag_bit) ||
+ bch_st & BIT(nfc->cfg->ecc1.err_flag_bit)) {
+ mtd->ecc_stats.failed++;
+ ecc_fail = 1;
+ } else {
+ cnt = ECC_ERR_CNT(bch_st, nfc->cfg->ecc0);
+ mtd->ecc_stats.corrected += cnt;
+ max_bitflips = max_t(u32, max_bitflips, cnt);
+
+ cnt = ECC_ERR_CNT(bch_st, nfc->cfg->ecc1);
+ mtd->ecc_stats.corrected += cnt;
+ max_bitflips = max_t(u32, max_bitflips, cnt);
+ }
+ }
+
+ if (buf)
+ memcpy(buf, nfc->page_buf, mtd->writesize);
+
+timeout_err:
+ if (boot_rom_mode && rknand->boot_ecc != ecc->strength)
+ rk_nfc_hw_ecc_setup(chip, ecc->strength);
+
+ if (ret)
+ return ret;
+
+ if (ecc_fail) {
+ dev_err(nfc->dev, "read page: %x ecc error!\n", page);
+ return 0;
+ }
+
+ return max_bitflips;
+}
+
+static int rk_nfc_read_oob(struct nand_chip *chip, int page)
+{
+ return rk_nfc_read_page_hwecc(chip, NULL, 1, page);
+}
+
+static inline void rk_nfc_hw_init(struct rk_nfc *nfc)
+{
+ /* Disable flash write protection (WP). */
+ writel(FMCTL_WP, nfc->regs + NFC_FMCTL);
+ /* Configure the default timing for 40 ns at a 150 MHz NFC clock. */
+ writel(0x1081, nfc->regs + NFC_FMWAIT);
+ nfc->cur_timing = 0x1081;
+ /* Disable randomizer and DMA. */
+ writel(0, nfc->regs + nfc->cfg->randmz_off);
+ writel(0, nfc->regs + nfc->cfg->dma_cfg_off);
+ writel(FLCTL_RST, nfc->regs + nfc->cfg->flctl_off);
+}
+
+static irqreturn_t rk_nfc_irq(int irq, void *id)
+{
+ struct rk_nfc *nfc = id;
+ u32 sta, ien;
+
+ sta = readl_relaxed(nfc->regs + nfc->cfg->int_st_off);
+ ien = readl_relaxed(nfc->regs + nfc->cfg->int_en_off);
+
+ if (!(sta & ien))
+ return IRQ_NONE;
+
+ writel(sta, nfc->regs + nfc->cfg->int_clr_off);
+ writel(~sta & ien, nfc->regs + nfc->cfg->int_en_off);
+
+ complete(&nfc->done);
+
+ return IRQ_HANDLED;
+}
+
+static int rk_nfc_enable_clks(struct device *dev, struct rk_nfc *nfc)
+{
+ int ret;
+
+ if (!IS_ERR(nfc->nfc_clk)) {
+ ret = clk_prepare_enable(nfc->nfc_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable NFC clk\n");
+ return ret;
+ }
+ }
+
+ ret = clk_prepare_enable(nfc->ahb_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable ahb clk\n");
+ clk_disable_unprepare(nfc->nfc_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rk_nfc_disable_clks(struct rk_nfc *nfc)
+{
+ clk_disable_unprepare(nfc->nfc_clk);
+ clk_disable_unprepare(nfc->ahb_clk);
+}
+
+static int rk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+
+ if (section)
+ return -ERANGE;
+
+ oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2;
+ oob_region->offset = 2;
+
+ return 0;
+}
+
+static int rk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+
+ if (section)
+ return -ERANGE;
+
+ oob_region->length = mtd->oobsize - rknand->metadata_size;
+ oob_region->offset = rknand->metadata_size;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops rk_nfc_ooblayout_ops = {
+ .free = rk_nfc_ooblayout_free,
+ .ecc = rk_nfc_ooblayout_ecc,
+};
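+
+/*
+ * Example of the layout above (illustrative geometry): with 4 ECC
+ * steps, metadata_size = 4 * NFC_SYS_DATA_SIZE = 16 bytes, so the free
+ * OOB region is bytes 2..11 (16 - 4 - 2 = 10 bytes) and the remaining
+ * OOB area is reported as ECC.
+ */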
+
+static int rk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ const u8 *strengths = nfc->cfg->ecc_strengths;
+ u8 max_strength, nfc_max_strength;
+ int i;
+
+ nfc_max_strength = nfc->cfg->ecc_strengths[0];
+ /* Use defaults if the optional DT settings are not present. */
+ if (!ecc->size || !ecc->strength ||
+ ecc->strength > nfc_max_strength) {
+ chip->ecc.size = 1024;
+ ecc->steps = mtd->writesize / ecc->size;
+
+ /*
+ * HW ECC always requests the number of ECC bytes per 1024-byte
+ * block. The first 4 OOB bytes of each step are reserved for sys data.
+ */
+ max_strength = ((mtd->oobsize / ecc->steps) - 4) * 8 /
+ fls(8 * 1024);
+ if (max_strength > nfc_max_strength)
+ max_strength = nfc_max_strength;
+
+ for (i = 0; i < 4; i++) {
+ if (max_strength >= strengths[i])
+ break;
+ }
+
+ if (i >= 4) {
+ dev_err(nfc->dev, "unsupported ECC strength\n");
+ return -EOPNOTSUPP;
+ }
+
+ ecc->strength = strengths[i];
+ }
+ ecc->steps = mtd->writesize / ecc->size;
+ ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * chip->ecc.size), 8);
+
+ return 0;
+}
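+
+/*
+ * Worked example (illustrative geometry): for a 4096+128 byte page on
+ * NFC_V6, ecc->steps = 4096 / 1024 = 4 and max_strength =
+ * ((128 / 4) - 4) * 8 / fls(8192) = 224 / 14 = 16, which selects the
+ * 16-bit entry of the strength table. ecc->bytes then becomes
+ * DIV_ROUND_UP(16 * 14, 8) = 28, and indeed 4 metadata bytes plus 28
+ * ECC bytes exactly fill the 32 OOB bytes available per step.
+ */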
+
+static int rk_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ struct rk_nfc *nfc = nand_get_controller_data(chip);
+ struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int new_page_len, new_oob_len;
+ void *buf;
+ int ret;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ dev_err(dev, "16 bits bus width not supported");
+ return -EINVAL;
+ }
+
+ if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ ret = rk_nfc_ecc_init(dev, mtd);
+ if (ret)
+ return ret;
+
+ rknand->metadata_size = NFC_SYS_DATA_SIZE * ecc->steps;
+
+ if (rknand->metadata_size < NFC_SYS_DATA_SIZE + 2) {
+ dev_err(dev,
+ "driver needs at least %d bytes of meta data\n",
+ NFC_SYS_DATA_SIZE + 2);
+ return -EIO;
+ }
+
+ /* Check the existing buffers first to avoid duplicate allocations. */
+ new_page_len = mtd->writesize + mtd->oobsize;
+ if (nfc->page_buf && new_page_len > nfc->page_buf_size) {
+ buf = krealloc(nfc->page_buf, new_page_len,
+ GFP_KERNEL | GFP_DMA);
+ if (!buf)
+ return -ENOMEM;
+ nfc->page_buf = buf;
+ nfc->page_buf_size = new_page_len;
+ }
+
+ new_oob_len = ecc->steps * NFC_MAX_OOB_PER_STEP;
+ if (nfc->oob_buf && new_oob_len > nfc->oob_buf_size) {
+ buf = krealloc(nfc->oob_buf, new_oob_len,
+ GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ kfree(nfc->page_buf);
+ nfc->page_buf = NULL;
+ return -ENOMEM;
+ }
+ nfc->oob_buf = buf;
+ nfc->oob_buf_size = new_oob_len;
+ }
+
+ if (!nfc->page_buf) {
+ nfc->page_buf = kzalloc(new_page_len, GFP_KERNEL | GFP_DMA);
+ if (!nfc->page_buf)
+ return -ENOMEM;
+ nfc->page_buf_size = new_page_len;
+ }
+
+ if (!nfc->oob_buf) {
+ nfc->oob_buf = kzalloc(new_oob_len, GFP_KERNEL | GFP_DMA);
+ if (!nfc->oob_buf) {
+ kfree(nfc->page_buf);
+ nfc->page_buf = NULL;
+ return -ENOMEM;
+ }
+ nfc->oob_buf_size = new_oob_len;
+ }
+
+ chip->ecc.write_page_raw = rk_nfc_write_page_raw;
+ chip->ecc.write_page = rk_nfc_write_page_hwecc;
+ chip->ecc.write_oob = rk_nfc_write_oob;
+
+ chip->ecc.read_page_raw = rk_nfc_read_page_raw;
+ chip->ecc.read_page = rk_nfc_read_page_hwecc;
+ chip->ecc.read_oob = rk_nfc_read_oob;
+
+ return 0;
+}
+
+static const struct nand_controller_ops rk_nfc_controller_ops = {
+ .attach_chip = rk_nfc_attach_chip,
+ .exec_op = rk_nfc_exec_op,
+ .setup_interface = rk_nfc_setup_interface,
+};
+
+static int rk_nfc_nand_chip_init(struct device *dev, struct rk_nfc *nfc,
+ struct device_node *np)
+{
+ struct rk_nfc_nand_chip *rknand;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ int nsels;
+ u32 tmp;
+ int ret;
+ int i;
+
+ if (!of_get_property(np, "reg", &nsels))
+ return -ENODEV;
+ nsels /= sizeof(u32);
+ if (!nsels || nsels > NFC_MAX_NSELS) {
+ dev_err(dev, "invalid reg property size %d\n", nsels);
+ return -EINVAL;
+ }
+
+ rknand = devm_kzalloc(dev, sizeof(*rknand) + nsels * sizeof(u8),
+ GFP_KERNEL);
+ if (!rknand)
+ return -ENOMEM;
+
+ rknand->nsels = nsels;
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &tmp);
+ if (ret) {
+ dev_err(dev, "reg property failure : %d\n", ret);
+ return ret;
+ }
+
+ if (tmp >= NFC_MAX_NSELS) {
+ dev_err(dev, "invalid CS: %u\n", tmp);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %u already assigned\n", tmp);
+ return -EINVAL;
+ }
+
+ rknand->sels[i] = tmp;
+ }
+
+ chip = &rknand->chip;
+ chip->controller = &nfc->controller;
+
+ nand_set_flash_node(chip, np);
+
+ nand_set_controller_data(chip, nfc);
+
+ chip->options |= NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE;
+ chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+
+ /* Set the default mode in case the DT entry is missing. */
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ mtd = nand_to_mtd(chip);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+
+ if (!mtd->name) {
+ dev_err(nfc->dev, "NAND label property is mandatory\n");
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, &rk_nfc_ooblayout_ops);
+ rk_nfc_hw_init(nfc);
+ ret = nand_scan(chip, nsels);
+ if (ret)
+ return ret;
+
+ if (chip->options & NAND_IS_BOOT_MEDIUM) {
+ ret = of_property_read_u32(np, "rockchip,boot-blks", &tmp);
+ rknand->boot_blks = ret ? 0 : tmp;
+
+ ret = of_property_read_u32(np, "rockchip,boot-ecc-strength",
+ &tmp);
+ rknand->boot_ecc = ret ? chip->ecc.strength : tmp;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "MTD parse partition error\n");
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&rknand->node, &nfc->chips);
+
+ return 0;
+}
+
+static void rk_nfc_chips_cleanup(struct rk_nfc *nfc)
+{
+ struct rk_nfc_nand_chip *rknand, *tmp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(rknand, tmp, &nfc->chips, node) {
+ chip = &rknand->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&rknand->node);
+ }
+}
+
+static int rk_nfc_nand_chips_init(struct device *dev, struct rk_nfc *nfc)
+{
+ struct device_node *np = dev->of_node, *nand_np;
+ int nchips = of_get_child_count(np);
+ int ret;
+
+ if (!nchips || nchips > NFC_MAX_NSELS) {
+ dev_err(nfc->dev, "incorrect number of NAND chips (%d)\n",
+ nchips);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = rk_nfc_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ rk_nfc_chips_cleanup(nfc);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nfc_cfg nfc_v6_cfg = {
+ .type = NFC_V6,
+ .ecc_strengths = {60, 40, 24, 16},
+ .ecc_cfgs = {
+ 0x00040011, 0x00040001, 0x00000011, 0x00000001,
+ },
+ .flctl_off = 0x08,
+ .bchctl_off = 0x0C,
+ .dma_cfg_off = 0x10,
+ .dma_data_buf_off = 0x14,
+ .dma_oob_buf_off = 0x18,
+ .dma_st_off = 0x1C,
+ .bch_st_off = 0x20,
+ .randmz_off = 0x150,
+ .int_en_off = 0x16C,
+ .int_clr_off = 0x170,
+ .int_st_off = 0x174,
+ .oob0_off = 0x200,
+ .oob1_off = 0x230,
+ .ecc0 = {
+ .err_flag_bit = 2,
+ .low = 3,
+ .low_mask = 0x1F,
+ .low_bn = 5,
+ .high = 27,
+ .high_mask = 0x1,
+ },
+ .ecc1 = {
+ .err_flag_bit = 15,
+ .low = 16,
+ .low_mask = 0x1F,
+ .low_bn = 5,
+ .high = 29,
+ .high_mask = 0x1,
+ },
+};
+
+static const struct nfc_cfg nfc_v8_cfg = {
+ .type = NFC_V8,
+ .ecc_strengths = {16, 16, 16, 16},
+ .ecc_cfgs = {
+ 0x00000001, 0x00000001, 0x00000001, 0x00000001,
+ },
+ .flctl_off = 0x08,
+ .bchctl_off = 0x0C,
+ .dma_cfg_off = 0x10,
+ .dma_data_buf_off = 0x14,
+ .dma_oob_buf_off = 0x18,
+ .dma_st_off = 0x1C,
+ .bch_st_off = 0x20,
+ .randmz_off = 0x150,
+ .int_en_off = 0x16C,
+ .int_clr_off = 0x170,
+ .int_st_off = 0x174,
+ .oob0_off = 0x200,
+ .oob1_off = 0x230,
+ .ecc0 = {
+ .err_flag_bit = 2,
+ .low = 3,
+ .low_mask = 0x1F,
+ .low_bn = 5,
+ .high = 27,
+ .high_mask = 0x1,
+ },
+ .ecc1 = {
+ .err_flag_bit = 15,
+ .low = 16,
+ .low_mask = 0x1F,
+ .low_bn = 5,
+ .high = 29,
+ .high_mask = 0x1,
+ },
+};
+
+static const struct nfc_cfg nfc_v9_cfg = {
+ .type = NFC_V9,
+ .ecc_strengths = {70, 60, 40, 16},
+ .ecc_cfgs = {
+ 0x00000001, 0x06000001, 0x04000001, 0x02000001,
+ },
+ .flctl_off = 0x10,
+ .bchctl_off = 0x20,
+ .dma_cfg_off = 0x30,
+ .dma_data_buf_off = 0x34,
+ .dma_oob_buf_off = 0x38,
+ .dma_st_off = 0x3C,
+ .bch_st_off = 0x150,
+ .randmz_off = 0x208,
+ .int_en_off = 0x120,
+ .int_clr_off = 0x124,
+ .int_st_off = 0x128,
+ .oob0_off = 0x200,
+ .oob1_off = 0x204,
+ .ecc0 = {
+ .err_flag_bit = 2,
+ .low = 3,
+ .low_mask = 0x7F,
+ .low_bn = 7,
+ .high = 0,
+ .high_mask = 0x0,
+ },
+ .ecc1 = {
+ .err_flag_bit = 18,
+ .low = 19,
+ .low_mask = 0x7F,
+ .low_bn = 7,
+ .high = 0,
+ .high_mask = 0x0,
+ },
+};
+
+static const struct of_device_id rk_nfc_id_table[] = {
+ {
+ .compatible = "rockchip,px30-nfc",
+ .data = &nfc_v9_cfg
+ },
+ {
+ .compatible = "rockchip,rk2928-nfc",
+ .data = &nfc_v6_cfg
+ },
+ {
+ .compatible = "rockchip,rv1108-nfc",
+ .data = &nfc_v8_cfg
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rk_nfc_id_table);
+
+static int rk_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rk_nfc *nfc;
+ int ret, irq;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+ nfc->controller.ops = &rk_nfc_controller_ops;
+
+ nfc->cfg = of_device_get_match_data(dev);
+ nfc->dev = dev;
+
+ init_completion(&nfc->done);
+
+ nfc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nfc->regs)) {
+ ret = PTR_ERR(nfc->regs);
+ goto release_nfc;
+ }
+
+ nfc->nfc_clk = devm_clk_get(dev, "nfc");
+ if (IS_ERR(nfc->nfc_clk)) {
+ dev_dbg(dev, "no NFC clk\n");
+ /* Some earlier models, such as rk3066, have no NFC clk. */
+ }
+
+ nfc->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(nfc->ahb_clk)) {
+ dev_err(dev, "no ahb clk\n");
+ ret = PTR_ERR(nfc->ahb_clk);
+ goto release_nfc;
+ }
+
+ ret = rk_nfc_enable_clks(dev, nfc);
+ if (ret)
+ goto release_nfc;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = -EINVAL;
+ goto clk_disable;
+ }
+
+ writel(0, nfc->regs + nfc->cfg->int_en_off);
+ ret = devm_request_irq(dev, irq, rk_nfc_irq, 0x0, "rk-nand", nfc);
+ if (ret) {
+ dev_err(dev, "failed to request NFC irq\n");
+ goto clk_disable;
+ }
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = rk_nfc_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init NAND chips\n");
+ goto clk_disable;
+ }
+ return 0;
+
+clk_disable:
+ rk_nfc_disable_clks(nfc);
+release_nfc:
+ return ret;
+}
+
+static void rk_nfc_remove(struct platform_device *pdev)
+{
+ struct rk_nfc *nfc = platform_get_drvdata(pdev);
+
+ kfree(nfc->page_buf);
+ kfree(nfc->oob_buf);
+ rk_nfc_chips_cleanup(nfc);
+ rk_nfc_disable_clks(nfc);
+}
+
+static int __maybe_unused rk_nfc_suspend(struct device *dev)
+{
+ struct rk_nfc *nfc = dev_get_drvdata(dev);
+
+ rk_nfc_disable_clks(nfc);
+
+ return 0;
+}
+
+static int __maybe_unused rk_nfc_resume(struct device *dev)
+{
+ struct rk_nfc *nfc = dev_get_drvdata(dev);
+ struct rk_nfc_nand_chip *rknand;
+ struct nand_chip *chip;
+ int ret;
+ u32 i;
+
+ ret = rk_nfc_enable_clks(dev, nfc);
+ if (ret)
+ return ret;
+
+ /* Reset NAND chip if VCC was powered off. */
+ list_for_each_entry(rknand, &nfc->chips, node) {
+ chip = &rknand->chip;
+ for (i = 0; i < rknand->nsels; i++)
+ nand_reset(chip, i);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops rk_nfc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(rk_nfc_suspend, rk_nfc_resume)
+};
+
+static struct platform_driver rk_nfc_driver = {
+ .probe = rk_nfc_probe,
+ .remove_new = rk_nfc_remove,
+ .driver = {
+ .name = "rockchip-nfc",
+ .of_match_table = rk_nfc_id_table,
+ .pm = &rk_nfc_pm_ops,
+ },
+};
+
+module_platform_driver(rk_nfc_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Yifeng Zhao <yifeng.zhao@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip NAND Flash Controller Driver");
+MODULE_ALIAS("platform:rockchip-nand-controller");
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
new file mode 100644
index 0000000000..3d3d5c9814
--- /dev/null
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -0,0 +1,1232 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright © 2004-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Samsung S3C2410/S3C2440/S3C2412 NAND driver
+*/
+
+#define pr_fmt(fmt) "nand-s3c2410: " fmt
+
+#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/of.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/platform_data/mtd-nand-s3c2410.h>
+
+#define S3C2410_NFREG(x) (x)
+
+#define S3C2410_NFCONF S3C2410_NFREG(0x00)
+#define S3C2410_NFCMD S3C2410_NFREG(0x04)
+#define S3C2410_NFADDR S3C2410_NFREG(0x08)
+#define S3C2410_NFDATA S3C2410_NFREG(0x0C)
+#define S3C2410_NFSTAT S3C2410_NFREG(0x10)
+#define S3C2410_NFECC S3C2410_NFREG(0x14)
+#define S3C2440_NFCONT S3C2410_NFREG(0x04)
+#define S3C2440_NFCMD S3C2410_NFREG(0x08)
+#define S3C2440_NFADDR S3C2410_NFREG(0x0C)
+#define S3C2440_NFDATA S3C2410_NFREG(0x10)
+#define S3C2440_NFSTAT S3C2410_NFREG(0x20)
+#define S3C2440_NFMECC0 S3C2410_NFREG(0x2C)
+#define S3C2412_NFSTAT S3C2410_NFREG(0x28)
+#define S3C2412_NFMECC0 S3C2410_NFREG(0x34)
+#define S3C2410_NFCONF_EN (1<<15)
+#define S3C2410_NFCONF_INITECC (1<<12)
+#define S3C2410_NFCONF_nFCE (1<<11)
+#define S3C2410_NFCONF_TACLS(x) ((x)<<8)
+#define S3C2410_NFCONF_TWRPH0(x) ((x)<<4)
+#define S3C2410_NFCONF_TWRPH1(x) ((x)<<0)
+#define S3C2410_NFSTAT_BUSY (1<<0)
+#define S3C2440_NFCONF_TACLS(x) ((x)<<12)
+#define S3C2440_NFCONF_TWRPH0(x) ((x)<<8)
+#define S3C2440_NFCONF_TWRPH1(x) ((x)<<4)
+#define S3C2440_NFCONT_INITECC (1<<4)
+#define S3C2440_NFCONT_nFCE (1<<1)
+#define S3C2440_NFCONT_ENABLE (1<<0)
+#define S3C2440_NFSTAT_READY (1<<0)
+#define S3C2412_NFCONF_NANDBOOT (1<<31)
+#define S3C2412_NFCONT_INIT_MAIN_ECC (1<<5)
+#define S3C2412_NFCONT_nFCE0 (1<<1)
+#define S3C2412_NFSTAT_READY (1<<0)
+
+/* new oob placement block for use with hardware ecc generation
+ */
+static int s3c2410_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 0;
+ oobregion->length = 3;
+
+ return 0;
+}
+
+static int s3c2410_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 8;
+ oobregion->length = 8;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops s3c2410_ooblayout_ops = {
+ .ecc = s3c2410_ooblayout_ecc,
+ .free = s3c2410_ooblayout_free,
+};
+
+/* controller and mtd information */
+
+struct s3c2410_nand_info;
+
+/**
+ * struct s3c2410_nand_mtd - driver MTD structure
+ * @chip: The NAND chip information.
+ * @set: The platform information supplied for this set of NAND chips.
+ * @info: Link back to the hardware information.
+*/
+struct s3c2410_nand_mtd {
+ struct nand_chip chip;
+ struct s3c2410_nand_set *set;
+ struct s3c2410_nand_info *info;
+};
+
+enum s3c_cpu_type {
+ TYPE_S3C2410,
+ TYPE_S3C2412,
+ TYPE_S3C2440,
+};
+
+enum s3c_nand_clk_state {
+ CLOCK_DISABLE = 0,
+ CLOCK_ENABLE,
+ CLOCK_SUSPEND,
+};
+
+/* overview of the s3c2410 nand state */
+
+/**
+ * struct s3c2410_nand_info - NAND controller state.
+ * @controller: Base controller structure.
+ * @mtds: An array of MTD instances on this controller.
+ * @platform: The platform data for this board.
+ * @device: The platform device we bound to.
+ * @clk: The clock resource for this controller.
+ * @regs: The area mapped for the hardware registers.
+ * @sel_reg: Pointer to the register controlling the NAND selection.
+ * @sel_bit: The bit in @sel_reg to select the NAND chip.
+ * @mtd_count: The number of MTDs created from this controller.
+ * @save_sel: The contents of @sel_reg to be saved over suspend.
+ * @clk_rate: The clock rate from @clk.
+ * @clk_state: The current clock state.
+ * @cpu_type: The exact type of this controller.
+ */
+struct s3c2410_nand_info {
+ /* mtd info */
+ struct nand_controller controller;
+ struct s3c2410_nand_mtd *mtds;
+ struct s3c2410_platform_nand *platform;
+
+ /* device info */
+ struct device *device;
+ struct clk *clk;
+ void __iomem *regs;
+ void __iomem *sel_reg;
+ int sel_bit;
+ int mtd_count;
+ unsigned long save_sel;
+ unsigned long clk_rate;
+ enum s3c_nand_clk_state clk_state;
+
+ enum s3c_cpu_type cpu_type;
+};
+
+struct s3c24XX_nand_devtype_data {
+ enum s3c_cpu_type type;
+};
+
+static const struct s3c24XX_nand_devtype_data s3c2410_nand_devtype_data = {
+ .type = TYPE_S3C2410,
+};
+
+static const struct s3c24XX_nand_devtype_data s3c2412_nand_devtype_data = {
+ .type = TYPE_S3C2412,
+};
+
+static const struct s3c24XX_nand_devtype_data s3c2440_nand_devtype_data = {
+ .type = TYPE_S3C2440,
+};
+
+/* conversion functions */
+
+static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct s3c2410_nand_mtd,
+ chip);
+}
+
+static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd)
+{
+ return s3c2410_nand_mtd_toours(mtd)->info;
+}
+
+static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev)
+{
+ return platform_get_drvdata(dev);
+}
+
+static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
+{
+ return dev_get_platdata(&dev->dev);
+}
+
+static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
+{
+#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+/**
+ * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock.
+ * @info: The controller instance.
+ * @new_state: State to which clock should be set.
+ */
+static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
+ enum s3c_nand_clk_state new_state)
+{
+ if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND)
+ return;
+
+ if (info->clk_state == CLOCK_ENABLE) {
+ if (new_state != CLOCK_ENABLE)
+ clk_disable_unprepare(info->clk);
+ } else {
+ if (new_state == CLOCK_ENABLE)
+ clk_prepare_enable(info->clk);
+ }
+
+ info->clk_state = new_state;
+}
+
+/* timing calculations */
+
+#define NS_IN_KHZ 1000000
+
+/**
+ * s3c_nand_calc_rate - calculate timing data.
+ * @wanted: The cycle time in nanoseconds.
+ * @clk: The clock rate in kHz.
+ * @max: The maximum divider value.
+ *
+ * Calculate the timing value from the given parameters.
+ */
+static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
+{
+ int result;
+
+ result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ);
+
+ pr_debug("result %d from %ld, %d\n", result, clk, wanted);
+
+ if (result > max) {
+ pr_err("%d ns is too big for current clock rate %ld\n",
+ wanted, clk);
+ return -1;
+ }
+
+ if (result < 1)
+ result = 1;
+
+ return result;
+}
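+
+/*
+ * Worked example (illustrative numbers): for a wanted cycle time of
+ * 20 ns on a 100 MHz (100000 kHz) clock, the result is
+ * DIV_ROUND_UP(20 * 100000, 1000000) = 2 clock ticks.
+ */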
+
+#define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
+
+/* controller setup */
+
+/**
+ * s3c2410_nand_setrate - setup controller timing information.
+ * @info: The controller instance.
+ *
+ * Given the information supplied by the platform, calculate and set
+ * the necessary timing registers in the hardware to generate the
+ * necessary timing cycles to the hardware.
+ */
+static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
+{
+ struct s3c2410_platform_nand *plat = info->platform;
+ int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
+ int tacls, twrph0, twrph1;
+ unsigned long clkrate = clk_get_rate(info->clk);
+ unsigned long set, cfg, mask;
+ unsigned long flags;
+
+ /* calculate the timing information for the controller */
+
+ info->clk_rate = clkrate;
+ clkrate /= 1000; /* turn clock into kHz for ease of use */
+
+ if (plat != NULL) {
+ tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max);
+ twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8);
+ twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8);
+ } else {
+ /* default timings */
+ tacls = tacls_max;
+ twrph0 = 8;
+ twrph1 = 8;
+ }
+
+ if (tacls < 0 || twrph0 < 0 || twrph1 < 0) {
+ dev_err(info->device, "cannot get suitable timings\n");
+ return -EINVAL;
+ }
+
+ dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
+ tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate),
+ twrph1, to_ns(twrph1, clkrate));
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ mask = (S3C2410_NFCONF_TACLS(3) |
+ S3C2410_NFCONF_TWRPH0(7) |
+ S3C2410_NFCONF_TWRPH1(7));
+ set = S3C2410_NFCONF_EN;
+ set |= S3C2410_NFCONF_TACLS(tacls - 1);
+ set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
+ set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
+ break;
+
+ case TYPE_S3C2440:
+ case TYPE_S3C2412:
+ mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) |
+ S3C2440_NFCONF_TWRPH0(7) |
+ S3C2440_NFCONF_TWRPH1(7));
+
+ set = S3C2440_NFCONF_TACLS(tacls - 1);
+ set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
+ set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
+ break;
+
+ default:
+ BUG();
+ }
+
+ local_irq_save(flags);
+
+ cfg = readl(info->regs + S3C2410_NFCONF);
+ cfg &= ~mask;
+ cfg |= set;
+ writel(cfg, info->regs + S3C2410_NFCONF);
+
+ local_irq_restore(flags);
+
+ dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
+
+ return 0;
+}
+
+/**
+ * s3c2410_nand_inithw - basic hardware initialisation
+ * @info: The hardware state.
+ *
+ * Do the basic initialisation of the hardware, using s3c2410_nand_setrate()
+ * to set up the hardware access speeds and enable the controller.
+*/
+static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
+{
+ int ret;
+
+ ret = s3c2410_nand_setrate(info);
+ if (ret < 0)
+ return ret;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ default:
+ break;
+
+ case TYPE_S3C2440:
+ case TYPE_S3C2412:
+ /* enable the controller and de-assert nFCE */
+
+ writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
+ }
+
+ return 0;
+}
+
+/**
+ * s3c2410_nand_select_chip - select the given nand chip
+ * @this: NAND chip object.
+ * @chip: The chip number.
+ *
+ * This is called by the MTD layer to either select a given chip for the
+ * @mtd instance, or to indicate that the access has finished and the
+ * chip can be de-selected.
+ *
+ * The routine ensures that the nFCE line is correctly set up, and any
+ * platform specific selection code is called to route nFCE to the specific
+ * chip.
+ */
+static void s3c2410_nand_select_chip(struct nand_chip *this, int chip)
+{
+ struct s3c2410_nand_info *info;
+ struct s3c2410_nand_mtd *nmtd;
+ unsigned long cur;
+
+ nmtd = nand_get_controller_data(this);
+ info = nmtd->info;
+
+ if (chip != -1)
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+
+ cur = readl(info->sel_reg);
+
+ if (chip == -1) {
+ cur |= info->sel_bit;
+ } else {
+ if (nmtd->set != NULL && chip > nmtd->set->nr_chips) {
+ dev_err(info->device, "invalid chip %d\n", chip);
+ return;
+ }
+
+ if (info->platform != NULL) {
+ if (info->platform->select_chip != NULL)
+ (info->platform->select_chip) (nmtd->set, chip);
+ }
+
+ cur &= ~info->sel_bit;
+ }
+
+ writel(cur, info->sel_reg);
+
+ if (chip == -1)
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
+}
+
+/* s3c2410_nand_hwcontrol
+ *
+ * Issue command and address cycles to the chip
+*/
+
+static void s3c2410_nand_hwcontrol(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, info->regs + S3C2410_NFCMD);
+ else
+ writeb(cmd, info->regs + S3C2410_NFADDR);
+}
+
+/* command and control functions */
+
+static void s3c2440_nand_hwcontrol(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, info->regs + S3C2440_NFCMD);
+ else
+ writeb(cmd, info->regs + S3C2440_NFADDR);
+}
+
+/* s3c2410_nand_devready()
+ *
+ * returns 0 if the nand is busy, 1 if it is ready
+*/
+
+static int s3c2410_nand_devready(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
+}
+
+static int s3c2440_nand_devready(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
+}
+
+static int s3c2412_nand_devready(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY;
+}
+
+/* ECC handling functions */
+
+static int s3c2410_nand_correct_data(struct nand_chip *chip, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned int diff0, diff1, diff2;
+ unsigned int bit, byte;
+
+ pr_debug("%s(%p,%p,%p,%p)\n", __func__, mtd, dat, read_ecc, calc_ecc);
+
+ diff0 = read_ecc[0] ^ calc_ecc[0];
+ diff1 = read_ecc[1] ^ calc_ecc[1];
+ diff2 = read_ecc[2] ^ calc_ecc[2];
+
+ pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n",
+ __func__, 3, read_ecc, 3, calc_ecc,
+ diff0, diff1, diff2);
+
+ if (diff0 == 0 && diff1 == 0 && diff2 == 0)
+ return 0; /* ECC is ok */
+
+ /* Sometimes people do not think about using the ECC, so check
+ * to see if we have a 0xff,0xff,0xff read ECC and then ignore
+ * the error, on the assumption that this is an un-ECC'd page.
+ */
+ if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff
+ && info->platform->ignore_unset_ecc)
+ return 0;
+
+ /* Can we correct this ECC (i.e., one row and one column change)?
+ * Note, this is similar to the 256-byte ECC code on SmartMedia. */
+
+ if (((diff0 ^ (diff0 >> 1)) & 0x55) == 0x55 &&
+ ((diff1 ^ (diff1 >> 1)) & 0x55) == 0x55 &&
+ ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) {
+ /* calculate the bit position of the error */
+
+ bit = ((diff2 >> 3) & 1) |
+ ((diff2 >> 4) & 2) |
+ ((diff2 >> 5) & 4);
+
+ /* calculate the byte position of the error */
+
+ byte = ((diff2 << 7) & 0x100) |
+ ((diff1 << 0) & 0x80) |
+ ((diff1 << 1) & 0x40) |
+ ((diff1 << 2) & 0x20) |
+ ((diff1 << 3) & 0x10) |
+ ((diff0 >> 4) & 0x08) |
+ ((diff0 >> 3) & 0x04) |
+ ((diff0 >> 2) & 0x02) |
+ ((diff0 >> 1) & 0x01);
+
+ dev_dbg(info->device, "correcting error bit %d, byte %d\n",
+ bit, byte);
+
+ dat[byte] ^= (1 << bit);
+ return 1;
+ }
+
+ /* If there is only one bit difference in the ECC, then
+ * only a single row or column parity bit has changed, which
+ * means the error is most probably in the ECC itself. */
+
+ diff0 |= (diff1 << 8);
+ diff0 |= (diff2 << 16);
+
+ /* equal to "(diff0 & ~(1 << __ffs(diff0)))" */
+ if ((diff0 & (diff0 - 1)) == 0)
+ return 1;
+
+ return -1;
+}
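+
+/*
+ * The 0x55 tests above rely on a property of this Hamming-style code:
+ * each parity bit is stored next to its complement, so a single-bit
+ * data error flips exactly one bit of every pair and
+ * ((diff ^ (diff >> 1)) & 0x55) == 0x55 holds for all three
+ * difference bytes.
+ */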
+
+/* ECC functions
+ *
+ * These allow the s3c2410 and s3c2440 to use the controller's ECC
+ * generator block to ECC the data as it passes through.
+*/
+
+static void s3c2410_nand_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ struct s3c2410_nand_info *info;
+ unsigned long ctrl;
+
+ info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
+ ctrl = readl(info->regs + S3C2410_NFCONF);
+ ctrl |= S3C2410_NFCONF_INITECC;
+ writel(ctrl, info->regs + S3C2410_NFCONF);
+}
+
+static void s3c2412_nand_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ struct s3c2410_nand_info *info;
+ unsigned long ctrl;
+
+ info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
+ ctrl = readl(info->regs + S3C2440_NFCONT);
+ writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC,
+ info->regs + S3C2440_NFCONT);
+}
+
+static void s3c2440_nand_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ struct s3c2410_nand_info *info;
+ unsigned long ctrl;
+
+ info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
+ ctrl = readl(info->regs + S3C2440_NFCONT);
+ writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
+}
+
+static int s3c2410_nand_calculate_ecc(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0);
+ ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
+ ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
+
+ pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
+
+ return 0;
+}
+
+static int s3c2412_nand_calculate_ecc(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ecc = readl(info->regs + S3C2412_NFMECC0);
+
+ ecc_code[0] = ecc;
+ ecc_code[1] = ecc >> 8;
+ ecc_code[2] = ecc >> 16;
+
+ pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
+
+ return 0;
+}
+
+static int s3c2440_nand_calculate_ecc(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
+
+ ecc_code[0] = ecc;
+ ecc_code[1] = ecc >> 8;
+ ecc_code[2] = ecc >> 16;
+
+ pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff);
+
+ return 0;
+}
+
+/* Override the standard functions for a little more speed. We can
+ * use block read/write operations to move the data buffers to/from
+ * the controller.
+*/
+
+static void s3c2410_nand_read_buf(struct nand_chip *this, u_char *buf, int len)
+{
+ readsb(this->legacy.IO_ADDR_R, buf, len);
+}
+
+static void s3c2440_nand_read_buf(struct nand_chip *this, u_char *buf, int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup if we've got less than a word to do */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--)
+ *buf++ = readb(info->regs + S3C2440_NFDATA);
+ }
+}
+
+static void s3c2410_nand_write_buf(struct nand_chip *this, const u_char *buf,
+ int len)
+{
+ writesb(this->legacy.IO_ADDR_W, buf, len);
+}
+
+static void s3c2440_nand_write_buf(struct nand_chip *this, const u_char *buf,
+ int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup any fractional write */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--, buf++)
+ writeb(*buf, info->regs + S3C2440_NFDATA);
+ }
+}
+
+/* device management functions */
+
+static void s3c24xx_nand_remove(struct platform_device *pdev)
+{
+ struct s3c2410_nand_info *info = to_nand_info(pdev);
+
+ if (info == NULL)
+ return;
+
+ /* Release all our mtds and their partitions, then go through
+ * freeing the resources used
+ */
+
+ if (info->mtds != NULL) {
+ struct s3c2410_nand_mtd *ptr = info->mtds;
+ int mtdno;
+
+ for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
+ pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
+ WARN_ON(mtd_device_unregister(nand_to_mtd(&ptr->chip)));
+ nand_cleanup(&ptr->chip);
+ }
+ }
+
+ /* free the common resources */
+
+ if (!IS_ERR(info->clk))
+ s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
+}
+
+static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
+ struct s3c2410_nand_mtd *mtd,
+ struct s3c2410_nand_set *set)
+{
+ if (set) {
+ struct mtd_info *mtdinfo = nand_to_mtd(&mtd->chip);
+
+ mtdinfo->name = set->name;
+
+ return mtd_device_register(mtdinfo, set->partitions,
+ set->nr_partitions);
+ }
+
+ return -ENODEV;
+}
+
+static int s3c2410_nand_setup_interface(struct nand_chip *chip, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ struct s3c2410_platform_nand *pdata = info->platform;
+ const struct nand_sdr_timings *timings;
+ int tacls;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
+ tacls = timings->tCLS_min - timings->tWP_min;
+ if (tacls < 0)
+ tacls = 0;
+
+ pdata->tacls = DIV_ROUND_UP(tacls, 1000);
+ pdata->twrph0 = DIV_ROUND_UP(timings->tWP_min, 1000);
+ pdata->twrph1 = DIV_ROUND_UP(timings->tCLH_min, 1000);
+
+ return s3c2410_nand_setrate(info);
+}
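+
+/*
+ * Worked example (illustrative timings): with tCLS_min = 25000 ps and
+ * tWP_min = 15000 ps, tacls = 10000 ps, so pdata->tacls becomes
+ * DIV_ROUND_UP(10000, 1000) = 10 ns and pdata->twrph0 becomes
+ * DIV_ROUND_UP(15000, 1000) = 15 ns, before s3c2410_nand_setrate()
+ * converts these to clock ticks.
+ */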
+
+/**
+ * s3c2410_nand_init_chip - initialise a single instance of a chip
+ * @info: The base NAND controller the chip is on.
+ * @nmtd: The new controller MTD instance to fill in.
+ * @set: The information passed from the board specific platform data.
+ *
+ * Initialise the given @nmtd from the information in @info and @set. This
+ * readies the structure for use with the MTD layer functions by ensuring
+ * all pointers are set up and the necessary control routines are selected.
+ */
+static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
+ struct s3c2410_nand_mtd *nmtd,
+ struct s3c2410_nand_set *set)
+{
+ struct device_node *np = info->device->of_node;
+ struct nand_chip *chip = &nmtd->chip;
+ void __iomem *regs = info->regs;
+
+ nand_set_flash_node(chip, set->of_node);
+
+ chip->legacy.write_buf = s3c2410_nand_write_buf;
+ chip->legacy.read_buf = s3c2410_nand_read_buf;
+ chip->legacy.select_chip = s3c2410_nand_select_chip;
+ chip->legacy.chip_delay = 50;
+ nand_set_controller_data(chip, nmtd);
+ chip->options = set->options;
+ chip->controller = &info->controller;
+
+ /*
+ * let's keep behavior unchanged for legacy boards booting via pdata and
+ * auto-detect timings only when booting with a device tree.
+ */
+ if (!np)
+ chip->options |= NAND_KEEP_TIMINGS;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->legacy.IO_ADDR_W = regs + S3C2410_NFDATA;
+ info->sel_reg = regs + S3C2410_NFCONF;
+ info->sel_bit = S3C2410_NFCONF_nFCE;
+ chip->legacy.cmd_ctrl = s3c2410_nand_hwcontrol;
+ chip->legacy.dev_ready = s3c2410_nand_devready;
+ break;
+
+ case TYPE_S3C2440:
+ chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA;
+ info->sel_reg = regs + S3C2440_NFCONT;
+ info->sel_bit = S3C2440_NFCONT_nFCE;
+ chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol;
+ chip->legacy.dev_ready = s3c2440_nand_devready;
+ chip->legacy.read_buf = s3c2440_nand_read_buf;
+ chip->legacy.write_buf = s3c2440_nand_write_buf;
+ break;
+
+ case TYPE_S3C2412:
+ chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA;
+ info->sel_reg = regs + S3C2440_NFCONT;
+ info->sel_bit = S3C2412_NFCONT_nFCE0;
+ chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol;
+ chip->legacy.dev_ready = s3c2412_nand_devready;
+
+ if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT)
+ dev_info(info->device, "System booted from NAND\n");
+
+ break;
+ }
+
+ chip->legacy.IO_ADDR_R = chip->legacy.IO_ADDR_W;
+
+ nmtd->info = info;
+ nmtd->set = set;
+
+ chip->ecc.engine_type = info->platform->engine_type;
+
+ /*
+ * If you use u-boot BBT creation code, specifying this flag will
+ * let the kernel fish out the BBT from the NAND.
+ */
+ if (set->flash_bbt)
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+}
+
+/**
+ * s3c2410_nand_attach_chip - Init the ECC engine after NAND scan
+ * @chip: The NAND chip
+ *
+ * This hook is called by the core after the identification of the NAND chip,
+ * once the relevant per-chip information is up to date. This call ensures that
+ * we update the internal state accordingly.
+ *
+ * The internal state is currently limited to the ECC state information.
+*/
+static int s3c2410_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ switch (chip->ecc.engine_type) {
+
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ dev_info(info->device, "ECC disabled\n");
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ /*
+ * This driver expects Hamming based ECC when engine_type is set
+ * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
+ * NAND_ECC_ALGO_HAMMING to avoid adding an extra ecc_algo field
+ * to s3c2410_platform_nand.
+ */
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ dev_info(info->device, "soft ECC\n");
+ break;
+
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ chip->ecc.correct = s3c2410_nand_correct_data;
+ chip->ecc.strength = 1;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2412:
+ chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2412_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2440:
+ chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2440_nand_calculate_ecc;
+ break;
+ }
+
+ dev_dbg(info->device, "chip %p => page shift %d\n",
+ chip, chip->page_shift);
+
+ /* change the behaviour depending on whether we are using
+ * the large or small page nand device */
+ if (chip->page_shift > 10) {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ } else {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ mtd_set_ooblayout(nand_to_mtd(chip),
+ &s3c2410_ooblayout_ops);
+ }
+
+ dev_info(info->device, "hardware ECC\n");
+ break;
+
+ default:
+ dev_err(info->device, "invalid ECC mode!\n");
+ return -EINVAL;
+ }
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ return 0;
+}
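
For orientation, a minimal user-space sketch (plain C, not driver code; the helper name is made up) of how the ECC geometry configured above maps to OOB consumption: the core splits the page into writesize / ecc.size steps, each contributing ecc.bytes of ECC.

#include <stdio.h>

/* ecc_oob_bytes() is a hypothetical helper, not a kernel API. */
static unsigned int ecc_oob_bytes(unsigned int writesize,
				  unsigned int ecc_size,
				  unsigned int ecc_bytes)
{
	return (writesize / ecc_size) * ecc_bytes;
}

int main(void)
{
	/* large page (page_shift > 10): 2048/256 steps * 3 = 24 bytes */
	printf("%u\n", ecc_oob_bytes(2048, 256, 3));
	/* small page: 512/512 steps * 3 = 3 bytes */
	printf("%u\n", ecc_oob_bytes(512, 512, 3));
	return 0;
}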
+
+static const struct nand_controller_ops s3c24xx_nand_controller_ops = {
+ .attach_chip = s3c2410_nand_attach_chip,
+ .setup_interface = s3c2410_nand_setup_interface,
+};
+
+static const struct of_device_id s3c24xx_nand_dt_ids[] = {
+ {
+ .compatible = "samsung,s3c2410-nand",
+ .data = &s3c2410_nand_devtype_data,
+ }, {
+ /* also compatible with s3c6400 */
+ .compatible = "samsung,s3c2412-nand",
+ .data = &s3c2412_nand_devtype_data,
+ }, {
+ .compatible = "samsung,s3c2440-nand",
+ .data = &s3c2440_nand_devtype_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s3c24xx_nand_dt_ids);
+
+static int s3c24xx_nand_probe_dt(struct platform_device *pdev)
+{
+ const struct s3c24XX_nand_devtype_data *devtype_data;
+ struct s3c2410_platform_nand *pdata;
+ struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node, *child;
+ struct s3c2410_nand_set *sets;
+
+ devtype_data = of_device_get_match_data(&pdev->dev);
+ if (!devtype_data)
+ return -ENODEV;
+
+ info->cpu_type = devtype_data->type;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdev->dev.platform_data = pdata;
+
+ pdata->nr_sets = of_get_child_count(np);
+ if (!pdata->nr_sets)
+ return 0;
+
+ sets = devm_kcalloc(&pdev->dev, pdata->nr_sets, sizeof(*sets),
+ GFP_KERNEL);
+ if (!sets)
+ return -ENOMEM;
+
+ pdata->sets = sets;
+
+ for_each_available_child_of_node(np, child) {
+ sets->name = (char *)child->name;
+ sets->of_node = child;
+ sets->nr_chips = 1;
+
+ of_node_get(child);
+
+ sets++;
+ }
+
+ return 0;
+}
+
+static int s3c24xx_nand_probe_pdata(struct platform_device *pdev)
+{
+ struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
+
+ info->cpu_type = platform_get_device_id(pdev)->driver_data;
+
+ return 0;
+}
+
+/* s3c24xx_nand_probe
+ *
+ * called by the device layer when it finds a device matching
+ * one our driver can handle. This code checks to see if it can
+ * allocate all necessary resources, then calls the nand layer
+ * to look for devices.
+ */
+static int s3c24xx_nand_probe(struct platform_device *pdev)
+{
+ struct s3c2410_platform_nand *plat;
+ struct s3c2410_nand_info *info;
+ struct s3c2410_nand_mtd *nmtd;
+ struct s3c2410_nand_set *sets;
+ struct resource *res;
+ int err = 0;
+ int size;
+ int nr_sets;
+ int setno;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ nand_controller_init(&info->controller);
+ info->controller.ops = &s3c24xx_nand_controller_ops;
+
+ /* get the clock source and enable it */
+
+ info->clk = devm_clk_get(&pdev->dev, "nand");
+ if (IS_ERR(info->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ err = -ENOENT;
+ goto exit_error;
+ }
+
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+
+ if (pdev->dev.of_node)
+ err = s3c24xx_nand_probe_dt(pdev);
+ else
+ err = s3c24xx_nand_probe_pdata(pdev);
+
+ if (err)
+ goto exit_error;
+
+ plat = to_nand_plat(pdev);
+
+ /* allocate and map the resource */
+
+ /* currently we assume we have the one resource */
+ res = pdev->resource;
+ size = resource_size(res);
+
+ info->device = &pdev->dev;
+ info->platform = plat;
+
+ info->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->regs)) {
+ err = PTR_ERR(info->regs);
+ goto exit_error;
+ }
+
+ dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs);
+
+ if (!plat->sets || plat->nr_sets < 1) {
+ err = -EINVAL;
+ goto exit_error;
+ }
+
+ sets = plat->sets;
+ nr_sets = plat->nr_sets;
+
+ info->mtd_count = nr_sets;
+
+ /* allocate our information */
+
+ size = nr_sets * sizeof(*info->mtds);
+ info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (info->mtds == NULL) {
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ /* initialise all possible chips */
+
+ nmtd = info->mtds;
+
+ for (setno = 0; setno < nr_sets; setno++, nmtd++, sets++) {
+ struct mtd_info *mtd = nand_to_mtd(&nmtd->chip);
+
+ pr_debug("initialising set %d (%p, info %p)\n",
+ setno, nmtd, info);
+
+ mtd->dev.parent = &pdev->dev;
+ s3c2410_nand_init_chip(info, nmtd, sets);
+
+ err = nand_scan(&nmtd->chip, sets ? sets->nr_chips : 1);
+ if (err)
+ goto exit_error;
+
+ s3c2410_nand_add_partition(info, nmtd, sets);
+ }
+
+ /* initialise the hardware */
+ err = s3c2410_nand_inithw(info);
+ if (err != 0)
+ goto exit_error;
+
+ if (allow_clk_suspend(info)) {
+ dev_info(&pdev->dev, "clock idle support enabled\n");
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
+ }
+
+ return 0;
+
+ exit_error:
+ s3c24xx_nand_remove(pdev);
+
+ if (err == 0)
+ err = -EINVAL;
+ return err;
+}
+
+/* PM Support */
+#ifdef CONFIG_PM
+
+static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
+{
+ struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+
+ if (info) {
+ info->save_sel = readl(info->sel_reg);
+
+ /* For the moment, we must ensure nFCE is high during
+ * the time we are suspended. This really should be
+ * handled by suspending the MTDs we are using, but
+ * that is currently not the case. */
+
+ writel(info->save_sel | info->sel_bit, info->sel_reg);
+
+ s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
+ }
+
+ return 0;
+}
+
+static int s3c24xx_nand_resume(struct platform_device *dev)
+{
+ struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+ unsigned long sel;
+
+ if (info) {
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+ s3c2410_nand_inithw(info);
+
+ /* Restore the state of the nFCE line. */
+
+ sel = readl(info->sel_reg);
+ sel &= ~info->sel_bit;
+ sel |= info->save_sel & info->sel_bit;
+ writel(sel, info->sel_reg);
+
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
+ }
+
+ return 0;
+}
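
The restore above is a read-modify-write that keeps every bit of the current register except the nFCE bit, which comes from the value saved at suspend time. A one-function sketch of the bit arithmetic (plain C; the helper is hypothetical):

#include <stdio.h>

/* Hypothetical helper mirroring the nFCE restore above. */
static unsigned long restore_sel(unsigned long cur, unsigned long saved,
				 unsigned long sel_bit)
{
	return (cur & ~sel_bit) | (saved & sel_bit);
}

int main(void)
{
	/* current reg 0x0, saved nFCE (bit 1) was set -> 0x2 */
	printf("%#lx\n", restore_sel(0x0, 0x2, 0x2));
	return 0;
}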
+
+#else
+#define s3c24xx_nand_suspend NULL
+#define s3c24xx_nand_resume NULL
+#endif
+
+/* driver device registration */
+
+static const struct platform_device_id s3c24xx_driver_ids[] = {
+ {
+ .name = "s3c2410-nand",
+ .driver_data = TYPE_S3C2410,
+ }, {
+ .name = "s3c2440-nand",
+ .driver_data = TYPE_S3C2440,
+ }, {
+ .name = "s3c2412-nand",
+ .driver_data = TYPE_S3C2412,
+ }, {
+ .name = "s3c6400-nand",
+ .driver_data = TYPE_S3C2412, /* compatible with 2412 */
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
+
+static struct platform_driver s3c24xx_nand_driver = {
+ .probe = s3c24xx_nand_probe,
+ .remove_new = s3c24xx_nand_remove,
+ .suspend = s3c24xx_nand_suspend,
+ .resume = s3c24xx_nand_resume,
+ .id_table = s3c24xx_driver_ids,
+ .driver = {
+ .name = "s3c24xx-nand",
+ .of_match_table = s3c24xx_nand_dt_ids,
+ },
+};
+
+module_platform_driver(s3c24xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
new file mode 100644
index 0000000000..3e5df75cbc
--- /dev/null
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -0,0 +1,1230 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SuperH FLCTL nand controller
+ *
+ * Copyright (c) 2008 Renesas Solutions Corp.
+ * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
+ *
+ * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/sh_flctl.h>
+
+static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 0;
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 12;
+ oobregion->length = 4;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
+ .ecc = flctl_4secc_ooblayout_sp_ecc,
+ .free = flctl_4secc_ooblayout_sp_free,
+};
+
+static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 6;
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = section * 16;
+ oobregion->length = 6;
+
+ if (!section) {
+ oobregion->offset += 2;
+ oobregion->length -= 2;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
+ .ecc = flctl_4secc_ooblayout_lp_ecc,
+ .free = flctl_4secc_ooblayout_lp_free,
+};
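
Putting the two large-page callbacks together: with a 2KiB page (4 sectors), 16 OOB bytes per sector and ecc.bytes = 10 (set later in the attach hook), the per-sector regions work out as below. A user-space sketch, assuming those values:

#include <stdio.h>

int main(void)
{
	int section;

	for (section = 0; section < 4; section++) {
		int ecc_off  = section * 16 + 6;
		int free_off = section * 16 + (section == 0 ? 2 : 0);
		int free_len = section == 0 ? 4 : 6;

		/* section 0: ecc 6-15, free 2-5 (bytes 0-1 hold the BBM) */
		printf("sector %d: ecc %d-%d, free %d-%d\n", section,
		       ecc_off, ecc_off + 9, free_off,
		       free_off + free_len - 1);
	}
	return 0;
}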
+
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr flctl_4secc_smallpage = {
+ .offs = 11,
+ .len = 1,
+ .pattern = scan_ff_pattern,
+};
+
+static struct nand_bbt_descr flctl_4secc_largepage = {
+ .offs = 0,
+ .len = 2,
+ .pattern = scan_ff_pattern,
+};
+
+static void empty_fifo(struct sh_flctl *flctl)
+{
+ writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
+ writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
+}
+
+static void start_translation(struct sh_flctl *flctl)
+{
+ writeb(TRSTRT, FLTRCR(flctl));
+}
+
+static void timeout_error(struct sh_flctl *flctl, const char *str)
+{
+ dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
+}
+
+static void wait_completion(struct sh_flctl *flctl)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+
+ while (timeout--) {
+ if (readb(FLTRCR(flctl)) & TREND) {
+ writeb(0x0, FLTRCR(flctl));
+ return;
+ }
+ udelay(1);
+ }
+
+ timeout_error(flctl, __func__);
+ writeb(0x0, FLTRCR(flctl));
+}
+
+static void flctl_dma_complete(void *param)
+{
+ struct sh_flctl *flctl = param;
+
+ complete(&flctl->dma_complete);
+}
+
+static void flctl_release_dma(struct sh_flctl *flctl)
+{
+ if (flctl->chan_fifo0_rx) {
+ dma_release_channel(flctl->chan_fifo0_rx);
+ flctl->chan_fifo0_rx = NULL;
+ }
+ if (flctl->chan_fifo0_tx) {
+ dma_release_channel(flctl->chan_fifo0_tx);
+ flctl->chan_fifo0_tx = NULL;
+ }
+}
+
+static void flctl_setup_dma(struct sh_flctl *flctl)
+{
+ dma_cap_mask_t mask;
+ struct dma_slave_config cfg;
+ struct platform_device *pdev = flctl->pdev;
+ struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ if (!pdata)
+ return;
+
+ if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
+ return;
+
+ /* We can only either use DMA for both Tx and Rx or not use it at all */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)(uintptr_t)pdata->slave_id_fifo0_tx);
+ dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
+ flctl->chan_fifo0_tx);
+
+ if (!flctl->chan_fifo0_tx)
+ return;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.direction = DMA_MEM_TO_DEV;
+ cfg.dst_addr = flctl->fifo;
+ cfg.src_addr = 0;
+ ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
+ if (ret < 0)
+ goto err;
+
+ flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)(uintptr_t)pdata->slave_id_fifo0_rx);
+ dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
+ flctl->chan_fifo0_rx);
+
+ if (!flctl->chan_fifo0_rx)
+ goto err;
+
+ cfg.direction = DMA_DEV_TO_MEM;
+ cfg.dst_addr = 0;
+ cfg.src_addr = flctl->fifo;
+ ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
+ if (ret < 0)
+ goto err;
+
+ init_completion(&flctl->dma_complete);
+
+ return;
+
+err:
+ flctl_release_dma(flctl);
+}
+
+static void set_addr(struct mtd_info *mtd, int column, int page_addr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint32_t addr = 0;
+
+ if (column == -1) {
+ addr = page_addr; /* ERASE1 */
+ } else if (page_addr != -1) {
+ /* SEQIN, READ0, etc.. */
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ if (flctl->page_size) {
+ addr = column & 0x0FFF;
+ addr |= (page_addr & 0xff) << 16;
+ addr |= ((page_addr >> 8) & 0xff) << 24;
+			/* larger than 128MB */
+ if (flctl->rw_ADRCNT == ADRCNT2_E) {
+ uint32_t addr2;
+ addr2 = (page_addr >> 16) & 0xff;
+ writel(addr2, FLADR2(flctl));
+ }
+ } else {
+ addr = column;
+ addr |= (page_addr & 0xff) << 8;
+ addr |= ((page_addr >> 8) & 0xff) << 16;
+ addr |= ((page_addr >> 16) & 0xff) << 24;
+ }
+ }
+ writel(addr, FLADR(flctl));
+}
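
A worked example of the large-page packing above (plain C sketch; the column and page values are hypothetical). For devices larger than 128MB the fifth address byte goes to FLADR2:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t column = 0x10, page = 0x12345;
	uint32_t fladr = (column & 0x0FFF) |
			 ((page & 0xff) << 16) |
			 (((page >> 8) & 0xff) << 24);
	uint32_t fladr2 = (page >> 16) & 0xff;

	/* FLADR=0x23450010 FLADR2=0x01 */
	printf("FLADR=0x%08x FLADR2=0x%02x\n", fladr, fladr2);
	return 0;
}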
+
+static void wait_rfifo_ready(struct sh_flctl *flctl)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+
+ while (timeout--) {
+ uint32_t val;
+ /* check FIFO */
+ val = readl(FLDTCNTR(flctl)) >> 16;
+ if (val & 0xFF)
+ return;
+ udelay(1);
+ }
+ timeout_error(flctl, __func__);
+}
+
+static void wait_wfifo_ready(struct sh_flctl *flctl)
+{
+ uint32_t len, timeout = LOOP_TIMEOUT_MAX;
+
+ while (timeout--) {
+ /* check FIFO */
+ len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
+ if (len >= 4)
+ return;
+ udelay(1);
+ }
+ timeout_error(flctl, __func__);
+}
+
+static enum flctl_ecc_res_t wait_recfifo_ready
+ (struct sh_flctl *flctl, int sector_number)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+ void __iomem *ecc_reg[4];
+ int i;
+ int state = FL_SUCCESS;
+ uint32_t data, size;
+
+ /*
+	 * First this loop checks in FLDTCNTR if we are ready to read out the
+ * oob data. This is the case if either all went fine without errors or
+ * if the bottom part of the loop corrected the errors or marked them as
+ * uncorrectable and the controller is given time to push the data into
+ * the FIFO.
+ */
+ while (timeout--) {
+ /* check if all is ok and we can read out the OOB */
+ size = readl(FLDTCNTR(flctl)) >> 24;
+ if ((size & 0xFF) == 4)
+ return state;
+
+ /* check if a correction code has been calculated */
+ if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
+ /*
+ * either we wait for the fifo to be filled or a
+ * correction pattern is being generated
+ */
+ udelay(1);
+ continue;
+ }
+
+ /* check for an uncorrectable error */
+ if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
+ /* check if we face a non-empty page */
+ for (i = 0; i < 512; i++) {
+ if (flctl->done_buff[i] != 0xff) {
+ state = FL_ERROR; /* can't correct */
+ break;
+ }
+ }
+
+ if (state == FL_SUCCESS)
+ dev_dbg(&flctl->pdev->dev,
+ "reading empty sector %d, ecc error ignored\n",
+ sector_number);
+
+ writel(0, FL4ECCCR(flctl));
+ continue;
+ }
+
+ /* start error correction */
+ ecc_reg[0] = FL4ECCRESULT0(flctl);
+ ecc_reg[1] = FL4ECCRESULT1(flctl);
+ ecc_reg[2] = FL4ECCRESULT2(flctl);
+ ecc_reg[3] = FL4ECCRESULT3(flctl);
+
+ for (i = 0; i < 3; i++) {
+ uint8_t org;
+ unsigned int index;
+
+ data = readl(ecc_reg[i]);
+
+ if (flctl->page_size)
+ index = (512 * sector_number) +
+ (data >> 16);
+ else
+ index = data >> 16;
+
+ org = flctl->done_buff[index];
+ flctl->done_buff[index] = org ^ (data & 0xFF);
+ }
+ state = FL_REPAIRABLE;
+ writel(0, FL4ECCCR(flctl));
+ }
+
+ timeout_error(flctl, __func__);
+ return FL_TIMEOUT; /* timeout */
+}
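
Each FL4ECCRESULTx word consumed by the repair loop above carries the failing byte index in its upper half and an XOR pattern in the low byte. A sketch of the correction step (plain C; the result word is hypothetical):

#include <stdint.h>

/* Apply one hardware ECC result word to the sector buffer, as the
 * loop above does: bits 31:16 select the byte, bits 7:0 flip it. */
static void apply_ecc_result(uint8_t *buf, uint32_t data)
{
	buf[data >> 16] ^= data & 0xFF;
}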
+
+static void wait_wecfifo_ready(struct sh_flctl *flctl)
+{
+ uint32_t timeout = LOOP_TIMEOUT_MAX;
+ uint32_t len;
+
+ while (timeout--) {
+ /* check FLECFIFO */
+ len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
+ if (len >= 4)
+ return;
+ udelay(1);
+ }
+ timeout_error(flctl, __func__);
+}
+
+static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
+ int len, enum dma_data_direction dir)
+{
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan;
+ enum dma_transfer_direction tr_dir;
+ dma_addr_t dma_addr;
+ dma_cookie_t cookie;
+ uint32_t reg;
+ int ret = 0;
+ unsigned long time_left;
+
+ if (dir == DMA_FROM_DEVICE) {
+ chan = flctl->chan_fifo0_rx;
+ tr_dir = DMA_DEV_TO_MEM;
+ } else {
+ chan = flctl->chan_fifo0_tx;
+ tr_dir = DMA_MEM_TO_DEV;
+ }
+
+ dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
+
+ if (!dma_mapping_error(chan->device->dev, dma_addr))
+ desc = dmaengine_prep_slave_single(chan, dma_addr, len,
+ tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (desc) {
+ reg = readl(FLINTDMACR(flctl));
+ reg |= DREQ0EN;
+ writel(reg, FLINTDMACR(flctl));
+
+ desc->callback = flctl_dma_complete;
+ desc->callback_param = flctl;
+ cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie)) {
+ ret = dma_submit_error(cookie);
+ dev_warn(&flctl->pdev->dev,
+ "DMA submit failed, falling back to PIO\n");
+ goto out;
+ }
+
+ dma_async_issue_pending(chan);
+ } else {
+ /* DMA failed, fall back to PIO */
+ flctl_release_dma(flctl);
+ dev_warn(&flctl->pdev->dev,
+ "DMA failed, falling back to PIO\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ time_left =
+ wait_for_completion_timeout(&flctl->dma_complete,
+ msecs_to_jiffies(3000));
+
+ if (time_left == 0) {
+ dmaengine_terminate_all(chan);
+ dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ reg = readl(FLINTDMACR(flctl));
+ reg &= ~DREQ0EN;
+ writel(reg, FLINTDMACR(flctl));
+
+ dma_unmap_single(chan->device->dev, dma_addr, len, dir);
+
+ /* ret == 0 is success */
+ return ret;
+}
+
+static void read_datareg(struct sh_flctl *flctl, int offset)
+{
+ unsigned long data;
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+ wait_completion(flctl);
+
+ data = readl(FLDATAR(flctl));
+ *buf = le32_to_cpu(data);
+}
+
+static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+{
+ int i, len_4align;
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+ len_4align = (rlen + 3) / 4;
+
+ /* initiate DMA transfer */
+ if (flctl->chan_fifo0_rx && rlen >= 32 &&
+ !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
+ goto convert; /* DMA success */
+
+ /* do polling transfer */
+ for (i = 0; i < len_4align; i++) {
+ wait_rfifo_ready(flctl);
+ buf[i] = readl(FLDTFIFO(flctl));
+ }
+
+convert:
+ for (i = 0; i < len_4align; i++)
+ buf[i] = be32_to_cpu(buf[i]);
+}
+
+static enum flctl_ecc_res_t read_ecfiforeg
+ (struct sh_flctl *flctl, uint8_t *buff, int sector)
+{
+ int i;
+ enum flctl_ecc_res_t res;
+ unsigned long *ecc_buf = (unsigned long *)buff;
+
+	res = wait_recfifo_ready(flctl, sector);
+
+ if (res != FL_ERROR) {
+ for (i = 0; i < 4; i++) {
+ ecc_buf[i] = readl(FLECFIFO(flctl));
+ ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
+ }
+ }
+
+ return res;
+}
+
+static void write_fiforeg(struct sh_flctl *flctl, int rlen,
+ unsigned int offset)
+{
+ int i, len_4align;
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+ len_4align = (rlen + 3) / 4;
+ for (i = 0; i < len_4align; i++) {
+ wait_wfifo_ready(flctl);
+ writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
+ }
+}
+
+static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
+ unsigned int offset)
+{
+ int i, len_4align;
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+ len_4align = (rlen + 3) / 4;
+
+ for (i = 0; i < len_4align; i++)
+ buf[i] = cpu_to_be32(buf[i]);
+
+ /* initiate DMA transfer */
+ if (flctl->chan_fifo0_tx && rlen >= 32 &&
+ !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
+ return; /* DMA success */
+
+ /* do polling transfer */
+ for (i = 0; i < len_4align; i++) {
+ wait_wecfifo_ready(flctl);
+ writel(buf[i], FLECFIFO(flctl));
+ }
+}
+
+static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
+ uint32_t flcmdcr_val, addr_len_bytes = 0;
+
+ /* Set SNAND bit if page size is 2048byte */
+ if (flctl->page_size)
+ flcmncr_val |= SNAND_E;
+ else
+ flcmncr_val &= ~SNAND_E;
+
+ /* default FLCMDCR val */
+ flcmdcr_val = DOCMD1_E | DOADR_E;
+
+ /* Set for FLCMDCR */
+ switch (cmd) {
+ case NAND_CMD_ERASE1:
+ addr_len_bytes = flctl->erase_ADRCNT;
+ flcmdcr_val |= DOCMD2_E;
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ case NAND_CMD_RNDOUT:
+ addr_len_bytes = flctl->rw_ADRCNT;
+ flcmdcr_val |= CDSRC_E;
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ flcmncr_val |= SEL_16BIT;
+ break;
+ case NAND_CMD_SEQIN:
+		/* here flcmcdr_val is READ0, READ1 or READOOB */
+ flcmdcr_val &= ~DOADR_E; /* ONLY execute 1st cmd */
+ break;
+ case NAND_CMD_PAGEPROG:
+ addr_len_bytes = flctl->rw_ADRCNT;
+ flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ flcmncr_val |= SEL_16BIT;
+ break;
+ case NAND_CMD_READID:
+ flcmncr_val &= ~SNAND_E;
+ flcmdcr_val |= CDSRC_E;
+ addr_len_bytes = ADRCNT_1;
+ break;
+ case NAND_CMD_STATUS:
+ case NAND_CMD_RESET:
+ flcmncr_val &= ~SNAND_E;
+ flcmdcr_val &= ~(DOADR_E | DOSR_E);
+ break;
+ default:
+ break;
+ }
+
+ /* Set address bytes parameter */
+ flcmdcr_val |= addr_len_bytes;
+
+ /* Now actually write */
+ writel(flcmncr_val, FLCMNCR(flctl));
+ writel(flcmdcr_val, FLCMDCR(flctl));
+ writel(flcmcdr_val, FLCMCDR(flctl));
+}
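
FLCMCDR takes up to two NAND opcodes per transaction; the callers above pack the second command into bits 15:8 and the first into bits 7:0. A sketch with the large-page read pair (NAND_CMD_READSTART = 0x30, NAND_CMD_READ0 = 0x00):

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_flcmcdr(uint8_t first_cmd, uint8_t second_cmd)
{
	return ((uint32_t)second_cmd << 8) | first_cmd;
}

int main(void)
{
	/* (NAND_CMD_READSTART << 8) | NAND_CMD_READ0 */
	printf("%#x\n", pack_flcmcdr(0x00, 0x30));	/* 0x3000 */
	return 0;
}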
+
+static int flctl_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (oob_required)
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+static int flctl_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+ return nand_prog_page_end_op(chip);
+}
+
+static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int sector, page_sectors;
+ enum flctl_ecc_res_t ecc_result;
+
+ page_sectors = flctl->page_size ? 4 : 1;
+
+ set_cmd_regs(mtd, NAND_CMD_READ0,
+ (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
+
+ writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
+ FLCMNCR(flctl));
+ writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+ writel(page_addr << 2, FLADR(flctl));
+
+ empty_fifo(flctl);
+ start_translation(flctl);
+
+ for (sector = 0; sector < page_sectors; sector++) {
+ read_fiforeg(flctl, 512, 512 * sector);
+
+ ecc_result = read_ecfiforeg(flctl,
+ &flctl->done_buff[mtd->writesize + 16 * sector],
+ sector);
+
+ switch (ecc_result) {
+ case FL_REPAIRABLE:
+ dev_info(&flctl->pdev->dev,
+ "applied ecc on page 0x%x", page_addr);
+ mtd->ecc_stats.corrected++;
+ break;
+ case FL_ERROR:
+ dev_warn(&flctl->pdev->dev,
+ "page 0x%x contains corrupted data\n",
+ page_addr);
+ mtd->ecc_stats.failed++;
+ break;
+ default:
+			break;
+ }
+ }
+
+ wait_completion(flctl);
+
+ writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
+ FLCMNCR(flctl));
+}
+
+static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int page_sectors = flctl->page_size ? 4 : 1;
+ int i;
+
+ set_cmd_regs(mtd, NAND_CMD_READ0,
+ (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
+
+ empty_fifo(flctl);
+
+ for (i = 0; i < page_sectors; i++) {
+		set_addr(mtd, (512 + 16) * i + 512, page_addr);
+ writel(16, FLDTCNTR(flctl));
+
+ start_translation(flctl);
+ read_fiforeg(flctl, 16, 16 * i);
+ wait_completion(flctl);
+ }
+}
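
The raw offsets used above: on the 4-sector large page each sector occupies 512 data bytes followed by 16 OOB bytes, so the OOB of sector i starts at (512 + 16) * i + 512. A quick check (plain C sketch):

#include <stdio.h>

int main(void)
{
	int i;

	/* prints 512, 1040, 1568, 2096 */
	for (i = 0; i < 4; i++)
		printf("sector %d: oob at %d\n", i, (512 + 16) * i + 512);
	return 0;
}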
+
+static void execmd_write_page_sector(struct mtd_info *mtd)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int page_addr = flctl->seqin_page_addr;
+ int sector, page_sectors;
+
+ page_sectors = flctl->page_size ? 4 : 1;
+
+ set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
+ (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
+
+ empty_fifo(flctl);
+ writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
+ writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+ writel(page_addr << 2, FLADR(flctl));
+ start_translation(flctl);
+
+ for (sector = 0; sector < page_sectors; sector++) {
+ write_fiforeg(flctl, 512, 512 * sector);
+ write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
+ }
+
+ wait_completion(flctl);
+ writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
+}
+
+static void execmd_write_oob(struct mtd_info *mtd)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int page_addr = flctl->seqin_page_addr;
+ int sector, page_sectors;
+
+ page_sectors = flctl->page_size ? 4 : 1;
+
+ set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
+ (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
+
+ for (sector = 0; sector < page_sectors; sector++) {
+ empty_fifo(flctl);
+ set_addr(mtd, sector * 528 + 512, page_addr);
+ writel(16, FLDTCNTR(flctl)); /* set read size */
+
+ start_translation(flctl);
+ write_fiforeg(flctl, 16, 16 * sector);
+ wait_completion(flctl);
+ }
+}
+
+static void flctl_cmdfunc(struct nand_chip *chip, unsigned int command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint32_t read_cmd = 0;
+
+ pm_runtime_get_sync(&flctl->pdev->dev);
+
+ flctl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ flctl->index = 0;
+
+ switch (command) {
+ case NAND_CMD_READ1:
+ case NAND_CMD_READ0:
+ if (flctl->hwecc) {
+ /* read page with hwecc */
+ execmd_read_page_sector(mtd, page_addr);
+ break;
+ }
+ if (flctl->page_size)
+ set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
+ | command);
+ else
+ set_cmd_regs(mtd, command, command);
+
+ set_addr(mtd, 0, page_addr);
+
+ flctl->read_bytes = mtd->writesize + mtd->oobsize;
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ flctl->index += column;
+ goto read_normal_exit;
+
+ case NAND_CMD_READOOB:
+ if (flctl->hwecc) {
+ /* read page with hwecc */
+ execmd_read_oob(mtd, page_addr);
+ break;
+ }
+
+ if (flctl->page_size) {
+ set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
+ | NAND_CMD_READ0);
+ set_addr(mtd, mtd->writesize, page_addr);
+ } else {
+ set_cmd_regs(mtd, command, command);
+ set_addr(mtd, 0, page_addr);
+ }
+ flctl->read_bytes = mtd->oobsize;
+ goto read_normal_exit;
+
+ case NAND_CMD_RNDOUT:
+ if (flctl->hwecc)
+ break;
+
+ if (flctl->page_size)
+ set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
+ | command);
+ else
+ set_cmd_regs(mtd, command, command);
+
+ set_addr(mtd, column, 0);
+
+ flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
+ goto read_normal_exit;
+
+ case NAND_CMD_READID:
+ set_cmd_regs(mtd, command, command);
+
+ /* READID is always performed using an 8-bit bus */
+ if (flctl->chip.options & NAND_BUSWIDTH_16)
+ column <<= 1;
+ set_addr(mtd, column, 0);
+
+ flctl->read_bytes = 8;
+ writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ empty_fifo(flctl);
+ start_translation(flctl);
+ read_fiforeg(flctl, flctl->read_bytes, 0);
+ wait_completion(flctl);
+ break;
+
+ case NAND_CMD_ERASE1:
+ flctl->erase1_page_addr = page_addr;
+ break;
+
+ case NAND_CMD_ERASE2:
+ set_cmd_regs(mtd, NAND_CMD_ERASE1,
+ (command << 8) | NAND_CMD_ERASE1);
+ set_addr(mtd, -1, flctl->erase1_page_addr);
+ start_translation(flctl);
+ wait_completion(flctl);
+ break;
+
+ case NAND_CMD_SEQIN:
+ if (!flctl->page_size) {
+ /* output read command */
+ if (column >= mtd->writesize) {
+ column -= mtd->writesize;
+ read_cmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ read_cmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ read_cmd = NAND_CMD_READ1;
+ }
+ }
+ flctl->seqin_column = column;
+ flctl->seqin_page_addr = page_addr;
+ flctl->seqin_read_cmd = read_cmd;
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ empty_fifo(flctl);
+ if (!flctl->page_size) {
+ set_cmd_regs(mtd, NAND_CMD_SEQIN,
+ flctl->seqin_read_cmd);
+ set_addr(mtd, -1, -1);
+ writel(0, FLDTCNTR(flctl)); /* set 0 size */
+ start_translation(flctl);
+ wait_completion(flctl);
+ }
+ if (flctl->hwecc) {
+ /* write page with hwecc */
+ if (flctl->seqin_column == mtd->writesize)
+ execmd_write_oob(mtd);
+ else if (!flctl->seqin_column)
+ execmd_write_page_sector(mtd);
+ else
+ pr_err("Invalid address !?\n");
+ break;
+ }
+ set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
+ set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
+ writel(flctl->index, FLDTCNTR(flctl)); /* set write size */
+ start_translation(flctl);
+ write_fiforeg(flctl, flctl->index, 0);
+ wait_completion(flctl);
+ break;
+
+ case NAND_CMD_STATUS:
+ set_cmd_regs(mtd, command, command);
+ set_addr(mtd, -1, -1);
+
+ flctl->read_bytes = 1;
+ writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ start_translation(flctl);
+ read_datareg(flctl, 0); /* read and end */
+ break;
+
+ case NAND_CMD_RESET:
+ set_cmd_regs(mtd, command, command);
+ set_addr(mtd, -1, -1);
+
+ writel(0, FLDTCNTR(flctl)); /* set 0 size */
+ start_translation(flctl);
+ wait_completion(flctl);
+ break;
+
+ default:
+ break;
+ }
+ goto runtime_exit;
+
+read_normal_exit:
+ writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+ empty_fifo(flctl);
+ start_translation(flctl);
+ read_fiforeg(flctl, flctl->read_bytes, 0);
+ wait_completion(flctl);
+runtime_exit:
+ pm_runtime_put_sync(&flctl->pdev->dev);
+ return;
+}
+
+static void flctl_select_chip(struct nand_chip *chip, int chipnr)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
+ int ret;
+
+ switch (chipnr) {
+ case -1:
+ flctl->flcmncr_base &= ~CE0_ENABLE;
+
+ pm_runtime_get_sync(&flctl->pdev->dev);
+ writel(flctl->flcmncr_base, FLCMNCR(flctl));
+
+ if (flctl->qos_request) {
+ dev_pm_qos_remove_request(&flctl->pm_qos);
+ flctl->qos_request = 0;
+ }
+
+ pm_runtime_put_sync(&flctl->pdev->dev);
+ break;
+ case 0:
+ flctl->flcmncr_base |= CE0_ENABLE;
+
+ if (!flctl->qos_request) {
+ ret = dev_pm_qos_add_request(&flctl->pdev->dev,
+ &flctl->pm_qos,
+ DEV_PM_QOS_RESUME_LATENCY,
+ 100);
+ if (ret < 0)
+ dev_err(&flctl->pdev->dev,
+ "PM QoS request failed: %d\n", ret);
+ flctl->qos_request = 1;
+ }
+
+ if (flctl->holden) {
+ pm_runtime_get_sync(&flctl->pdev->dev);
+ writel(HOLDEN, FLHOLDCR(flctl));
+ pm_runtime_put_sync(&flctl->pdev->dev);
+ }
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void flctl_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
+
+ memcpy(&flctl->done_buff[flctl->index], buf, len);
+ flctl->index += len;
+}
+
+static uint8_t flctl_read_byte(struct nand_chip *chip)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
+ uint8_t data;
+
+ data = flctl->done_buff[flctl->index];
+ flctl->index++;
+ return data;
+}
+
+static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
+
+ memcpy(buf, &flctl->done_buff[flctl->index], len);
+ flctl->index += len;
+}
+
+static int flctl_chip_attach_chip(struct nand_chip *chip)
+{
+ u64 targetsize = nanddev_target_size(&chip->base);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+
+ /*
+ * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
+ * Add the SEL_16BIT flag in flctl->flcmncr_base.
+ */
+ if (chip->options & NAND_BUSWIDTH_16)
+ flctl->flcmncr_base |= SEL_16BIT;
+
+ if (mtd->writesize == 512) {
+ flctl->page_size = 0;
+ if (targetsize > (32 << 20)) {
+			/* larger than 32MB */
+ flctl->rw_ADRCNT = ADRCNT_4;
+ flctl->erase_ADRCNT = ADRCNT_3;
+ } else if (targetsize > (2 << 16)) {
+			/* larger than 128KB */
+ flctl->rw_ADRCNT = ADRCNT_3;
+ flctl->erase_ADRCNT = ADRCNT_2;
+ } else {
+ flctl->rw_ADRCNT = ADRCNT_2;
+ flctl->erase_ADRCNT = ADRCNT_1;
+ }
+ } else {
+ flctl->page_size = 1;
+ if (targetsize > (128 << 20)) {
+			/* larger than 128MB */
+ flctl->rw_ADRCNT = ADRCNT2_E;
+ flctl->erase_ADRCNT = ADRCNT_3;
+ } else if (targetsize > (8 << 16)) {
+			/* larger than 512KB */
+ flctl->rw_ADRCNT = ADRCNT_4;
+ flctl->erase_ADRCNT = ADRCNT_2;
+ } else {
+ flctl->rw_ADRCNT = ADRCNT_3;
+ flctl->erase_ADRCNT = ADRCNT_1;
+ }
+ }
+
+ if (flctl->hwecc) {
+ if (mtd->writesize == 512) {
+ mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
+ chip->badblock_pattern = &flctl_4secc_smallpage;
+ } else {
+ mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
+ chip->badblock_pattern = &flctl_4secc_largepage;
+ }
+
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 10;
+ chip->ecc.strength = 4;
+ chip->ecc.read_page = flctl_read_page_hwecc;
+ chip->ecc.write_page = flctl_write_page_hwecc;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ /* 4 symbols ECC enabled */
+ flctl->flcmncr_base |= _4ECCEN;
+ } else {
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ }
+
+ return 0;
+}
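
The size thresholds above pick how many address bytes the controller emits per access. A user-space sketch of the small-page branch (the helper is hypothetical; ADRCNT_n corresponds to n address bytes):

#include <stdint.h>
#include <stdio.h>

static void small_page_adrcnt(uint64_t size, int *rw, int *erase)
{
	if (size > (32ULL << 20)) {		/* larger than 32MB */
		*rw = 4; *erase = 3;
	} else if (size > (2ULL << 16)) {	/* larger than 128KB */
		*rw = 3; *erase = 2;
	} else {
		*rw = 2; *erase = 1;
	}
}

int main(void)
{
	int rw, erase;

	small_page_adrcnt(64ULL << 20, &rw, &erase);
	printf("64MiB: rw=%d erase=%d\n", rw, erase);	/* rw=4 erase=3 */
	return 0;
}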
+
+static const struct nand_controller_ops flctl_nand_controller_ops = {
+ .attach_chip = flctl_chip_attach_chip,
+};
+
+static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
+{
+ struct sh_flctl *flctl = dev_id;
+
+ dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
+ writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
+
+ return IRQ_HANDLED;
+}
+
+struct flctl_soc_config {
+ unsigned long flcmncr_val;
+ unsigned has_hwecc:1;
+ unsigned use_holden:1;
+};
+
+static struct flctl_soc_config flctl_sh7372_config = {
+ .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
+ .has_hwecc = 1,
+ .use_holden = 1,
+};
+
+static const struct of_device_id of_flctl_match[] = {
+ { .compatible = "renesas,shmobile-flctl-sh7372",
+ .data = &flctl_sh7372_config },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_flctl_match);
+
+static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
+{
+ const struct flctl_soc_config *config;
+ struct sh_flctl_platform_data *pdata;
+
+ config = of_device_get_match_data(dev);
+ if (!config) {
+ dev_err(dev, "%s: no OF configuration attached\n", __func__);
+ return NULL;
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ /* set SoC specific options */
+ pdata->flcmncr_val = config->flcmncr_val;
+ pdata->has_hwecc = config->has_hwecc;
+ pdata->use_holden = config->use_holden;
+
+ return pdata;
+}
+
+static int flctl_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct sh_flctl *flctl;
+ struct mtd_info *flctl_mtd;
+ struct nand_chip *nand;
+ struct sh_flctl_platform_data *pdata;
+ int ret;
+ int irq;
+
+ flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
+ if (!flctl)
+ return -ENOMEM;
+
+ flctl->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(flctl->reg))
+ return PTR_ERR(flctl->reg);
+ flctl->fifo = res->start + 0x24; /* FLDTFIFO */
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
+ "flste", flctl);
+ if (ret) {
+ dev_err(&pdev->dev, "request interrupt failed.\n");
+ return ret;
+ }
+
+ if (pdev->dev.of_node)
+ pdata = flctl_parse_dt(&pdev->dev);
+ else
+ pdata = dev_get_platdata(&pdev->dev);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no setup data defined\n");
+ return -EINVAL;
+ }
+
+ platform_set_drvdata(pdev, flctl);
+ nand = &flctl->chip;
+ flctl_mtd = nand_to_mtd(nand);
+ nand_set_flash_node(nand, pdev->dev.of_node);
+ flctl_mtd->dev.parent = &pdev->dev;
+ flctl->pdev = pdev;
+ flctl->hwecc = pdata->has_hwecc;
+ flctl->holden = pdata->use_holden;
+ flctl->flcmncr_base = pdata->flcmncr_val;
+ flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
+
+ /* Set address of hardware control function */
+ /* 20 us command delay time */
+ nand->legacy.chip_delay = 20;
+
+ nand->legacy.read_byte = flctl_read_byte;
+ nand->legacy.write_buf = flctl_write_buf;
+ nand->legacy.read_buf = flctl_read_buf;
+ nand->legacy.select_chip = flctl_select_chip;
+ nand->legacy.cmdfunc = flctl_cmdfunc;
+ nand->legacy.set_features = nand_get_set_features_notsupp;
+ nand->legacy.get_features = nand_get_set_features_notsupp;
+
+ if (pdata->flcmncr_val & SEL_16BIT)
+ nand->options |= NAND_BUSWIDTH_16;
+
+ nand->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ flctl_setup_dma(flctl);
+
+ nand->legacy.dummy_controller.ops = &flctl_nand_controller_ops;
+ ret = nand_scan(nand, 1);
+ if (ret)
+ goto err_chip;
+
+ ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
+ if (ret)
+ goto cleanup_nand;
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(nand);
+err_chip:
+ flctl_release_dma(flctl);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static void flctl_remove(struct platform_device *pdev)
+{
+ struct sh_flctl *flctl = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &flctl->chip;
+ int ret;
+
+ flctl_release_dma(flctl);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ pm_runtime_disable(&pdev->dev);
+}
+
+static struct platform_driver flctl_driver = {
+ .remove_new = flctl_remove,
+ .driver = {
+ .name = "sh_flctl",
+ .of_match_table = of_flctl_match,
+ },
+};
+
+module_platform_driver_probe(flctl_driver, flctl_probe);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_DESCRIPTION("SuperH FLCTL driver");
+MODULE_ALIAS("platform:sh_flctl");
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
new file mode 100644
index 0000000000..2402dc5465
--- /dev/null
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 Richard Purdie
+ * Copyright (C) 2008 Dmitry Baryshkov
+ *
+ * Based on Sharp's NAND driver sharp_sl.c
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/sharpsl.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+struct sharpsl_nand {
+ struct nand_controller controller;
+ struct nand_chip chip;
+
+ void __iomem *io;
+};
+
+static inline struct sharpsl_nand *mtd_to_sharpsl(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct sharpsl_nand, chip);
+}
+
+/* register offset */
+#define ECCLPLB 0x00 /* line parity 7 - 0 bit */
+#define ECCLPUB 0x04 /* line parity 15 - 8 bit */
+#define ECCCP 0x08 /* column parity 5 - 0 bit */
+#define ECCCNTR 0x0C /* ECC byte counter */
+#define ECCCLRR 0x10 /* clear ECC */
+#define FLASHIO 0x14 /* Flash I/O */
+#define FLASHCTL 0x18 /* Flash Control */
+
+/* Flash control bit */
+#define FLRYBY (1 << 5)
+#define FLCE1 (1 << 4)
+#define FLWP (1 << 3)
+#define FLALE (1 << 2)
+#define FLCLE (1 << 1)
+#define FLCE0 (1 << 0)
+
+/*
+ * hardware specific access to control-lines
+ * ctrl:
+ * NAND_CNE: bit 0 -> ! bit 0 & 4
+ * NAND_CLE: bit 1 -> bit 1
+ * NAND_ALE: bit 2 -> bit 2
+ *
+ */
+static void sharpsl_nand_hwcontrol(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ unsigned char bits = ctrl & 0x07;
+
+ bits |= (ctrl & 0x01) << 4;
+
+ bits ^= 0x11;
+
+ writeb((readb(sharpsl->io + FLASHCTL) & ~0x17) | bits, sharpsl->io + FLASHCTL);
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, chip->legacy.IO_ADDR_W);
+}
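
A sketch of the bit shuffling above (plain C; the helper is made up). NAND_NCE is bit 0, which is mirrored into bit 4 so it drives both FLCE0 and FLCE1; the XOR with 0x11 then inverts those two bits, presumably because the chip-enable lines are active low:

#include <stdio.h>

static unsigned char map_ctrl(unsigned int ctrl)
{
	unsigned char bits = ctrl & 0x07;	/* NCE/CLE/ALE */

	bits |= (ctrl & 0x01) << 4;		/* mirror NCE to FLCE1 */
	return bits ^ 0x11;			/* invert both CE lines */
}

int main(void)
{
	/* asserting NCE alone: 0x01 -> 0x11 -> 0x00, both CEs driven low */
	printf("0x%02x\n", map_ctrl(0x01));
	return 0;
}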
+
+static int sharpsl_nand_dev_ready(struct nand_chip *chip)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
+ return !((readb(sharpsl->io + FLASHCTL) & FLRYBY) == 0);
+}
+
+static void sharpsl_nand_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
+ writeb(0, sharpsl->io + ECCCLRR);
+}
+
+static int sharpsl_nand_calculate_ecc(struct nand_chip *chip,
+ const u_char * dat, u_char * ecc_code)
+{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
+ ecc_code[0] = ~readb(sharpsl->io + ECCLPUB);
+ ecc_code[1] = ~readb(sharpsl->io + ECCLPLB);
+ ecc_code[2] = (~readb(sharpsl->io + ECCCP) << 2) | 0x03;
+ return readb(sharpsl->io + ECCCNTR) != 0;
+}
+
+static int sharpsl_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ chip->ecc.hwctl = sharpsl_nand_enable_hwecc;
+ chip->ecc.calculate = sharpsl_nand_calculate_ecc;
+ chip->ecc.correct = rawnand_sw_hamming_correct;
+
+ return 0;
+}
+
+static const struct nand_controller_ops sharpsl_ops = {
+ .attach_chip = sharpsl_attach_chip,
+};
+
+/*
+ * Main initialization routine
+ */
+static int sharpsl_nand_probe(struct platform_device *pdev)
+{
+ struct nand_chip *this;
+ struct mtd_info *mtd;
+ struct resource *r;
+ int err = 0;
+ struct sharpsl_nand *sharpsl;
+ struct sharpsl_nand_platform_data *data = dev_get_platdata(&pdev->dev);
+
+ if (!data) {
+ dev_err(&pdev->dev, "no platform data!\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for MTD device structure and private data */
+ sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL);
+ if (!sharpsl)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no io memory resource defined!\n");
+ err = -ENODEV;
+ goto err_get_res;
+ }
+
+ /* map physical address */
+ sharpsl->io = ioremap(r->start, resource_size(r));
+ if (!sharpsl->io) {
+ dev_err(&pdev->dev, "ioremap to access Sharp SL NAND chip failed\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&sharpsl->chip);
+
+ nand_controller_init(&sharpsl->controller);
+ sharpsl->controller.ops = &sharpsl_ops;
+ this->controller = &sharpsl->controller;
+
+ /* Link the private data with the MTD structure */
+ mtd = nand_to_mtd(this);
+ mtd->dev.parent = &pdev->dev;
+ mtd_set_ooblayout(mtd, data->ecc_layout);
+
+ platform_set_drvdata(pdev, sharpsl);
+
+ /*
+ * PXA initialize
+ */
+ writeb(readb(sharpsl->io + FLASHCTL) | FLWP, sharpsl->io + FLASHCTL);
+
+ /* Set address of NAND IO lines */
+ this->legacy.IO_ADDR_R = sharpsl->io + FLASHIO;
+ this->legacy.IO_ADDR_W = sharpsl->io + FLASHIO;
+ /* Set address of hardware control function */
+ this->legacy.cmd_ctrl = sharpsl_nand_hwcontrol;
+ this->legacy.dev_ready = sharpsl_nand_dev_ready;
+ /* 15 us command delay time */
+ this->legacy.chip_delay = 15;
+ this->badblock_pattern = data->badblock_pattern;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(this, 1);
+ if (err)
+ goto err_scan;
+
+ /* Register the partitions */
+ mtd->name = "sharpsl-nand";
+
+ err = mtd_device_parse_register(mtd, data->part_parsers, NULL,
+ data->partitions, data->nr_partitions);
+ if (err)
+ goto err_add;
+
+ /* Return happy */
+ return 0;
+
+err_add:
+ nand_cleanup(this);
+
+err_scan:
+ iounmap(sharpsl->io);
+err_ioremap:
+err_get_res:
+ kfree(sharpsl);
+ return err;
+}
+
+/*
+ * Clean up routine
+ */
+static void sharpsl_nand_remove(struct platform_device *pdev)
+{
+ struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &sharpsl->chip;
+ int ret;
+
+ /* Unregister device */
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+
+ /* Release resources */
+ nand_cleanup(chip);
+
+ iounmap(sharpsl->io);
+
+ /* Free the driver's structure */
+ kfree(sharpsl);
+}
+
+static struct platform_driver sharpsl_nand_driver = {
+ .driver = {
+ .name = "sharpsl-nand",
+ },
+ .probe = sharpsl_nand_probe,
+ .remove_new = sharpsl_nand_remove,
+};
+
+module_platform_driver(sharpsl_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
+MODULE_DESCRIPTION("Device specific logic for NAND flash on Sharp SL-C7xx Series");
diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c
new file mode 100644
index 0000000000..24f52a30fb
--- /dev/null
+++ b/drivers/mtd/nand/raw/sm_common.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for xD format
+ */
+#include <linux/kernel.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include "sm_common.h"
+
+static int oob_sm_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ oobregion->length = 3;
+ oobregion->offset = ((section + 1) * 8) - 3;
+
+ return 0;
+}
+
+static int oob_sm_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ switch (section) {
+ case 0:
+ /* reserved */
+ oobregion->offset = 0;
+ oobregion->length = 4;
+ break;
+ case 1:
+ /* LBA1 */
+ oobregion->offset = 6;
+ oobregion->length = 2;
+ break;
+ case 2:
+ /* LBA2 */
+ oobregion->offset = 11;
+ oobregion->length = 2;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops oob_sm_ops = {
+ .ecc = oob_sm_ooblayout_ecc,
+ .free = oob_sm_ooblayout_free,
+};
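
For a 16-byte OOB area the ECC callback above yields two 3-byte regions. A quick check (plain C sketch):

#include <stdio.h>

int main(void)
{
	int section;

	/* prints bytes 5-7 and 13-15 */
	for (section = 0; section < 2; section++) {
		int off = ((section + 1) * 8) - 3;

		printf("ecc section %d: bytes %d-%d\n", section, off, off + 2);
	}
	return 0;
}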
+
+/*
+ * NOTE: This layout is not compatible with SmartMedia,
+ * because the 256 byte devices have a page dependent oob layout.
+ * However it does preserve the bad block markers.
+ * If you use smftl, it will bypass this and work correctly;
+ * if you do not, you break SmartMedia compliance anyway.
+ */
+
+static int oob_sm_small_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = 3;
+ oobregion->offset = 0;
+
+ return 0;
+}
+
+static int oob_sm_small_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ switch (section) {
+ case 0:
+ /* reserved */
+ oobregion->offset = 3;
+ oobregion->length = 2;
+ break;
+ case 1:
+ /* LBA1 */
+ oobregion->offset = 6;
+ oobregion->length = 2;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops oob_sm_small_ops = {
+ .ecc = oob_sm_small_ooblayout_ecc,
+ .free = oob_sm_small_ooblayout_free,
+};
+
+static int sm_block_markbad(struct nand_chip *chip, loff_t ofs)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mtd_oob_ops ops = { };
+ struct sm_oob oob;
+ int ret;
+
+ memset(&oob, -1, SM_OOB_SIZE);
+ oob.block_status = 0x0F;
+
+	/* As long as this function is called on erase block boundaries,
+	   it will work correctly for 256 byte nand */
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = (void *)&oob;
+ ops.datbuf = NULL;
+
+
+ ret = mtd_write_oob(mtd, ofs, &ops);
+ if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
+ pr_notice("sm_common: can't mark sector at %i as bad\n",
+ (int)ofs);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
+ LEGACY_ID_NAND("SmartMedia 2MiB 3,3V ROM", 0x5d, 2, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3V", 0xe3, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3/5V", 0xe5, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 5V", 0x6b, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3V ROM", 0xd5, 4, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 8MiB 3,3V", 0xe6, 8, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 8MiB 3,3V ROM", 0xd6, 8, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 16MiB 3,3V ROM", 0x57, 16, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 32MiB 3,3V ROM", 0x58, 32, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 64MiB 3,3V ROM", 0xd9, 64, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 128MiB 3,3V ROM", 0xda, 128, SZ_16K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 256MiB 3,3V", 0x71, 256, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 256MiB 3,3V ROM", 0x5b, 256, SZ_16K, NAND_ROM),
+ {NULL}
+};
+
+static struct nand_flash_dev nand_xd_flash_ids[] = {
+ LEGACY_ID_NAND("xD 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 256MiB 3,3V", 0x71, 256, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 512MiB 3,3V", 0xdc, 512, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 1GiB 3,3V", 0xd3, 1024, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 2GiB 3,3V", 0xd5, 2048, SZ_16K, NAND_BROKEN_XD),
+ {NULL}
+};
+
+static int sm_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Bad block marker position */
+ chip->badblockpos = 0x05;
+ chip->badblockbits = 7;
+ chip->legacy.block_markbad = sm_block_markbad;
+
+ /* ECC layout */
+ if (mtd->writesize == SM_SECTOR_SIZE)
+ mtd_set_ooblayout(mtd, &oob_sm_ops);
+ else if (mtd->writesize == SM_SMALL_PAGE)
+ mtd_set_ooblayout(mtd, &oob_sm_small_ops);
+ else
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct nand_controller_ops sm_controller_ops = {
+ .attach_chip = sm_attach_chip,
+};
+
+int sm_register_device(struct mtd_info *mtd, int smartmedia)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_flash_dev *flash_ids;
+ int ret;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ /* Scan for card properties */
+ chip->legacy.dummy_controller.ops = &sm_controller_ops;
+ flash_ids = smartmedia ? nand_smartmedia_flash_ids : nand_xd_flash_ids;
+ ret = nand_scan_with_ids(chip, 1, flash_ids);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ nand_cleanup(chip);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sm_register_device);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Common SmartMedia/xD functions");
diff --git a/drivers/mtd/nand/raw/sm_common.h b/drivers/mtd/nand/raw/sm_common.h
new file mode 100644
index 0000000000..57fc9f86f9
--- /dev/null
+++ b/drivers/mtd/nand/raw/sm_common.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for SmartMedia/xD format
+ */
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+
+/* Full oob structure as written on the flash */
+struct sm_oob {
+ uint32_t reserved;
+ uint8_t data_status;
+ uint8_t block_status;
+ uint8_t lba_copy1[2];
+ uint8_t ecc2[3];
+ uint8_t lba_copy2[2];
+ uint8_t ecc1[3];
+} __packed;
+
+
+/* one sector is always 512 bytes, but it can consist of two nand pages */
+#define SM_SECTOR_SIZE 512
+
+/* oob area is also 16 bytes, but might be from two pages */
+#define SM_OOB_SIZE 16
+
+/* This is the maximum zone size, and all devices that have more than one
+   zone have this size */
+#define SM_MAX_ZONE_SIZE 1024
+
+/* support for small page nand */
+#define SM_SMALL_PAGE 256
+#define SM_SMALL_OOB_SIZE 8
+
+
+int sm_register_device(struct mtd_info *mtd, int smartmedia);
+
+
+static inline int sm_sector_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->data_status) >= 5;
+}
+
+static inline int sm_block_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->block_status) >= 7;
+}
+
+static inline int sm_block_erased(struct sm_oob *oob)
+{
+ static const uint32_t erased_pattern[4] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ /* First test for erased block */
+ if (!memcmp(oob, erased_pattern, sizeof(*oob)))
+ return 1;
+ return 0;
+}
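
The popcount thresholds above make the status markers tolerant to bitflips: an erased marker (0xFF) stays valid even with one bit flipped, while the 0x0F written by sm_block_markbad() in sm_common.c lands well below the threshold. A user-space sketch:

#include <stdio.h>

static int popcount8(unsigned int v)
{
	int n = 0;

	for (; v; v >>= 1)
		n += v & 1;
	return n;
}

int main(void)
{
	printf("0xFF -> %d (valid, >= 7)\n", popcount8(0xFF));	/* 8 */
	printf("0x7F -> %d (still valid)\n", popcount8(0x7F));	/* 7 */
	printf("0x0F -> %d (bad, < 7)\n", popcount8(0x0F));	/* 4 */
	return 0;
}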
diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c
new file mode 100644
index 0000000000..76d50eb9f1
--- /dev/null
+++ b/drivers/mtd/nand/raw/socrates_nand.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2008 Ilya Yanok, Emcraft Systems
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#define FPGA_NAND_CMD_MASK (0x7 << 28)
+#define FPGA_NAND_CMD_COMMAND (0x0 << 28)
+#define FPGA_NAND_CMD_ADDR (0x1 << 28)
+#define FPGA_NAND_CMD_READ (0x2 << 28)
+#define FPGA_NAND_CMD_WRITE (0x3 << 28)
+#define FPGA_NAND_BUSY (0x1 << 15)
+#define FPGA_NAND_ENABLE (0x1 << 31)
+#define FPGA_NAND_DATA_SHIFT 16
+
+struct socrates_nand_host {
+ struct nand_controller controller;
+ struct nand_chip nand_chip;
+ void __iomem *io_base;
+ struct device *dev;
+};
+
+/**
+ * socrates_nand_write_buf - write buffer to chip
+ * @this: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void socrates_nand_write_buf(struct nand_chip *this, const uint8_t *buf,
+ int len)
+{
+ int i;
+ struct socrates_nand_host *host = nand_get_controller_data(this);
+
+ for (i = 0; i < len; i++) {
+ out_be32(host->io_base, FPGA_NAND_ENABLE |
+ FPGA_NAND_CMD_WRITE |
+ (buf[i] << FPGA_NAND_DATA_SHIFT));
+ }
+}
+
+/**
+ * socrates_nand_read_buf - read chip data into buffer
+ * @this: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void socrates_nand_read_buf(struct nand_chip *this, uint8_t *buf,
+ int len)
+{
+ int i;
+ struct socrates_nand_host *host = nand_get_controller_data(this);
+ uint32_t val;
+
+ val = FPGA_NAND_ENABLE | FPGA_NAND_CMD_READ;
+
+ out_be32(host->io_base, val);
+ for (i = 0; i < len; i++) {
+ buf[i] = (in_be32(host->io_base) >>
+ FPGA_NAND_DATA_SHIFT) & 0xff;
+ }
+}
+
+/**
+ * socrates_nand_read_byte - read one byte from the chip
+ * @this: NAND chip object
+ */
+static uint8_t socrates_nand_read_byte(struct nand_chip *this)
+{
+ uint8_t byte;
+ socrates_nand_read_buf(this, &byte, sizeof(byte));
+ return byte;
+}
+
+/*
+ * Hardware specific access to control-lines
+ */
+static void socrates_nand_cmd_ctrl(struct nand_chip *nand_chip, int cmd,
+ unsigned int ctrl)
+{
+ struct socrates_nand_host *host = nand_get_controller_data(nand_chip);
+ uint32_t val;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ val = FPGA_NAND_CMD_COMMAND;
+ else
+ val = FPGA_NAND_CMD_ADDR;
+
+ if (ctrl & NAND_NCE)
+ val |= FPGA_NAND_ENABLE;
+
+ val |= (cmd & 0xff) << FPGA_NAND_DATA_SHIFT;
+
+ out_be32(host->io_base, val);
+}
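
Every access goes through a single 32-bit FPGA register; the helpers above compose the control word from an enable bit, a command-type field in bits 30:28 and the data byte at bit 16. A sketch composing a hypothetical command-latch write of NAND_CMD_READ0 (0x00):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = (0x1u << 31)	/* FPGA_NAND_ENABLE */
		     | (0x0u << 28)	/* FPGA_NAND_CMD_COMMAND */
		     | (0x00u << 16);	/* cmd byte at FPGA_NAND_DATA_SHIFT */

	printf("0x%08x\n", val);	/* 0x80000000 */
	return 0;
}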
+
+/*
+ * Read the Device Ready pin.
+ */
+static int socrates_nand_device_ready(struct nand_chip *nand_chip)
+{
+ struct socrates_nand_host *host = nand_get_controller_data(nand_chip);
+
+ if (in_be32(host->io_base) & FPGA_NAND_BUSY)
+ return 0; /* busy */
+ return 1;
+}
+
+static int socrates_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops socrates_ops = {
+ .attach_chip = socrates_attach_chip,
+};
+
+/*
+ * Probe for the NAND device.
+ */
+static int socrates_nand_probe(struct platform_device *ofdev)
+{
+ struct socrates_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ int res;
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&ofdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->io_base = of_iomap(ofdev->dev.of_node, 0);
+ if (host->io_base == NULL) {
+ dev_err(&ofdev->dev, "ioremap failed\n");
+ return -EIO;
+ }
+
+ nand_chip = &host->nand_chip;
+ mtd = nand_to_mtd(nand_chip);
+ host->dev = &ofdev->dev;
+
+ nand_controller_init(&host->controller);
+ host->controller.ops = &socrates_ops;
+ nand_chip->controller = &host->controller;
+
+ /* link the private data structures */
+ nand_set_controller_data(nand_chip, host);
+ nand_set_flash_node(nand_chip, ofdev->dev.of_node);
+ mtd->name = "socrates_nand";
+ mtd->dev.parent = &ofdev->dev;
+
+ nand_chip->legacy.cmd_ctrl = socrates_nand_cmd_ctrl;
+ nand_chip->legacy.read_byte = socrates_nand_read_byte;
+ nand_chip->legacy.write_buf = socrates_nand_write_buf;
+ nand_chip->legacy.read_buf = socrates_nand_read_buf;
+ nand_chip->legacy.dev_ready = socrates_nand_device_ready;
+
+ /* TODO: I have no idea what real delay is. */
+ nand_chip->legacy.chip_delay = 20; /* 20us command delay time */
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ nand_chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ dev_set_drvdata(&ofdev->dev, host);
+
+ res = nand_scan(nand_chip, 1);
+ if (res)
+ goto out;
+
+ res = mtd_device_register(mtd, NULL, 0);
+ if (!res)
+ return res;
+
+ nand_cleanup(nand_chip);
+
+out:
+ iounmap(host->io_base);
+ return res;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static void socrates_nand_remove(struct platform_device *ofdev)
+{
+ struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
+ struct nand_chip *chip = &host->nand_chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ iounmap(host->io_base);
+}
+
+static const struct of_device_id socrates_nand_match[] = {
+ {
+ .compatible = "abb,socrates-nand",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, socrates_nand_match);
+
+static struct platform_driver socrates_nand_driver = {
+ .driver = {
+ .name = "socrates_nand",
+ .of_match_table = socrates_nand_match,
+ },
+ .probe = socrates_nand_probe,
+ .remove_new = socrates_nand_remove,
+};
+
+module_platform_driver(socrates_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ilya Yanok");
+MODULE_DESCRIPTION("NAND driver for Socrates board");
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
new file mode 100644
index 0000000000..88811139aa
--- /dev/null
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -0,0 +1,2110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018
+ * Author: Christophe Kerello <christophe.kerello@st.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_address.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/* Bad block marker length */
+#define FMC2_BBM_LEN 2
+
+/* ECC step size */
+#define FMC2_ECC_STEP_SIZE 512
+
+/* BCHDSRx registers length */
+#define FMC2_BCHDSRS_LEN 20
+
+/* HECCR length */
+#define FMC2_HECCR_LEN 4
+
+/* Max requests done for an 8k NAND page size */
+#define FMC2_MAX_SG 16
+
+/* Max chip enable */
+#define FMC2_MAX_CE 2
+
+/* Max ECC buffer length */
+#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
+
+#define FMC2_TIMEOUT_MS 5000
+
+/* Timings */
+#define FMC2_THIZ 1
+#define FMC2_TIO 8000
+#define FMC2_TSYNC 3000
+#define FMC2_PCR_TIMING_MASK 0xf
+#define FMC2_PMEM_PATT_TIMING_MASK 0xff
+
+/* FMC2 Controller Registers */
+#define FMC2_BCR1 0x0
+#define FMC2_PCR 0x80
+#define FMC2_SR 0x84
+#define FMC2_PMEM 0x88
+#define FMC2_PATT 0x8c
+#define FMC2_HECCR 0x94
+#define FMC2_ISR 0x184
+#define FMC2_ICR 0x188
+#define FMC2_CSQCR 0x200
+#define FMC2_CSQCFGR1 0x204
+#define FMC2_CSQCFGR2 0x208
+#define FMC2_CSQCFGR3 0x20c
+#define FMC2_CSQAR1 0x210
+#define FMC2_CSQAR2 0x214
+#define FMC2_CSQIER 0x220
+#define FMC2_CSQISR 0x224
+#define FMC2_CSQICR 0x228
+#define FMC2_CSQEMSR 0x230
+#define FMC2_BCHIER 0x250
+#define FMC2_BCHISR 0x254
+#define FMC2_BCHICR 0x258
+#define FMC2_BCHPBR1 0x260
+#define FMC2_BCHPBR2 0x264
+#define FMC2_BCHPBR3 0x268
+#define FMC2_BCHPBR4 0x26c
+#define FMC2_BCHDSR0 0x27c
+#define FMC2_BCHDSR1 0x280
+#define FMC2_BCHDSR2 0x284
+#define FMC2_BCHDSR3 0x288
+#define FMC2_BCHDSR4 0x28c
+
+/* Register: FMC2_BCR1 */
+#define FMC2_BCR1_FMC2EN BIT(31)
+
+/* Register: FMC2_PCR */
+#define FMC2_PCR_PWAITEN BIT(1)
+#define FMC2_PCR_PBKEN BIT(2)
+#define FMC2_PCR_PWID GENMASK(5, 4)
+#define FMC2_PCR_PWID_BUSWIDTH_8 0
+#define FMC2_PCR_PWID_BUSWIDTH_16 1
+#define FMC2_PCR_ECCEN BIT(6)
+#define FMC2_PCR_ECCALG BIT(8)
+#define FMC2_PCR_TCLR GENMASK(12, 9)
+#define FMC2_PCR_TCLR_DEFAULT 0xf
+#define FMC2_PCR_TAR GENMASK(16, 13)
+#define FMC2_PCR_TAR_DEFAULT 0xf
+#define FMC2_PCR_ECCSS GENMASK(19, 17)
+#define FMC2_PCR_ECCSS_512 1
+#define FMC2_PCR_ECCSS_2048 3
+#define FMC2_PCR_BCHECC BIT(24)
+#define FMC2_PCR_WEN BIT(25)
+
+/* Register: FMC2_SR */
+#define FMC2_SR_NWRF BIT(6)
+
+/* Register: FMC2_PMEM */
+#define FMC2_PMEM_MEMSET GENMASK(7, 0)
+#define FMC2_PMEM_MEMWAIT GENMASK(15, 8)
+#define FMC2_PMEM_MEMHOLD GENMASK(23, 16)
+#define FMC2_PMEM_MEMHIZ GENMASK(31, 24)
+#define FMC2_PMEM_DEFAULT 0x0a0a0a0a
+
+/* Register: FMC2_PATT */
+#define FMC2_PATT_ATTSET GENMASK(7, 0)
+#define FMC2_PATT_ATTWAIT GENMASK(15, 8)
+#define FMC2_PATT_ATTHOLD GENMASK(23, 16)
+#define FMC2_PATT_ATTHIZ GENMASK(31, 24)
+#define FMC2_PATT_DEFAULT 0x0a0a0a0a
+
+/* Register: FMC2_ISR */
+#define FMC2_ISR_IHLF BIT(1)
+
+/* Register: FMC2_ICR */
+#define FMC2_ICR_CIHLF BIT(1)
+
+/* Register: FMC2_CSQCR */
+#define FMC2_CSQCR_CSQSTART BIT(0)
+
+/* Register: FMC2_CSQCFGR1 */
+#define FMC2_CSQCFGR1_CMD2EN BIT(1)
+#define FMC2_CSQCFGR1_DMADEN BIT(2)
+#define FMC2_CSQCFGR1_ACYNBR GENMASK(6, 4)
+#define FMC2_CSQCFGR1_CMD1 GENMASK(15, 8)
+#define FMC2_CSQCFGR1_CMD2 GENMASK(23, 16)
+#define FMC2_CSQCFGR1_CMD1T BIT(24)
+#define FMC2_CSQCFGR1_CMD2T BIT(25)
+
+/* Register: FMC2_CSQCFGR2 */
+#define FMC2_CSQCFGR2_SQSDTEN BIT(0)
+#define FMC2_CSQCFGR2_RCMD2EN BIT(1)
+#define FMC2_CSQCFGR2_DMASEN BIT(2)
+#define FMC2_CSQCFGR2_RCMD1 GENMASK(15, 8)
+#define FMC2_CSQCFGR2_RCMD2 GENMASK(23, 16)
+#define FMC2_CSQCFGR2_RCMD1T BIT(24)
+#define FMC2_CSQCFGR2_RCMD2T BIT(25)
+
+/* Register: FMC2_CSQCFGR3 */
+#define FMC2_CSQCFGR3_SNBR GENMASK(13, 8)
+#define FMC2_CSQCFGR3_AC1T BIT(16)
+#define FMC2_CSQCFGR3_AC2T BIT(17)
+#define FMC2_CSQCFGR3_AC3T BIT(18)
+#define FMC2_CSQCFGR3_AC4T BIT(19)
+#define FMC2_CSQCFGR3_AC5T BIT(20)
+#define FMC2_CSQCFGR3_SDT BIT(21)
+#define FMC2_CSQCFGR3_RAC1T BIT(22)
+#define FMC2_CSQCFGR3_RAC2T BIT(23)
+
+/* Register: FMC2_CSQCAR1 */
+#define FMC2_CSQCAR1_ADDC1 GENMASK(7, 0)
+#define FMC2_CSQCAR1_ADDC2 GENMASK(15, 8)
+#define FMC2_CSQCAR1_ADDC3 GENMASK(23, 16)
+#define FMC2_CSQCAR1_ADDC4 GENMASK(31, 24)
+
+/* Register: FMC2_CSQCAR2 */
+#define FMC2_CSQCAR2_ADDC5 GENMASK(7, 0)
+#define FMC2_CSQCAR2_NANDCEN GENMASK(11, 10)
+#define FMC2_CSQCAR2_SAO GENMASK(31, 16)
+
+/* Register: FMC2_CSQIER */
+#define FMC2_CSQIER_TCIE BIT(0)
+
+/* Register: FMC2_CSQICR */
+#define FMC2_CSQICR_CLEAR_IRQ GENMASK(4, 0)
+
+/* Register: FMC2_CSQEMSR */
+#define FMC2_CSQEMSR_SEM GENMASK(15, 0)
+
+/* Register: FMC2_BCHIER */
+#define FMC2_BCHIER_DERIE BIT(1)
+#define FMC2_BCHIER_EPBRIE BIT(4)
+
+/* Register: FMC2_BCHICR */
+#define FMC2_BCHICR_CLEAR_IRQ GENMASK(4, 0)
+
+/* Register: FMC2_BCHDSR0 */
+#define FMC2_BCHDSR0_DUE BIT(0)
+#define FMC2_BCHDSR0_DEF BIT(1)
+#define FMC2_BCHDSR0_DEN GENMASK(7, 4)
+
+/* Register: FMC2_BCHDSR1 */
+#define FMC2_BCHDSR1_EBP1 GENMASK(12, 0)
+#define FMC2_BCHDSR1_EBP2 GENMASK(28, 16)
+
+/* Register: FMC2_BCHDSR2 */
+#define FMC2_BCHDSR2_EBP3 GENMASK(12, 0)
+#define FMC2_BCHDSR2_EBP4 GENMASK(28, 16)
+
+/* Register: FMC2_BCHDSR3 */
+#define FMC2_BCHDSR3_EBP5 GENMASK(12, 0)
+#define FMC2_BCHDSR3_EBP6 GENMASK(28, 16)
+
+/* Register: FMC2_BCHDSR4 */
+#define FMC2_BCHDSR4_EBP7 GENMASK(12, 0)
+#define FMC2_BCHDSR4_EBP8 GENMASK(28, 16)
+
+enum stm32_fmc2_ecc {
+ FMC2_ECC_HAM = 1,
+ FMC2_ECC_BCH4 = 4,
+ FMC2_ECC_BCH8 = 8
+};
+
+enum stm32_fmc2_irq_state {
+ FMC2_IRQ_UNKNOWN = 0,
+ FMC2_IRQ_BCH,
+ FMC2_IRQ_SEQ
+};
+
+struct stm32_fmc2_timings {
+ u8 tclr;
+ u8 tar;
+ u8 thiz;
+ u8 twait;
+ u8 thold_mem;
+ u8 tset_mem;
+ u8 thold_att;
+ u8 tset_att;
+};
+
+struct stm32_fmc2_nand {
+ struct nand_chip chip;
+ struct gpio_desc *wp_gpio;
+ struct stm32_fmc2_timings timings;
+ int ncs;
+ int cs_used[FMC2_MAX_CE];
+};
+
+static inline struct stm32_fmc2_nand *to_fmc2_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct stm32_fmc2_nand, chip);
+}
+
+struct stm32_fmc2_nfc {
+ struct nand_controller base;
+ struct stm32_fmc2_nand nand;
+ struct device *dev;
+ struct device *cdev;
+ struct regmap *regmap;
+ void __iomem *data_base[FMC2_MAX_CE];
+ void __iomem *cmd_base[FMC2_MAX_CE];
+ void __iomem *addr_base[FMC2_MAX_CE];
+ phys_addr_t io_phys_addr;
+ phys_addr_t data_phys_addr[FMC2_MAX_CE];
+ struct clk *clk;
+ u8 irq_state;
+
+ struct dma_chan *dma_tx_ch;
+ struct dma_chan *dma_rx_ch;
+ struct dma_chan *dma_ecc_ch;
+ struct sg_table dma_data_sg;
+ struct sg_table dma_ecc_sg;
+ u8 *ecc_buf;
+ int dma_ecc_len;
+
+ struct completion complete;
+ struct completion dma_data_complete;
+ struct completion dma_ecc_complete;
+
+ u8 cs_assigned;
+ int cs_sel;
+};
+
+static inline struct stm32_fmc2_nfc *to_stm32_nfc(struct nand_controller *base)
+{
+ return container_of(base, struct stm32_fmc2_nfc, base);
+}
+
+static void stm32_fmc2_nfc_timings_init(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
+ struct stm32_fmc2_timings *timings = &nand->timings;
+ u32 pmem, patt;
+
+ /* Set tclr/tar timings */
+ regmap_update_bits(nfc->regmap, FMC2_PCR,
+ FMC2_PCR_TCLR | FMC2_PCR_TAR,
+ FIELD_PREP(FMC2_PCR_TCLR, timings->tclr) |
+ FIELD_PREP(FMC2_PCR_TAR, timings->tar));
+
+ /* Set tset/twait/thold/thiz timings in common bank */
+ pmem = FIELD_PREP(FMC2_PMEM_MEMSET, timings->tset_mem);
+ pmem |= FIELD_PREP(FMC2_PMEM_MEMWAIT, timings->twait);
+ pmem |= FIELD_PREP(FMC2_PMEM_MEMHOLD, timings->thold_mem);
+ pmem |= FIELD_PREP(FMC2_PMEM_MEMHIZ, timings->thiz);
+ regmap_write(nfc->regmap, FMC2_PMEM, pmem);
+
+ /* Set tset/twait/thold/thiz timings in attribute bank */
+ patt = FIELD_PREP(FMC2_PATT_ATTSET, timings->tset_att);
+ patt |= FIELD_PREP(FMC2_PATT_ATTWAIT, timings->twait);
+ patt |= FIELD_PREP(FMC2_PATT_ATTHOLD, timings->thold_att);
+ patt |= FIELD_PREP(FMC2_PATT_ATTHIZ, timings->thiz);
+ regmap_write(nfc->regmap, FMC2_PATT, patt);
+}
+
+static void stm32_fmc2_nfc_setup(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ u32 pcr = 0, pcr_mask;
+
+ /* Configure ECC algorithm (default configuration is Hamming) */
+ pcr_mask = FMC2_PCR_ECCALG;
+ pcr_mask |= FMC2_PCR_BCHECC;
+ if (chip->ecc.strength == FMC2_ECC_BCH8) {
+ pcr |= FMC2_PCR_ECCALG;
+ pcr |= FMC2_PCR_BCHECC;
+ } else if (chip->ecc.strength == FMC2_ECC_BCH4) {
+ pcr |= FMC2_PCR_ECCALG;
+ }
+
+ /* Set buswidth */
+ pcr_mask |= FMC2_PCR_PWID;
+ if (chip->options & NAND_BUSWIDTH_16)
+ pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16);
+
+ /* Set ECC sector size */
+ pcr_mask |= FMC2_PCR_ECCSS;
+ pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_512);
+
+ regmap_update_bits(nfc->regmap, FMC2_PCR, pcr_mask, pcr);
+}
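+
+/*
+ * Summary of the PCR programming above (derived from the code):
+ * Hamming (strength 1): ECCALG = 0, BCHECC = 0
+ * BCH4 (strength 4): ECCALG = 1, BCHECC = 0
+ * BCH8 (strength 8): ECCALG = 1, BCHECC = 1
+ * The ECC sector size is always programmed for 512-byte steps.
+ */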
+
+static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
+ struct dma_slave_config dma_cfg;
+ int ret;
+
+ if (nand->cs_used[chipnr] == nfc->cs_sel)
+ return 0;
+
+ nfc->cs_sel = nand->cs_used[chipnr];
+ stm32_fmc2_nfc_setup(chip);
+ stm32_fmc2_nfc_timings_init(chip);
+
+ if (nfc->dma_tx_ch && nfc->dma_rx_ch) {
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr = nfc->data_phys_addr[nfc->cs_sel];
+ dma_cfg.dst_addr = nfc->data_phys_addr[nfc->cs_sel];
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_cfg.src_maxburst = 32;
+ dma_cfg.dst_maxburst = 32;
+
+ ret = dmaengine_slave_config(nfc->dma_tx_ch, &dma_cfg);
+ if (ret) {
+ dev_err(nfc->dev, "tx DMA engine slave config failed\n");
+ return ret;
+ }
+
+ ret = dmaengine_slave_config(nfc->dma_rx_ch, &dma_cfg);
+ if (ret) {
+ dev_err(nfc->dev, "rx DMA engine slave config failed\n");
+ return ret;
+ }
+ }
+
+ if (nfc->dma_ecc_ch) {
+ /*
+ * Hamming: we read HECCR register
+ * BCH4/BCH8: we read BCHDSRSx registers
+ */
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr = nfc->io_phys_addr;
+ dma_cfg.src_addr += chip->ecc.strength == FMC2_ECC_HAM ?
+ FMC2_HECCR : FMC2_BCHDSR0;
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ ret = dmaengine_slave_config(nfc->dma_ecc_ch, &dma_cfg);
+ if (ret) {
+ dev_err(nfc->dev, "ECC DMA engine slave config failed\n");
+ return ret;
+ }
+
+ /* Calculate ECC length needed for one sector */
+ nfc->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ?
+ FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN;
+ }
+
+ return 0;
+}
+
+static void stm32_fmc2_nfc_set_buswidth_16(struct stm32_fmc2_nfc *nfc, bool set)
+{
+ u32 pcr;
+
+ pcr = set ? FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16) :
+ FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_8);
+
+ regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_PWID, pcr);
+}
+
+static void stm32_fmc2_nfc_set_ecc(struct stm32_fmc2_nfc *nfc, bool enable)
+{
+ regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_ECCEN,
+ enable ? FMC2_PCR_ECCEN : 0);
+}
+
+static void stm32_fmc2_nfc_enable_seq_irq(struct stm32_fmc2_nfc *nfc)
+{
+ nfc->irq_state = FMC2_IRQ_SEQ;
+
+ regmap_update_bits(nfc->regmap, FMC2_CSQIER,
+ FMC2_CSQIER_TCIE, FMC2_CSQIER_TCIE);
+}
+
+static void stm32_fmc2_nfc_disable_seq_irq(struct stm32_fmc2_nfc *nfc)
+{
+ regmap_update_bits(nfc->regmap, FMC2_CSQIER, FMC2_CSQIER_TCIE, 0);
+
+ nfc->irq_state = FMC2_IRQ_UNKNOWN;
+}
+
+static void stm32_fmc2_nfc_clear_seq_irq(struct stm32_fmc2_nfc *nfc)
+{
+ regmap_write(nfc->regmap, FMC2_CSQICR, FMC2_CSQICR_CLEAR_IRQ);
+}
+
+static void stm32_fmc2_nfc_enable_bch_irq(struct stm32_fmc2_nfc *nfc, int mode)
+{
+ nfc->irq_state = FMC2_IRQ_BCH;
+
+ if (mode == NAND_ECC_WRITE)
+ regmap_update_bits(nfc->regmap, FMC2_BCHIER,
+ FMC2_BCHIER_EPBRIE, FMC2_BCHIER_EPBRIE);
+ else
+ regmap_update_bits(nfc->regmap, FMC2_BCHIER,
+ FMC2_BCHIER_DERIE, FMC2_BCHIER_DERIE);
+}
+
+static void stm32_fmc2_nfc_disable_bch_irq(struct stm32_fmc2_nfc *nfc)
+{
+ regmap_update_bits(nfc->regmap, FMC2_BCHIER,
+ FMC2_BCHIER_DERIE | FMC2_BCHIER_EPBRIE, 0);
+
+ nfc->irq_state = FMC2_IRQ_UNKNOWN;
+}
+
+static void stm32_fmc2_nfc_clear_bch_irq(struct stm32_fmc2_nfc *nfc)
+{
+ regmap_write(nfc->regmap, FMC2_BCHICR, FMC2_BCHICR_CLEAR_IRQ);
+}
+
+/*
+ * Enable ECC logic and reset syndrome/parity bits previously calculated
+ * Syndrome/parity bits are cleared by setting the ECCEN bit to 0
+ */
+static void stm32_fmc2_nfc_hwctl(struct nand_chip *chip, int mode)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+
+ stm32_fmc2_nfc_set_ecc(nfc, false);
+
+ if (chip->ecc.strength != FMC2_ECC_HAM) {
+ regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_WEN,
+ mode == NAND_ECC_WRITE ? FMC2_PCR_WEN : 0);
+
+ reinit_completion(&nfc->complete);
+ stm32_fmc2_nfc_clear_bch_irq(nfc);
+ stm32_fmc2_nfc_enable_bch_irq(nfc, mode);
+ }
+
+ stm32_fmc2_nfc_set_ecc(nfc, true);
+}
+
+/*
+ * ECC Hamming calculation
+ * ECC is 3 bytes for 512 bytes of data (corrects at most 1 bit error)
+ */
+static void stm32_fmc2_nfc_ham_set_ecc(const u32 ecc_sta, u8 *ecc)
+{
+ ecc[0] = ecc_sta;
+ ecc[1] = ecc_sta >> 8;
+ ecc[2] = ecc_sta >> 16;
+}
+
+static int stm32_fmc2_nfc_ham_calculate(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ u32 sr, heccr;
+ int ret;
+
+ ret = regmap_read_poll_timeout(nfc->regmap, FMC2_SR, sr,
+ sr & FMC2_SR_NWRF, 1,
+ 1000 * FMC2_TIMEOUT_MS);
+ if (ret) {
+ dev_err(nfc->dev, "ham timeout\n");
+ return ret;
+ }
+
+ regmap_read(nfc->regmap, FMC2_HECCR, &heccr);
+ stm32_fmc2_nfc_ham_set_ecc(heccr, ecc);
+ stm32_fmc2_nfc_set_ecc(nfc, false);
+
+ return 0;
+}
+
+static int stm32_fmc2_nfc_ham_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ u8 bit_position = 0, b0, b1, b2;
+ u32 byte_addr = 0, b;
+ u32 i, shifting = 1;
+
+ /* Determine which bit and byte are faulty (if any) */
+ b0 = read_ecc[0] ^ calc_ecc[0];
+ b1 = read_ecc[1] ^ calc_ecc[1];
+ b2 = read_ecc[2] ^ calc_ecc[2];
+ b = b0 | (b1 << 8) | (b2 << 16);
+
+ /* No errors */
+ if (likely(!b))
+ return 0;
+
+ /* Calculate bit position */
+ for (i = 0; i < 3; i++) {
+ switch (b % 4) {
+ case 2:
+ bit_position += shifting;
+ break;
+ case 1:
+ break;
+ default:
+ return -EBADMSG;
+ }
+ shifting <<= 1;
+ b >>= 2;
+ }
+
+ /* Calculate byte position */
+ shifting = 1;
+ for (i = 0; i < 9; i++) {
+ switch (b % 4) {
+ case 2:
+ byte_addr += shifting;
+ break;
+ case 1:
+ break;
+ default:
+ return -EBADMSG;
+ }
+ shifting <<= 1;
+ b >>= 2;
+ }
+
+ /* Flip the bit */
+ dat[byte_addr] ^= (1 << bit_position);
+
+ return 1;
+}
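+
+/*
+ * Worked example (hypothetical syndrome): the 24-bit XOR of stored and
+ * recomputed ECC is decoded above as 12 base-4 digits, LSB first: the
+ * first 3 digits give the bit position, the next 9 the byte position.
+ * A digit of 2 adds the current weight, 1 adds nothing, anything else
+ * is not a single-bit error (-EBADMSG). For an error at bit 5 of byte
+ * 100, the bit digits are {2, 1, 2} (1 + 4 = 5) and the byte digits
+ * are {1, 1, 2, 1, 1, 2, 2, 1, 1} (4 + 32 + 64 = 100).
+ */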
+
+/*
+ * ECC BCH calculation and correction
+ * ECC is 7/13 bytes for 512 bytes of data (corrects at most 4-bit/8-bit
+ * errors respectively)
+ */
+static int stm32_fmc2_nfc_bch_calculate(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ u32 bchpbr;
+
+ /* Wait until the BCH code is ready */
+ if (!wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
+ dev_err(nfc->dev, "bch timeout\n");
+ stm32_fmc2_nfc_disable_bch_irq(nfc);
+ return -ETIMEDOUT;
+ }
+
+ /* Read parity bits */
+ regmap_read(nfc->regmap, FMC2_BCHPBR1, &bchpbr);
+ ecc[0] = bchpbr;
+ ecc[1] = bchpbr >> 8;
+ ecc[2] = bchpbr >> 16;
+ ecc[3] = bchpbr >> 24;
+
+ regmap_read(nfc->regmap, FMC2_BCHPBR2, &bchpbr);
+ ecc[4] = bchpbr;
+ ecc[5] = bchpbr >> 8;
+ ecc[6] = bchpbr >> 16;
+
+ if (chip->ecc.strength == FMC2_ECC_BCH8) {
+ ecc[7] = bchpbr >> 24;
+
+ regmap_read(nfc->regmap, FMC2_BCHPBR3, &bchpbr);
+ ecc[8] = bchpbr;
+ ecc[9] = bchpbr >> 8;
+ ecc[10] = bchpbr >> 16;
+ ecc[11] = bchpbr >> 24;
+
+ regmap_read(nfc->regmap, FMC2_BCHPBR4, &bchpbr);
+ ecc[12] = bchpbr;
+ }
+
+ stm32_fmc2_nfc_set_ecc(nfc, false);
+
+ return 0;
+}
+
+static int stm32_fmc2_nfc_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
+{
+ u32 bchdsr0 = ecc_sta[0];
+ u32 bchdsr1 = ecc_sta[1];
+ u32 bchdsr2 = ecc_sta[2];
+ u32 bchdsr3 = ecc_sta[3];
+ u32 bchdsr4 = ecc_sta[4];
+ u16 pos[8];
+ int i, den;
+ unsigned int nb_errs = 0;
+
+ /* No errors found */
+ if (likely(!(bchdsr0 & FMC2_BCHDSR0_DEF)))
+ return 0;
+
+ /* Too many errors detected */
+ if (unlikely(bchdsr0 & FMC2_BCHDSR0_DUE))
+ return -EBADMSG;
+
+ pos[0] = FIELD_GET(FMC2_BCHDSR1_EBP1, bchdsr1);
+ pos[1] = FIELD_GET(FMC2_BCHDSR1_EBP2, bchdsr1);
+ pos[2] = FIELD_GET(FMC2_BCHDSR2_EBP3, bchdsr2);
+ pos[3] = FIELD_GET(FMC2_BCHDSR2_EBP4, bchdsr2);
+ pos[4] = FIELD_GET(FMC2_BCHDSR3_EBP5, bchdsr3);
+ pos[5] = FIELD_GET(FMC2_BCHDSR3_EBP6, bchdsr3);
+ pos[6] = FIELD_GET(FMC2_BCHDSR4_EBP7, bchdsr4);
+ pos[7] = FIELD_GET(FMC2_BCHDSR4_EBP8, bchdsr4);
+
+ den = FIELD_GET(FMC2_BCHDSR0_DEN, bchdsr0);
+ for (i = 0; i < den; i++) {
+ if (pos[i] < eccsize * 8) {
+ change_bit(pos[i], (unsigned long *)dat);
+ nb_errs++;
+ }
+ }
+
+ return nb_errs;
+}
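+
+/*
+ * Note on the pos[] filter above: the BCHDSRx registers report error
+ * positions over data plus parity bits, so a position at or beyond
+ * eccsize * 8 (4096 for 512-byte sectors) presumably lies in the
+ * parity area; it is neither flipped in the data buffer nor counted
+ * in the bitflip total returned to the caller.
+ */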
+
+static int stm32_fmc2_nfc_bch_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ u32 ecc_sta[5];
+
+ /* Wait until the decoding status is ready */
+ if (!wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
+ dev_err(nfc->dev, "bch timeout\n");
+ stm32_fmc2_nfc_disable_bch_irq(nfc);
+ return -ETIMEDOUT;
+ }
+
+ regmap_bulk_read(nfc->regmap, FMC2_BCHDSR0, ecc_sta, 5);
+
+ stm32_fmc2_nfc_set_ecc(nfc, false);
+
+ return stm32_fmc2_nfc_bch_decode(chip->ecc.size, dat, ecc_sta);
+}
+
+static int stm32_fmc2_nfc_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret, i, s, stat, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ int eccstrength = chip->ecc.strength;
+ u8 *p = buf;
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = mtd->writesize + FMC2_BBM_LEN, s = 0; s < eccsteps;
+ s++, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ /* Read the nand page sector (512 bytes) */
+ ret = nand_change_read_column_op(chip, s * eccsize, p,
+ eccsize, false);
+ if (ret)
+ return ret;
+
+ /* Read the corresponding ECC bytes */
+ ret = nand_change_read_column_op(chip, i, ecc_code,
+ eccbytes, false);
+ if (ret)
+ return ret;
+
+ /* Correct the data */
+ stat = chip->ecc.correct(chip, p, ecc_code, ecc_calc);
+ if (stat == -EBADMSG)
+ /* Check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
+ ecc_code, eccbytes,
+ NULL, 0,
+ eccstrength);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ /* Read oob */
+ if (oob_required) {
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return max_bitflips;
+}
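+
+/*
+ * Column layout walked by the loop above, for a hypothetical
+ * 2048-byte page with BCH8 (eccsize = 512, eccbytes = 13,
+ * eccsteps = 4): sector s is read at column s * 512 and its ECC at
+ * column 2048 + 2 (BBM) + s * 13, i.e. sector 0 pairs with columns
+ * 0/2050, sector 1 with 512/2063, and so on.
+ */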
+
+/* Sequencer read/write configuration */
+static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page,
+ int raw, bool write_data)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 ecc_offset = mtd->writesize + FMC2_BBM_LEN;
+ /*
+ * cfg[0] => csqcfgr1, cfg[1] => csqcfgr2, cfg[2] => csqcfgr3
+ * cfg[3] => csqar1, cfg[4] => csqar2
+ */
+ u32 cfg[5];
+
+ regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_WEN,
+ write_data ? FMC2_PCR_WEN : 0);
+
+ /*
+ * - Set Program Page/Page Read command
+ * - Enable DMA request data
+ * - Set timings
+ */
+ cfg[0] = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T;
+ if (write_data)
+ cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_SEQIN);
+ else
+ cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_READ0) |
+ FMC2_CSQCFGR1_CMD2EN |
+ FIELD_PREP(FMC2_CSQCFGR1_CMD2, NAND_CMD_READSTART) |
+ FMC2_CSQCFGR1_CMD2T;
+
+ /*
+ * - Set Random Data Input/Random Data Read command
+ * - Enable the sequencer to access the Spare data area
+ * - Enable DMA request status decoding for read
+ * - Set timings
+ */
+ if (write_data)
+ cfg[1] = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDIN);
+ else
+ cfg[1] = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDOUT) |
+ FMC2_CSQCFGR2_RCMD2EN |
+ FIELD_PREP(FMC2_CSQCFGR2_RCMD2, NAND_CMD_RNDOUTSTART) |
+ FMC2_CSQCFGR2_RCMD1T |
+ FMC2_CSQCFGR2_RCMD2T;
+ if (!raw) {
+ cfg[1] |= write_data ? 0 : FMC2_CSQCFGR2_DMASEN;
+ cfg[1] |= FMC2_CSQCFGR2_SQSDTEN;
+ }
+
+ /*
+ * - Set the number of sectors to be written
+ * - Set timings
+ */
+ cfg[2] = FIELD_PREP(FMC2_CSQCFGR3_SNBR, chip->ecc.steps - 1);
+ if (write_data) {
+ cfg[2] |= FMC2_CSQCFGR3_RAC2T;
+ if (chip->options & NAND_ROW_ADDR_3)
+ cfg[2] |= FMC2_CSQCFGR3_AC5T;
+ else
+ cfg[2] |= FMC2_CSQCFGR3_AC4T;
+ }
+
+ /*
+ * Set the first four address cycles
+ * Byte 1 and byte 2 => column, we start at 0x0
+ * Byte 3 and byte 4 => page
+ */
+ cfg[3] = FIELD_PREP(FMC2_CSQCAR1_ADDC3, page);
+ cfg[3] |= FIELD_PREP(FMC2_CSQCAR1_ADDC4, page >> 8);
+
+ /*
+ * - Set chip enable number
+ * - Set ECC byte offset in the spare area
+ * - Calculate the number of address cycles to be issued
+ * - Set byte 5 of address cycle if needed
+ */
+ cfg[4] = FIELD_PREP(FMC2_CSQCAR2_NANDCEN, nfc->cs_sel);
+ if (chip->options & NAND_BUSWIDTH_16)
+ cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset >> 1);
+ else
+ cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset);
+ if (chip->options & NAND_ROW_ADDR_3) {
+ cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 5);
+ cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_ADDC5, page >> 16);
+ } else {
+ cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 4);
+ }
+
+ regmap_bulk_write(nfc->regmap, FMC2_CSQCFGR1, cfg, 5);
+}
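+
+/*
+ * Sketch of the resulting non-raw read sequence: CSQCFGR1 issues
+ * READ0/READSTART, CSQCFGR2 issues RNDOUT/RNDOUTSTART between
+ * sectors, CSQCFGR3 carries the sector count, and CSQAR1/CSQAR2 the
+ * page address, chip enable and spare-area offset, letting the
+ * sequencer fetch every ECC step of a page without CPU intervention.
+ */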
+
+static void stm32_fmc2_nfc_dma_callback(void *arg)
+{
+ complete((struct completion *)arg);
+}
+
+/* Read/write data from/to a page */
+static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+ int raw, bool write_data)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ struct dma_async_tx_descriptor *desc_data, *desc_ecc;
+ struct scatterlist *sg;
+ struct dma_chan *dma_ch = nfc->dma_rx_ch;
+ enum dma_data_direction dma_data_dir = DMA_FROM_DEVICE;
+ enum dma_transfer_direction dma_transfer_dir = DMA_DEV_TO_MEM;
+ int eccsteps = chip->ecc.steps;
+ int eccsize = chip->ecc.size;
+ unsigned long timeout = msecs_to_jiffies(FMC2_TIMEOUT_MS);
+ const u8 *p = buf;
+ int s, ret;
+
+ /* Configure DMA data */
+ if (write_data) {
+ dma_data_dir = DMA_TO_DEVICE;
+ dma_transfer_dir = DMA_MEM_TO_DEV;
+ dma_ch = nfc->dma_tx_ch;
+ }
+
+ for_each_sg(nfc->dma_data_sg.sgl, sg, eccsteps, s) {
+ sg_set_buf(sg, p, eccsize);
+ p += eccsize;
+ }
+
+ ret = dma_map_sg(nfc->dev, nfc->dma_data_sg.sgl,
+ eccsteps, dma_data_dir);
+ if (!ret)
+ return -EIO;
+
+ desc_data = dmaengine_prep_slave_sg(dma_ch, nfc->dma_data_sg.sgl,
+ eccsteps, dma_transfer_dir,
+ DMA_PREP_INTERRUPT);
+ if (!desc_data) {
+ ret = -ENOMEM;
+ goto err_unmap_data;
+ }
+
+ reinit_completion(&nfc->dma_data_complete);
+ reinit_completion(&nfc->complete);
+ desc_data->callback = stm32_fmc2_nfc_dma_callback;
+ desc_data->callback_param = &nfc->dma_data_complete;
+ ret = dma_submit_error(dmaengine_submit(desc_data));
+ if (ret)
+ goto err_unmap_data;
+
+ dma_async_issue_pending(dma_ch);
+
+ if (!write_data && !raw) {
+ /* Configure DMA ECC status */
+ p = nfc->ecc_buf;
+ for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) {
+ sg_set_buf(sg, p, nfc->dma_ecc_len);
+ p += nfc->dma_ecc_len;
+ }
+
+ ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
+ eccsteps, dma_data_dir);
+ if (!ret) {
+ ret = -EIO;
+ goto err_unmap_data;
+ }
+
+ desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
+ nfc->dma_ecc_sg.sgl,
+ eccsteps, dma_transfer_dir,
+ DMA_PREP_INTERRUPT);
+ if (!desc_ecc) {
+ ret = -ENOMEM;
+ goto err_unmap_ecc;
+ }
+
+ reinit_completion(&nfc->dma_ecc_complete);
+ desc_ecc->callback = stm32_fmc2_nfc_dma_callback;
+ desc_ecc->callback_param = &nfc->dma_ecc_complete;
+ ret = dma_submit_error(dmaengine_submit(desc_ecc));
+ if (ret)
+ goto err_unmap_ecc;
+
+ dma_async_issue_pending(nfc->dma_ecc_ch);
+ }
+
+ stm32_fmc2_nfc_clear_seq_irq(nfc);
+ stm32_fmc2_nfc_enable_seq_irq(nfc);
+
+ /* Start the transfer */
+ regmap_update_bits(nfc->regmap, FMC2_CSQCR,
+ FMC2_CSQCR_CSQSTART, FMC2_CSQCR_CSQSTART);
+
+ /* Wait for the end of the sequencer transfer */
+ if (!wait_for_completion_timeout(&nfc->complete, timeout)) {
+ dev_err(nfc->dev, "seq timeout\n");
+ stm32_fmc2_nfc_disable_seq_irq(nfc);
+ dmaengine_terminate_all(dma_ch);
+ if (!write_data && !raw)
+ dmaengine_terminate_all(nfc->dma_ecc_ch);
+ ret = -ETIMEDOUT;
+ goto err_unmap_ecc;
+ }
+
+ /* Wait for DMA data transfer completion */
+ if (!wait_for_completion_timeout(&nfc->dma_data_complete, timeout)) {
+ dev_err(nfc->dev, "data DMA timeout\n");
+ dmaengine_terminate_all(dma_ch);
+ ret = -ETIMEDOUT;
+ }
+
+ /* Wait for DMA ECC transfer completion */
+ if (!write_data && !raw) {
+ if (!wait_for_completion_timeout(&nfc->dma_ecc_complete,
+ timeout)) {
+ dev_err(nfc->dev, "ECC DMA timeout\n");
+ dmaengine_terminate_all(nfc->dma_ecc_ch);
+ ret = -ETIMEDOUT;
+ }
+ }
+
+err_unmap_ecc:
+ if (!write_data && !raw)
+ dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
+ eccsteps, dma_data_dir);
+
+err_unmap_data:
+ dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir);
+
+ return ret;
+}
+
+static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page, int raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /* Configure the sequencer */
+ stm32_fmc2_nfc_rw_page_init(chip, page, raw, true);
+
+ /* Write the page */
+ ret = stm32_fmc2_nfc_xfer(chip, buf, raw, true);
+ if (ret)
+ return ret;
+
+ /* Write oob */
+ if (oob_required) {
+ ret = nand_change_write_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int stm32_fmc2_nfc_seq_write_page(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, false);
+}
+
+static int stm32_fmc2_nfc_seq_write_page_raw(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ int ret;
+
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, true);
+}
+
+/* Get a status indicating which sectors have errors */
+static u16 stm32_fmc2_nfc_get_mapping_status(struct stm32_fmc2_nfc *nfc)
+{
+ u32 csqemsr;
+
+ regmap_read(nfc->regmap, FMC2_CSQEMSR, &csqemsr);
+
+ return FIELD_GET(FMC2_CSQEMSR_SEM, csqemsr);
+}
+
+static int stm32_fmc2_nfc_seq_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ int eccstrength = chip->ecc.strength;
+ int i, s, eccsize = chip->ecc.size;
+ u32 *ecc_sta = (u32 *)nfc->ecc_buf;
+ u16 sta_map = stm32_fmc2_nfc_get_mapping_status(nfc);
+ unsigned int max_bitflips = 0;
+
+ for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, dat += eccsize) {
+ int stat = 0;
+
+ if (eccstrength == FMC2_ECC_HAM) {
+ /* Ecc_sta = FMC2_HECCR */
+ if (sta_map & BIT(s)) {
+ stm32_fmc2_nfc_ham_set_ecc(*ecc_sta,
+ &calc_ecc[i]);
+ stat = stm32_fmc2_nfc_ham_correct(chip, dat,
+ &read_ecc[i],
+ &calc_ecc[i]);
+ }
+ ecc_sta++;
+ } else {
+ /*
+ * Ecc_sta[0] = FMC2_BCHDSR0
+ * Ecc_sta[1] = FMC2_BCHDSR1
+ * Ecc_sta[2] = FMC2_BCHDSR2
+ * Ecc_sta[3] = FMC2_BCHDSR3
+ * Ecc_sta[4] = FMC2_BCHDSR4
+ */
+ if (sta_map & BIT(s))
+ stat = stm32_fmc2_nfc_bch_decode(eccsize, dat,
+ ecc_sta);
+ ecc_sta += 5;
+ }
+
+ if (stat == -EBADMSG)
+ /* Check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(dat, eccsize,
+ &read_ecc[i],
+ eccbytes,
+ NULL, 0,
+ eccstrength);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ return max_bitflips;
+}
+
+static int stm32_fmc2_nfc_seq_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *ecc_code = chip->ecc.code_buf;
+ u16 sta_map;
+ int ret;
+
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ /* Configure the sequencer */
+ stm32_fmc2_nfc_rw_page_init(chip, page, 0, false);
+
+ /* Read the page */
+ ret = stm32_fmc2_nfc_xfer(chip, buf, 0, false);
+ if (ret)
+ return ret;
+
+ sta_map = stm32_fmc2_nfc_get_mapping_status(nfc);
+
+ /* Check whether any errors were reported */
+ if (likely(!sta_map)) {
+ if (oob_required)
+ return nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize, false);
+
+ return 0;
+ }
+
+ /* Read oob */
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ /* Correct data */
+ return chip->ecc.correct(chip, buf, ecc_code, ecc_calc);
+}
+
+static int stm32_fmc2_nfc_seq_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ /* Configure the sequencer */
+ stm32_fmc2_nfc_rw_page_init(chip, page, 1, false);
+
+ /* Read the page */
+ ret = stm32_fmc2_nfc_xfer(chip, buf, 1, false);
+ if (ret)
+ return ret;
+
+ /* Read oob */
+ if (oob_required)
+ return nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+
+ return 0;
+}
+
+static irqreturn_t stm32_fmc2_nfc_irq(int irq, void *dev_id)
+{
+ struct stm32_fmc2_nfc *nfc = (struct stm32_fmc2_nfc *)dev_id;
+
+ if (nfc->irq_state == FMC2_IRQ_SEQ)
+ /* Sequencer is used */
+ stm32_fmc2_nfc_disable_seq_irq(nfc);
+ else if (nfc->irq_state == FMC2_IRQ_BCH)
+ /* BCH is used */
+ stm32_fmc2_nfc_disable_bch_irq(nfc);
+
+ complete(&nfc->complete);
+
+ return IRQ_HANDLED;
+}
+
+static void stm32_fmc2_nfc_read_data(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ void __iomem *io_addr_r = nfc->data_base[nfc->cs_sel];
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 8-bit */
+ stm32_fmc2_nfc_set_buswidth_16(nfc, false);
+
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
+ *(u8 *)buf = readb_relaxed(io_addr_r);
+ buf += sizeof(u8);
+ len -= sizeof(u8);
+ }
+
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ len >= sizeof(u16)) {
+ *(u16 *)buf = readw_relaxed(io_addr_r);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+ }
+
+ /* Buf is aligned */
+ while (len >= sizeof(u32)) {
+ *(u32 *)buf = readl_relaxed(io_addr_r);
+ buf += sizeof(u32);
+ len -= sizeof(u32);
+ }
+
+ /* Read remaining bytes */
+ if (len >= sizeof(u16)) {
+ *(u16 *)buf = readw_relaxed(io_addr_r);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+
+ if (len)
+ *(u8 *)buf = readb_relaxed(io_addr_r);
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 16-bit */
+ stm32_fmc2_nfc_set_buswidth_16(nfc, true);
+}
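+
+/*
+ * Access-width example (hypothetical transfer): reading 7 bytes into
+ * a buffer at an address ending in 0x1 issues one readb (to reach
+ * 16-bit alignment), one readw (to reach 32-bit alignment), then one
+ * readl; a 32-bit aligned 7-byte read issues one readl, one readw and
+ * one readb. The FIFO address never changes, only the access width.
+ */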
+
+static void stm32_fmc2_nfc_write_data(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ void __iomem *io_addr_w = nfc->data_base[nfc->cs_sel];
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 8-bit */
+ stm32_fmc2_nfc_set_buswidth_16(nfc, false);
+
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
+ writeb_relaxed(*(u8 *)buf, io_addr_w);
+ buf += sizeof(u8);
+ len -= sizeof(u8);
+ }
+
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ len >= sizeof(u16)) {
+ writew_relaxed(*(u16 *)buf, io_addr_w);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+ }
+
+ /* Buf is aligned */
+ while (len >= sizeof(u32)) {
+ writel_relaxed(*(u32 *)buf, io_addr_w);
+ buf += sizeof(u32);
+ len -= sizeof(u32);
+ }
+
+ /* Write remaining bytes */
+ if (len >= sizeof(u16)) {
+ writew_relaxed(*(u16 *)buf, io_addr_w);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+
+ if (len)
+ writeb_relaxed(*(u8 *)buf, io_addr_w);
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 16-bit */
+ stm32_fmc2_nfc_set_buswidth_16(nfc, true);
+}
+
+static int stm32_fmc2_nfc_waitrdy(struct nand_chip *chip,
+ unsigned long timeout_ms)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ const struct nand_sdr_timings *timings;
+ u32 isr, sr;
+
+ /* Check that there are no pending requests to the NAND flash */
+ if (regmap_read_poll_timeout(nfc->regmap, FMC2_SR, sr,
+ sr & FMC2_SR_NWRF, 1,
+ 1000 * FMC2_TIMEOUT_MS))
+ dev_warn(nfc->dev, "Waitrdy timeout\n");
+
+ /* Wait tWB before the R/B# signal goes low */
+ timings = nand_get_sdr_timings(nand_get_interface_config(chip));
+ ndelay(PSEC_TO_NSEC(timings->tWB_max));
+
+ /* R/B# signal is low, clear high level flag */
+ regmap_write(nfc->regmap, FMC2_ICR, FMC2_ICR_CIHLF);
+
+ /* Wait until the R/B# signal is high */
+ return regmap_read_poll_timeout(nfc->regmap, FMC2_ISR, isr,
+ isr & FMC2_ISR_IHLF, 5,
+ 1000 * FMC2_TIMEOUT_MS);
+}
+
+static int stm32_fmc2_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id, i, timeout;
+ int ret;
+
+ if (check_only)
+ return 0;
+
+ ret = stm32_fmc2_nfc_select_chip(chip, op->cs);
+ if (ret)
+ return ret;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb_relaxed(instr->ctx.cmd.opcode,
+ nfc->cmd_base[nfc->cs_sel]);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ writeb_relaxed(instr->ctx.addr.addrs[i],
+ nfc->addr_base[nfc->cs_sel]);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ stm32_fmc2_nfc_read_data(chip, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ stm32_fmc2_nfc_write_data(chip, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ timeout = instr->ctx.waitrdy.timeout_ms;
+ ret = stm32_fmc2_nfc_waitrdy(chip, timeout);
+ break;
+ }
+ }
+
+ return ret;
+}
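+
+/*
+ * Example instruction stream handled above (sketch): a READID
+ * operation is typically CMD(0x90) -> ADDR(0x00) -> DATA_IN(id, len),
+ * which maps to one writeb to cmd_base, one writeb to addr_base, then
+ * a FIFO read from data_base via stm32_fmc2_nfc_read_data().
+ */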
+
+static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc)
+{
+ u32 pcr;
+
+ regmap_read(nfc->regmap, FMC2_PCR, &pcr);
+
+ /* Set CS used to undefined */
+ nfc->cs_sel = -1;
+
+ /* Enable wait feature and nand flash memory bank */
+ pcr |= FMC2_PCR_PWAITEN;
+ pcr |= FMC2_PCR_PBKEN;
+
+ /* Set buswidth to 8-bit mode for identification */
+ pcr &= ~FMC2_PCR_PWID;
+
+ /* ECC logic is disabled */
+ pcr &= ~FMC2_PCR_ECCEN;
+
+ /* Default mode */
+ pcr &= ~FMC2_PCR_ECCALG;
+ pcr &= ~FMC2_PCR_BCHECC;
+ pcr &= ~FMC2_PCR_WEN;
+
+ /* Set default ECC sector size */
+ pcr &= ~FMC2_PCR_ECCSS;
+ pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_2048);
+
+ /* Set default tclr/tar timings */
+ pcr &= ~FMC2_PCR_TCLR;
+ pcr |= FIELD_PREP(FMC2_PCR_TCLR, FMC2_PCR_TCLR_DEFAULT);
+ pcr &= ~FMC2_PCR_TAR;
+ pcr |= FIELD_PREP(FMC2_PCR_TAR, FMC2_PCR_TAR_DEFAULT);
+
+ /* Enable FMC2 controller */
+ if (nfc->dev == nfc->cdev)
+ regmap_update_bits(nfc->regmap, FMC2_BCR1,
+ FMC2_BCR1_FMC2EN, FMC2_BCR1_FMC2EN);
+
+ regmap_write(nfc->regmap, FMC2_PCR, pcr);
+ regmap_write(nfc->regmap, FMC2_PMEM, FMC2_PMEM_DEFAULT);
+ regmap_write(nfc->regmap, FMC2_PATT, FMC2_PATT_DEFAULT);
+}
+
+static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip,
+ const struct nand_sdr_timings *sdrt)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
+ struct stm32_fmc2_timings *tims = &nand->timings;
+ unsigned long hclk = clk_get_rate(nfc->clk);
+ unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000);
+ unsigned long timing, tar, tclr, thiz, twait;
+ unsigned long tset_mem, tset_att, thold_mem, thold_att;
+
+ tar = max_t(unsigned long, hclkp, sdrt->tAR_min);
+ timing = DIV_ROUND_UP(tar, hclkp) - 1;
+ tims->tar = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK);
+
+ tclr = max_t(unsigned long, hclkp, sdrt->tCLR_min);
+ timing = DIV_ROUND_UP(tclr, hclkp) - 1;
+ tims->tclr = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK);
+
+ tims->thiz = FMC2_THIZ;
+ thiz = (tims->thiz + 1) * hclkp;
+
+ /*
+ * tWAIT > tRP
+ * tWAIT > tWP
+ * tWAIT > tREA + tIO
+ */
+ twait = max_t(unsigned long, hclkp, sdrt->tRP_min);
+ twait = max_t(unsigned long, twait, sdrt->tWP_min);
+ twait = max_t(unsigned long, twait, sdrt->tREA_max + FMC2_TIO);
+ timing = DIV_ROUND_UP(twait, hclkp);
+ tims->twait = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tSETUP_MEM > tCS - tWAIT
+ * tSETUP_MEM > tALS - tWAIT
+ * tSETUP_MEM > tDS - (tWAIT - tHIZ)
+ */
+ tset_mem = hclkp;
+ if (sdrt->tCS_min > twait && (tset_mem < sdrt->tCS_min - twait))
+ tset_mem = sdrt->tCS_min - twait;
+ if (sdrt->tALS_min > twait && (tset_mem < sdrt->tALS_min - twait))
+ tset_mem = sdrt->tALS_min - twait;
+ if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
+ (tset_mem < sdrt->tDS_min - (twait - thiz)))
+ tset_mem = sdrt->tDS_min - (twait - thiz);
+ timing = DIV_ROUND_UP(tset_mem, hclkp);
+ tims->tset_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tHOLD_MEM > tCH
+ * tHOLD_MEM > tREH - tSETUP_MEM
+ * tHOLD_MEM > max(tRC, tWC) - (tSETUP_MEM + tWAIT)
+ */
+ thold_mem = max_t(unsigned long, hclkp, sdrt->tCH_min);
+ if (sdrt->tREH_min > tset_mem &&
+ (thold_mem < sdrt->tREH_min - tset_mem))
+ thold_mem = sdrt->tREH_min - tset_mem;
+ if ((sdrt->tRC_min > tset_mem + twait) &&
+ (thold_mem < sdrt->tRC_min - (tset_mem + twait)))
+ thold_mem = sdrt->tRC_min - (tset_mem + twait);
+ if ((sdrt->tWC_min > tset_mem + twait) &&
+ (thold_mem < sdrt->tWC_min - (tset_mem + twait)))
+ thold_mem = sdrt->tWC_min - (tset_mem + twait);
+ timing = DIV_ROUND_UP(thold_mem, hclkp);
+ tims->thold_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tSETUP_ATT > tCS - tWAIT
+ * tSETUP_ATT > tCLS - tWAIT
+ * tSETUP_ATT > tALS - tWAIT
+ * tSETUP_ATT > tRHW - tHOLD_MEM
+ * tSETUP_ATT > tDS - (tWAIT - tHIZ)
+ */
+ tset_att = hclkp;
+ if (sdrt->tCS_min > twait && (tset_att < sdrt->tCS_min - twait))
+ tset_att = sdrt->tCS_min - twait;
+ if (sdrt->tCLS_min > twait && (tset_att < sdrt->tCLS_min - twait))
+ tset_att = sdrt->tCLS_min - twait;
+ if (sdrt->tALS_min > twait && (tset_att < sdrt->tALS_min - twait))
+ tset_att = sdrt->tALS_min - twait;
+ if (sdrt->tRHW_min > thold_mem &&
+ (tset_att < sdrt->tRHW_min - thold_mem))
+ tset_att = sdrt->tRHW_min - thold_mem;
+ if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
+ (tset_att < sdrt->tDS_min - (twait - thiz)))
+ tset_att = sdrt->tDS_min - (twait - thiz);
+ timing = DIV_ROUND_UP(tset_att, hclkp);
+ tims->tset_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tHOLD_ATT > tALH
+ * tHOLD_ATT > tCH
+ * tHOLD_ATT > tCLH
+ * tHOLD_ATT > tCOH
+ * tHOLD_ATT > tDH
+ * tHOLD_ATT > tWB + tIO + tSYNC - tSETUP_MEM
+ * tHOLD_ATT > tADL - tSETUP_MEM
+ * tHOLD_ATT > tWH - tSETUP_MEM
+ * tHOLD_ATT > tWHR - tSETUP_MEM
+ * tHOLD_ATT > tRC - (tSETUP_ATT + tWAIT)
+ * tHOLD_ATT > tWC - (tSETUP_ATT + tWAIT)
+ */
+ thold_att = max_t(unsigned long, hclkp, sdrt->tALH_min);
+ thold_att = max_t(unsigned long, thold_att, sdrt->tCH_min);
+ thold_att = max_t(unsigned long, thold_att, sdrt->tCLH_min);
+ thold_att = max_t(unsigned long, thold_att, sdrt->tCOH_min);
+ thold_att = max_t(unsigned long, thold_att, sdrt->tDH_min);
+ if ((sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC > tset_mem) &&
+ (thold_att < sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem))
+ thold_att = sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem;
+ if (sdrt->tADL_min > tset_mem &&
+ (thold_att < sdrt->tADL_min - tset_mem))
+ thold_att = sdrt->tADL_min - tset_mem;
+ if (sdrt->tWH_min > tset_mem &&
+ (thold_att < sdrt->tWH_min - tset_mem))
+ thold_att = sdrt->tWH_min - tset_mem;
+ if (sdrt->tWHR_min > tset_mem &&
+ (thold_att < sdrt->tWHR_min - tset_mem))
+ thold_att = sdrt->tWHR_min - tset_mem;
+ if ((sdrt->tRC_min > tset_att + twait) &&
+ (thold_att < sdrt->tRC_min - (tset_att + twait)))
+ thold_att = sdrt->tRC_min - (tset_att + twait);
+ if ((sdrt->tWC_min > tset_att + twait) &&
+ (thold_att < sdrt->tWC_min - (tset_att + twait)))
+ thold_att = sdrt->tWC_min - (tset_att + twait);
+ timing = DIV_ROUND_UP(thold_att, hclkp);
+ tims->thold_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+}
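+
+/*
+ * Numeric example (hypothetical clock): with hclk = 200 MHz, hclkp is
+ * 1e9 / 200000 = 5000 ps. For an SDR mode with tAR_min = 10000 ps:
+ * tar = max(5000, 10000) = 10000 ps, and
+ * timing = DIV_ROUND_UP(10000, 5000) - 1 = 1, so PCR.TAR is written
+ * with 1, consistent with the field encoding cycles - 1.
+ */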
+
+static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ const struct nand_sdr_timings *sdrt;
+
+ sdrt = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdrt))
+ return PTR_ERR(sdrt);
+
+ if (conf->timings.mode > 3)
+ return -EOPNOTSUPP;
+
+ if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ stm32_fmc2_nfc_calc_timings(chip, sdrt);
+ stm32_fmc2_nfc_timings_init(chip);
+
+ return 0;
+}
+
+static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
+{
+ int ret = 0;
+
+ nfc->dma_tx_ch = dma_request_chan(nfc->dev, "tx");
+ if (IS_ERR(nfc->dma_tx_ch)) {
+ ret = PTR_ERR(nfc->dma_tx_ch);
+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
+ dev_err(nfc->dev,
+ "failed to request tx DMA channel: %d\n", ret);
+ nfc->dma_tx_ch = NULL;
+ goto err_dma;
+ }
+
+ nfc->dma_rx_ch = dma_request_chan(nfc->dev, "rx");
+ if (IS_ERR(nfc->dma_rx_ch)) {
+ ret = PTR_ERR(nfc->dma_rx_ch);
+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
+ dev_err(nfc->dev,
+ "failed to request rx DMA channel: %d\n", ret);
+ nfc->dma_rx_ch = NULL;
+ goto err_dma;
+ }
+
+ nfc->dma_ecc_ch = dma_request_chan(nfc->dev, "ecc");
+ if (IS_ERR(nfc->dma_ecc_ch)) {
+ ret = PTR_ERR(nfc->dma_ecc_ch);
+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
+ dev_err(nfc->dev,
+ "failed to request ecc DMA channel: %d\n", ret);
+ nfc->dma_ecc_ch = NULL;
+ goto err_dma;
+ }
+
+ ret = sg_alloc_table(&nfc->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ /* Allocate a buffer to store ECC status registers */
+ nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL);
+ if (!nfc->ecc_buf)
+ return -ENOMEM;
+
+ ret = sg_alloc_table(&nfc->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ init_completion(&nfc->dma_data_complete);
+ init_completion(&nfc->dma_ecc_complete);
+
+ return 0;
+
+err_dma:
+ if (ret == -ENODEV) {
+ dev_warn(nfc->dev,
+ "DMAs not defined in the DT, polling mode is used\n");
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void stm32_fmc2_nfc_nand_callbacks_setup(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+
+ /*
+ * Specific callbacks to read/write a page depending on
+ * the mode (polling/sequencer) and the algo used (Hamming, BCH).
+ */
+ if (nfc->dma_tx_ch && nfc->dma_rx_ch && nfc->dma_ecc_ch) {
+ /* DMA => use sequencer mode callbacks */
+ chip->ecc.correct = stm32_fmc2_nfc_seq_correct;
+ chip->ecc.write_page = stm32_fmc2_nfc_seq_write_page;
+ chip->ecc.read_page = stm32_fmc2_nfc_seq_read_page;
+ chip->ecc.write_page_raw = stm32_fmc2_nfc_seq_write_page_raw;
+ chip->ecc.read_page_raw = stm32_fmc2_nfc_seq_read_page_raw;
+ } else {
+ /* No DMA => use polling mode callbacks */
+ chip->ecc.hwctl = stm32_fmc2_nfc_hwctl;
+ if (chip->ecc.strength == FMC2_ECC_HAM) {
+ /* Hamming is used */
+ chip->ecc.calculate = stm32_fmc2_nfc_ham_calculate;
+ chip->ecc.correct = stm32_fmc2_nfc_ham_correct;
+ chip->ecc.options |= NAND_ECC_GENERIC_ERASED_CHECK;
+ } else {
+ /* BCH is used */
+ chip->ecc.calculate = stm32_fmc2_nfc_bch_calculate;
+ chip->ecc.correct = stm32_fmc2_nfc_bch_correct;
+ chip->ecc.read_page = stm32_fmc2_nfc_read_page;
+ }
+ }
+
+ /* Specific configurations depending on the algo used */
+ if (chip->ecc.strength == FMC2_ECC_HAM)
+ chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 4 : 3;
+ else if (chip->ecc.strength == FMC2_ECC_BCH8)
+ chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 14 : 13;
+ else
+ chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 8 : 7;
+}
+
+static int stm32_fmc2_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = ecc->total;
+ oobregion->offset = FMC2_BBM_LEN;
+
+ return 0;
+}
+
+static int stm32_fmc2_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - ecc->total - FMC2_BBM_LEN;
+ oobregion->offset = ecc->total + FMC2_BBM_LEN;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops stm32_fmc2_nfc_ooblayout_ops = {
+ .ecc = stm32_fmc2_nfc_ooblayout_ecc,
+ .free = stm32_fmc2_nfc_ooblayout_free,
+};
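+
+/*
+ * Resulting OOB layout, for a hypothetical 64-byte OOB with BCH8 on a
+ * 2048-byte page (ecc->total = 4 * 13 = 52):
+ * bytes [0..1] bad block marker | [2..53] ECC | [54..63] free
+ */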
+
+static int stm32_fmc2_nfc_calc_ecc_bytes(int step_size, int strength)
+{
+ /* Hamming */
+ if (strength == FMC2_ECC_HAM)
+ return 4;
+
+ /* BCH8 */
+ if (strength == FMC2_ECC_BCH8)
+ return 14;
+
+ /* BCH4 */
+ return 8;
+}
+
+NAND_ECC_CAPS_SINGLE(stm32_fmc2_nfc_ecc_caps, stm32_fmc2_nfc_calc_ecc_bytes,
+ FMC2_ECC_STEP_SIZE,
+ FMC2_ECC_HAM, FMC2_ECC_BCH4, FMC2_ECC_BCH8);
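+
+/*
+ * These caps let nand_ecc_choose_conf() pick the strongest setting
+ * that fits the spare area. Example (hypothetical part): a 2048+64
+ * page leaves 64 - 2 = 62 OOB bytes for ECC; BCH8 needs
+ * 4 steps * 14 bytes = 56, so it fits and is selected.
+ */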
+
+static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /*
+ * Only NAND_ECC_ENGINE_TYPE_ON_HOST mode is actually supported
+ * Hamming => ecc.strength = 1
+ * BCH4 => ecc.strength = 4
+ * BCH8 => ecc.strength = 8
+ * ECC sector size = 512
+ */
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ dev_err(nfc->dev,
+ "nand_ecc_engine_type is not well defined in the DT\n");
+ return -EINVAL;
+ }
+
+ /* Default ECC settings in case they are not set in the device tree */
+ if (!chip->ecc.size)
+ chip->ecc.size = FMC2_ECC_STEP_SIZE;
+
+ if (!chip->ecc.strength)
+ chip->ecc.strength = FMC2_ECC_BCH8;
+
+ ret = nand_ecc_choose_conf(chip, &stm32_fmc2_nfc_ecc_caps,
+ mtd->oobsize - FMC2_BBM_LEN);
+ if (ret) {
+ dev_err(nfc->dev, "no valid ECC settings set\n");
+ return ret;
+ }
+
+ if (mtd->writesize / chip->ecc.size > FMC2_MAX_SG) {
+ dev_err(nfc->dev, "nand page size is not supported\n");
+ return -EINVAL;
+ }
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ stm32_fmc2_nfc_nand_callbacks_setup(chip);
+
+ mtd_set_ooblayout(mtd, &stm32_fmc2_nfc_ooblayout_ops);
+
+ stm32_fmc2_nfc_setup(chip);
+
+ return 0;
+}
+
+static const struct nand_controller_ops stm32_fmc2_nfc_controller_ops = {
+ .attach_chip = stm32_fmc2_nfc_attach_chip,
+ .exec_op = stm32_fmc2_nfc_exec_op,
+ .setup_interface = stm32_fmc2_nfc_setup_interface,
+};
+
+static void stm32_fmc2_nfc_wp_enable(struct stm32_fmc2_nand *nand)
+{
+ if (nand->wp_gpio)
+ gpiod_set_value(nand->wp_gpio, 1);
+}
+
+static void stm32_fmc2_nfc_wp_disable(struct stm32_fmc2_nand *nand)
+{
+ if (nand->wp_gpio)
+ gpiod_set_value(nand->wp_gpio, 0);
+}
+
+static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
+ struct device_node *dn)
+{
+ struct stm32_fmc2_nand *nand = &nfc->nand;
+ u32 cs;
+ int ret, i;
+
+ if (!of_get_property(dn, "reg", &nand->ncs))
+ return -EINVAL;
+
+ nand->ncs /= sizeof(u32);
+ if (!nand->ncs) {
+ dev_err(nfc->dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nand->ncs; i++) {
+ ret = of_property_read_u32_index(dn, "reg", i, &cs);
+ if (ret) {
+ dev_err(nfc->dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (cs >= FMC2_MAX_CE) {
+ dev_err(nfc->dev, "invalid reg value: %d\n", cs);
+ return -EINVAL;
+ }
+
+ if (nfc->cs_assigned & BIT(cs)) {
+ dev_err(nfc->dev, "cs already assigned: %d\n", cs);
+ return -EINVAL;
+ }
+
+ nfc->cs_assigned |= BIT(cs);
+ nand->cs_used[i] = cs;
+ }
+
+ nand->wp_gpio = devm_fwnode_gpiod_get(nfc->dev, of_fwnode_handle(dn),
+ "wp", GPIOD_OUT_HIGH, "wp");
+ if (IS_ERR(nand->wp_gpio)) {
+ ret = PTR_ERR(nand->wp_gpio);
+ if (ret != -ENOENT)
+ return dev_err_probe(nfc->dev, ret,
+ "failed to request WP GPIO\n");
+
+ nand->wp_gpio = NULL;
+ }
+
+ nand_set_flash_node(&nand->chip, dn);
+
+ return 0;
+}
+
+static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
+{
+ struct device_node *dn = nfc->dev->of_node;
+ struct device_node *child;
+ int nchips = of_get_child_count(dn);
+ int ret = 0;
+
+ if (!nchips) {
+ dev_err(nfc->dev, "NAND chip not defined\n");
+ return -EINVAL;
+ }
+
+ if (nchips > 1) {
+ dev_err(nfc->dev, "too many NAND chips defined\n");
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(dn, child) {
+ ret = stm32_fmc2_nfc_parse_child(nfc, child);
+ if (ret < 0) {
+ of_node_put(child);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int stm32_fmc2_nfc_set_cdev(struct stm32_fmc2_nfc *nfc)
+{
+ struct device *dev = nfc->dev;
+ bool ebi_found = false;
+
+ if (dev->parent && of_device_is_compatible(dev->parent->of_node,
+ "st,stm32mp1-fmc2-ebi"))
+ ebi_found = true;
+
+ if (of_device_is_compatible(dev->of_node, "st,stm32mp1-fmc2-nfc")) {
+ if (ebi_found) {
+ nfc->cdev = dev->parent;
+
+ return 0;
+ }
+
+ return -EINVAL;
+ }
+
+ if (ebi_found)
+ return -EINVAL;
+
+ nfc->cdev = dev;
+
+ return 0;
+}
+
+static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *rstc;
+ struct stm32_fmc2_nfc *nfc;
+ struct stm32_fmc2_nand *nand;
+ struct resource *res;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct resource cres;
+ int chip_cs, mem_region, ret, irq;
+ int start_region = 0;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = dev;
+ nand_controller_init(&nfc->base);
+ nfc->base.ops = &stm32_fmc2_nfc_controller_ops;
+
+ ret = stm32_fmc2_nfc_set_cdev(nfc);
+ if (ret)
+ return ret;
+
+ ret = stm32_fmc2_nfc_parse_dt(nfc);
+ if (ret)
+ return ret;
+
+ ret = of_address_to_resource(nfc->cdev->of_node, 0, &cres);
+ if (ret)
+ return ret;
+
+ nfc->io_phys_addr = cres.start;
+
+ nfc->regmap = device_node_to_regmap(nfc->cdev->of_node);
+ if (IS_ERR(nfc->regmap))
+ return PTR_ERR(nfc->regmap);
+
+ if (nfc->dev == nfc->cdev)
+ start_region = 1;
+
+ for (chip_cs = 0, mem_region = start_region; chip_cs < FMC2_MAX_CE;
+ chip_cs++, mem_region += 3) {
+ if (!(nfc->cs_assigned & BIT(chip_cs)))
+ continue;
+
+ nfc->data_base[chip_cs] = devm_platform_get_and_ioremap_resource(pdev,
+ mem_region, &res);
+ if (IS_ERR(nfc->data_base[chip_cs]))
+ return PTR_ERR(nfc->data_base[chip_cs]);
+
+ nfc->data_phys_addr[chip_cs] = res->start;
+
+ nfc->cmd_base[chip_cs] = devm_platform_ioremap_resource(pdev, mem_region + 1);
+ if (IS_ERR(nfc->cmd_base[chip_cs]))
+ return PTR_ERR(nfc->cmd_base[chip_cs]);
+
+ nfc->addr_base[chip_cs] = devm_platform_ioremap_resource(pdev, mem_region + 2);
+ if (IS_ERR(nfc->addr_base[chip_cs]))
+ return PTR_ERR(nfc->addr_base[chip_cs]);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, stm32_fmc2_nfc_irq, 0,
+ dev_name(dev), nfc);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ init_completion(&nfc->complete);
+
+ nfc->clk = devm_clk_get_enabled(nfc->cdev, NULL);
+ if (IS_ERR(nfc->clk)) {
+ dev_err(dev, "can not get and enable the clock\n");
+ return PTR_ERR(nfc->clk);
+ }
+
+ rstc = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ } else {
+ reset_control_assert(rstc);
+ reset_control_deassert(rstc);
+ }
+
+ ret = stm32_fmc2_nfc_dma_setup(nfc);
+ if (ret)
+ goto err_release_dma;
+
+ stm32_fmc2_nfc_init(nfc);
+
+ nand = &nfc->nand;
+ chip = &nand->chip;
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = dev;
+
+ chip->controller = &nfc->base;
+ chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
+ NAND_USES_DMA;
+
+ stm32_fmc2_nfc_wp_disable(nand);
+
+ /* Scan to detect the NAND device */
+ ret = nand_scan(chip, nand->ncs);
+ if (ret)
+ goto err_wp_enable;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err_nand_cleanup;
+
+ platform_set_drvdata(pdev, nfc);
+
+ return 0;
+
+err_nand_cleanup:
+ nand_cleanup(chip);
+
+err_wp_enable:
+ stm32_fmc2_nfc_wp_enable(nand);
+
+err_release_dma:
+ if (nfc->dma_ecc_ch)
+ dma_release_channel(nfc->dma_ecc_ch);
+ if (nfc->dma_tx_ch)
+ dma_release_channel(nfc->dma_tx_ch);
+ if (nfc->dma_rx_ch)
+ dma_release_channel(nfc->dma_rx_ch);
+
+ sg_free_table(&nfc->dma_data_sg);
+ sg_free_table(&nfc->dma_ecc_sg);
+
+ return ret;
+}
+
+static void stm32_fmc2_nfc_remove(struct platform_device *pdev)
+{
+ struct stm32_fmc2_nfc *nfc = platform_get_drvdata(pdev);
+ struct stm32_fmc2_nand *nand = &nfc->nand;
+ struct nand_chip *chip = &nand->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+
+ if (nfc->dma_ecc_ch)
+ dma_release_channel(nfc->dma_ecc_ch);
+ if (nfc->dma_tx_ch)
+ dma_release_channel(nfc->dma_tx_ch);
+ if (nfc->dma_rx_ch)
+ dma_release_channel(nfc->dma_rx_ch);
+
+ sg_free_table(&nfc->dma_data_sg);
+ sg_free_table(&nfc->dma_ecc_sg);
+
+ stm32_fmc2_nfc_wp_enable(nand);
+}
+
+static int __maybe_unused stm32_fmc2_nfc_suspend(struct device *dev)
+{
+ struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev);
+ struct stm32_fmc2_nand *nand = &nfc->nand;
+
+ clk_disable_unprepare(nfc->clk);
+
+ stm32_fmc2_nfc_wp_enable(nand);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev)
+{
+ struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev);
+ struct stm32_fmc2_nand *nand = &nfc->nand;
+ int chip_cs, ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = clk_prepare_enable(nfc->clk);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ stm32_fmc2_nfc_init(nfc);
+
+ stm32_fmc2_nfc_wp_disable(nand);
+
+ for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) {
+ if (!(nfc->cs_assigned & BIT(chip_cs)))
+ continue;
+
+ nand_reset(&nand->chip, chip_cs);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_fmc2_nfc_pm_ops, stm32_fmc2_nfc_suspend,
+ stm32_fmc2_nfc_resume);
+
+static const struct of_device_id stm32_fmc2_nfc_match[] = {
+ {.compatible = "st,stm32mp15-fmc2"},
+ {.compatible = "st,stm32mp1-fmc2-nfc"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_fmc2_nfc_match);
+
+static struct platform_driver stm32_fmc2_nfc_driver = {
+ .probe = stm32_fmc2_nfc_probe,
+ .remove_new = stm32_fmc2_nfc_remove,
+ .driver = {
+ .name = "stm32_fmc2_nfc",
+ .of_match_table = stm32_fmc2_nfc_match,
+ .pm = &stm32_fmc2_nfc_pm_ops,
+ },
+};
+module_platform_driver(stm32_fmc2_nfc_driver);
+
+MODULE_ALIAS("platform:stm32_fmc2_nfc");
+MODULE_AUTHOR("Christophe Kerello <christophe.kerello@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 NFC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
new file mode 100644
index 0000000000..9abf38049d
--- /dev/null
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -0,0 +1,2208 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com>
+ *
+ * Derived from:
+ * https://github.com/yuq/sunxi-nfc-mtd
+ * Copyright (C) 2013 Qiang Yu <yuq825@gmail.com>
+ *
+ * https://github.com/hno/Allwinner-Info
+ * Copyright (C) 2013 Henrik Nordström <Henrik Nordström>
+ *
+ * Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com>
+ * Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+
+#define NFC_REG_CTL 0x0000
+#define NFC_REG_ST 0x0004
+#define NFC_REG_INT 0x0008
+#define NFC_REG_TIMING_CTL 0x000C
+#define NFC_REG_TIMING_CFG 0x0010
+#define NFC_REG_ADDR_LOW 0x0014
+#define NFC_REG_ADDR_HIGH 0x0018
+#define NFC_REG_SECTOR_NUM 0x001C
+#define NFC_REG_CNT 0x0020
+#define NFC_REG_CMD 0x0024
+#define NFC_REG_RCMD_SET 0x0028
+#define NFC_REG_WCMD_SET 0x002C
+#define NFC_REG_A10_IO_DATA 0x0030
+#define NFC_REG_A23_IO_DATA 0x0300
+#define NFC_REG_ECC_CTL 0x0034
+#define NFC_REG_ECC_ST 0x0038
+#define NFC_REG_DEBUG 0x003C
+#define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3)
+#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
+#define NFC_REG_SPARE_AREA 0x00A0
+#define NFC_REG_PAT_ID 0x00A4
+#define NFC_REG_MDMA_ADDR 0x00C0
+#define NFC_REG_MDMA_CNT 0x00C4
+#define NFC_RAM0_BASE 0x0400
+#define NFC_RAM1_BASE 0x0800
+
+/* bits used in NFC_CTL */
+#define NFC_EN BIT(0)
+#define NFC_RESET BIT(1)
+#define NFC_BUS_WIDTH_MSK BIT(2)
+#define NFC_BUS_WIDTH_8 (0 << 2)
+#define NFC_BUS_WIDTH_16 (1 << 2)
+#define NFC_RB_SEL_MSK BIT(3)
+#define NFC_RB_SEL(x) ((x) << 3)
+#define NFC_CE_SEL_MSK GENMASK(26, 24)
+#define NFC_CE_SEL(x) ((x) << 24)
+#define NFC_CE_CTL BIT(6)
+#define NFC_PAGE_SHIFT_MSK GENMASK(11, 8)
+#define NFC_PAGE_SHIFT(x) (((x) < 10 ? 0 : (x) - 10) << 8)
+#define NFC_SAM BIT(12)
+#define NFC_RAM_METHOD BIT(14)
+#define NFC_DMA_TYPE_NORMAL BIT(15)
+#define NFC_DEBUG_CTL BIT(31)
+
+/* bits used in NFC_ST */
+#define NFC_RB_B2R BIT(0)
+#define NFC_CMD_INT_FLAG BIT(1)
+#define NFC_DMA_INT_FLAG BIT(2)
+#define NFC_CMD_FIFO_STATUS BIT(3)
+#define NFC_STA BIT(4)
+#define NFC_NATCH_INT_FLAG BIT(5)
+#define NFC_RB_STATE(x) BIT(x + 8)
+
+/* bits used in NFC_INT */
+#define NFC_B2R_INT_ENABLE BIT(0)
+#define NFC_CMD_INT_ENABLE BIT(1)
+#define NFC_DMA_INT_ENABLE BIT(2)
+#define NFC_INT_MASK (NFC_B2R_INT_ENABLE | \
+ NFC_CMD_INT_ENABLE | \
+ NFC_DMA_INT_ENABLE)
+
+/* bits used in NFC_TIMING_CTL */
+#define NFC_TIMING_CTL_EDO BIT(8)
+
+/* define NFC_TIMING_CFG register layout */
+#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD) \
+ (((tWB) & 0x3) | (((tADL) & 0x3) << 2) | \
+ (((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) | \
+ (((tCAD) & 0x7) << 8))
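+
+/*
+ * For example, NFC_TIMING_CFG(1, 2, 3, 2, 7) packs to
+ * 0x1 | (0x2 << 2) | (0x3 << 4) | (0x2 << 6) | (0x7 << 8) = 0x7b9.
+ */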
+
+/* bits used in NFC_CMD */
+#define NFC_CMD_LOW_BYTE_MSK GENMASK(7, 0)
+#define NFC_CMD_HIGH_BYTE_MSK GENMASK(15, 8)
+#define NFC_CMD(x) (x)
+#define NFC_ADR_NUM_MSK GENMASK(18, 16)
+#define NFC_ADR_NUM(x) (((x) - 1) << 16)
+#define NFC_SEND_ADR BIT(19)
+#define NFC_ACCESS_DIR BIT(20)
+#define NFC_DATA_TRANS BIT(21)
+#define NFC_SEND_CMD1 BIT(22)
+#define NFC_WAIT_FLAG BIT(23)
+#define NFC_SEND_CMD2 BIT(24)
+#define NFC_SEQ BIT(25)
+#define NFC_DATA_SWAP_METHOD BIT(26)
+#define NFC_ROW_AUTO_INC BIT(27)
+#define NFC_SEND_CMD3 BIT(28)
+#define NFC_SEND_CMD4 BIT(29)
+#define NFC_CMD_TYPE_MSK GENMASK(31, 30)
+#define NFC_NORMAL_OP (0 << 30)
+#define NFC_ECC_OP (1 << 30)
+#define NFC_PAGE_OP (2U << 30)
+
+/* bits used in NFC_RCMD_SET */
+#define NFC_READ_CMD_MSK GENMASK(7, 0)
+#define NFC_RND_READ_CMD0_MSK GENMASK(15, 8)
+#define NFC_RND_READ_CMD1_MSK GENMASK(23, 16)
+
+/* bits used in NFC_WCMD_SET */
+#define NFC_PROGRAM_CMD_MSK GENMASK(7, 0)
+#define NFC_RND_WRITE_CMD_MSK GENMASK(15, 8)
+#define NFC_READ_CMD0_MSK GENMASK(23, 16)
+#define NFC_READ_CMD1_MSK GENMASK(31, 24)
+
+/* bits used in NFC_ECC_CTL */
+#define NFC_ECC_EN BIT(0)
+#define NFC_ECC_PIPELINE BIT(3)
+#define NFC_ECC_EXCEPTION BIT(4)
+#define NFC_ECC_BLOCK_SIZE_MSK BIT(5)
+#define NFC_ECC_BLOCK_512 BIT(5)
+#define NFC_RANDOM_EN BIT(9)
+#define NFC_RANDOM_DIRECTION BIT(10)
+#define NFC_ECC_MODE_MSK GENMASK(15, 12)
+#define NFC_ECC_MODE(x) ((x) << 12)
+#define NFC_RANDOM_SEED_MSK GENMASK(30, 16)
+#define NFC_RANDOM_SEED(x) ((x) << 16)
+
+/* bits used in NFC_ECC_ST */
+#define NFC_ECC_ERR(x) BIT(x)
+#define NFC_ECC_ERR_MSK GENMASK(15, 0)
+#define NFC_ECC_PAT_FOUND(x) BIT(x + 16)
+#define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff)
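+
+/*
+ * The per-step error counters are packed four per 32-bit register: for ECC
+ * step 5, NFC_REG_ECC_ERR_CNT(5) = (0x0040 + 5) & ~0x3 = 0x0044, and
+ * NFC_ECC_ERR_CNT(5, val) extracts byte 5 % 4 = 1, i.e. (val >> 8) & 0xff.
+ */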
+
+#define NFC_DEFAULT_TIMEOUT_MS 1000
+
+#define NFC_SRAM_SIZE 1024
+
+#define NFC_MAX_CS 7
+
+/**
+ * struct sunxi_nand_chip_sel - stores information related to NAND Chip Select
+ *
+ * @cs: the NAND CS id used to communicate with a NAND Chip
+ * @rb: the Ready/Busy pin ID. -1 means no R/B pin connected to the NFC
+ */
+struct sunxi_nand_chip_sel {
+ u8 cs;
+ s8 rb;
+};
+
+/**
+ * struct sunxi_nand_hw_ecc - stores information related to HW ECC support
+ *
+ * @ecc_ctl: ECC_CTL register value for this NAND chip
+ */
+struct sunxi_nand_hw_ecc {
+ u32 ecc_ctl;
+};
+
+/**
+ * struct sunxi_nand_chip - stores NAND chip device related information
+ *
+ * @node: used to store NAND chips into a list
+ * @nand: base NAND chip structure
+ * @ecc: ECC controller structure
+ * @clk_rate: clk_rate required for this NAND chip
+ * @timing_cfg: TIMING_CFG register value for this NAND chip
+ * @timing_ctl: TIMING_CTL register value for this NAND chip
+ * @nsels: number of CS lines required by the NAND chip
+ * @sels: array of CS lines descriptions
+ */
+struct sunxi_nand_chip {
+ struct list_head node;
+ struct nand_chip nand;
+ struct sunxi_nand_hw_ecc ecc;
+ unsigned long clk_rate;
+ u32 timing_cfg;
+ u32 timing_ctl;
+ int nsels;
+ struct sunxi_nand_chip_sel sels[];
+};
+
+static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct sunxi_nand_chip, nand);
+}
+
+/**
+ * struct sunxi_nfc_caps - NAND controller capabilities, used to
+ * distinguish between compatible strings
+ *
+ * @has_mdma: use the controller's internal MBUS DMA engine; otherwise a
+ * general-purpose DMA channel is used (DMA through MBUS on A23/A33
+ * needs extra configuration)
+ * @reg_io_data: I/O data register
+ * @dma_maxburst: DMA maxburst
+ */
+struct sunxi_nfc_caps {
+ bool has_mdma;
+ unsigned int reg_io_data;
+ unsigned int dma_maxburst;
+};
+
+/**
+ * struct sunxi_nfc - stores sunxi NAND controller information
+ *
+ * @controller: base controller structure
+ * @dev: parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @ahb_clk: NAND controller AHB clock
+ * @mod_clk: NAND controller mod clock
+ * @reset: NAND controller reset line
+ * @assigned_cs: bitmask describing already assigned CS lines
+ * @clk_rate: NAND controller current clock rate
+ * @chips: a list containing all the NAND chips attached to this NAND
+ * controller
+ * @complete: a completion object used to wait for NAND controller events
+ * @dmac: the DMA channel attached to the NAND controller
+ * @caps: NAND Controller capabilities
+ */
+struct sunxi_nfc {
+ struct nand_controller controller;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *ahb_clk;
+ struct clk *mod_clk;
+ struct reset_control *reset;
+ unsigned long assigned_cs;
+ unsigned long clk_rate;
+ struct list_head chips;
+ struct completion complete;
+ struct dma_chan *dmac;
+ const struct sunxi_nfc_caps *caps;
+};
+
+static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct sunxi_nfc, controller);
+}
+
+static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id)
+{
+ struct sunxi_nfc *nfc = dev_id;
+ u32 st = readl(nfc->regs + NFC_REG_ST);
+ u32 ien = readl(nfc->regs + NFC_REG_INT);
+
+ if (!(ien & st))
+ return IRQ_NONE;
+
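+	/*
+	 * Wake the waiter only once every event it armed in NFC_REG_INT has
+	 * fired; otherwise just ack the events seen so far and mask them out
+	 * of the enable register below.
+	 */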
+ if ((ien & st) == ien)
+ complete(&nfc->complete);
+
+ writel(st & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
+ writel(~st & ien & NFC_INT_MASK, nfc->regs + NFC_REG_INT);
+
+ return IRQ_HANDLED;
+}
+
+static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
+ bool use_polling, unsigned int timeout_ms)
+{
+ int ret;
+
+ if (events & ~NFC_INT_MASK)
+ return -EINVAL;
+
+ if (!timeout_ms)
+ timeout_ms = NFC_DEFAULT_TIMEOUT_MS;
+
+ if (!use_polling) {
+ init_completion(&nfc->complete);
+
+ writel(events, nfc->regs + NFC_REG_INT);
+
+ ret = wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(timeout_ms));
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
+
+ writel(0, nfc->regs + NFC_REG_INT);
+ } else {
+ u32 status;
+
+ ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
+ (status & events) == events, 1,
+ timeout_ms * 1000);
+ }
+
+ writel(events & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
+
+ if (ret)
+		dev_err(nfc->dev, "wait for interrupt timed out\n");
+
+ return ret;
+}
+
+static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
+{
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
+ !(status & NFC_CMD_FIFO_STATUS), 1,
+ NFC_DEFAULT_TIMEOUT_MS * 1000);
+ if (ret)
+		dev_err(nfc->dev, "wait for empty cmd FIFO timed out\n");
+
+ return ret;
+}
+
+static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
+{
+ u32 ctl;
+ int ret;
+
+ writel(0, nfc->regs + NFC_REG_ECC_CTL);
+ writel(NFC_RESET, nfc->regs + NFC_REG_CTL);
+
+ ret = readl_poll_timeout(nfc->regs + NFC_REG_CTL, ctl,
+ !(ctl & NFC_RESET), 1,
+ NFC_DEFAULT_TIMEOUT_MS * 1000);
+ if (ret)
+		dev_err(nfc->dev, "wait for NAND controller reset timed out\n");
+
+ return ret;
+}
+
+static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf,
+ int chunksize, int nchunks,
+ enum dma_data_direction ddir,
+ struct scatterlist *sg)
+{
+ struct dma_async_tx_descriptor *dmad;
+ enum dma_transfer_direction tdir;
+ dma_cookie_t dmat;
+ int ret;
+
+ if (ddir == DMA_FROM_DEVICE)
+ tdir = DMA_DEV_TO_MEM;
+ else
+ tdir = DMA_MEM_TO_DEV;
+
+ sg_init_one(sg, buf, nchunks * chunksize);
+ ret = dma_map_sg(nfc->dev, sg, 1, ddir);
+ if (!ret)
+ return -ENOMEM;
+
+ if (!nfc->caps->has_mdma) {
+ dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
+ if (!dmad) {
+ ret = -EINVAL;
+ goto err_unmap_buf;
+ }
+ }
+
+ writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+ writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
+ writel(chunksize, nfc->regs + NFC_REG_CNT);
+
+ if (nfc->caps->has_mdma) {
+ writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_DMA_TYPE_NORMAL,
+ nfc->regs + NFC_REG_CTL);
+ writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT);
+ writel(sg_dma_address(sg), nfc->regs + NFC_REG_MDMA_ADDR);
+ } else {
+ dmat = dmaengine_submit(dmad);
+
+ ret = dma_submit_error(dmat);
+ if (ret)
+ goto err_clr_dma_flag;
+ }
+
+ return 0;
+
+err_clr_dma_flag:
+ writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+
+err_unmap_buf:
+ dma_unmap_sg(nfc->dev, sg, 1, ddir);
+ return ret;
+}
+
+static void sunxi_nfc_dma_op_cleanup(struct sunxi_nfc *nfc,
+ enum dma_data_direction ddir,
+ struct scatterlist *sg)
+{
+ dma_unmap_sg(nfc->dev, sg, 1, ddir);
+ writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+}
+
+static void sunxi_nfc_select_chip(struct nand_chip *nand, unsigned int cs)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ struct sunxi_nand_chip_sel *sel;
+ u32 ctl;
+
+ if (cs >= sunxi_nand->nsels)
+ return;
+
+ ctl = readl(nfc->regs + NFC_REG_CTL) &
+ ~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN);
+
+ sel = &sunxi_nand->sels[cs];
+ ctl |= NFC_CE_SEL(sel->cs) | NFC_EN | NFC_PAGE_SHIFT(nand->page_shift);
+ if (sel->rb >= 0)
+ ctl |= NFC_RB_SEL(sel->rb);
+
+ writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
+
+ if (nfc->clk_rate != sunxi_nand->clk_rate) {
+ clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
+ nfc->clk_rate = sunxi_nand->clk_rate;
+ }
+
+ writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
+ writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
+ writel(ctl, nfc->regs + NFC_REG_CTL);
+}
+
+static void sunxi_nfc_read_buf(struct nand_chip *nand, uint8_t *buf, int len)
+{
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ int cnt;
+ int offs = 0;
+ u32 tmp;
+
+ while (len > offs) {
+ bool poll = false;
+
+ cnt = min(len - offs, NFC_SRAM_SIZE);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ break;
+
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ /* Arbitrary limit for polling mode */
+ if (cnt < 64)
+ poll = true;
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
+ if (ret)
+ break;
+
+ if (buf)
+ memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE,
+ cnt);
+ offs += cnt;
+ }
+}
+
+static void sunxi_nfc_write_buf(struct nand_chip *nand, const uint8_t *buf,
+ int len)
+{
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ int cnt;
+ int offs = 0;
+ u32 tmp;
+
+ while (len > offs) {
+ bool poll = false;
+
+ cnt = min(len - offs, NFC_SRAM_SIZE);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ break;
+
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+ memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt);
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
+ NFC_ACCESS_DIR;
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ /* Arbitrary limit for polling mode */
+ if (cnt < 64)
+ poll = true;
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
+ if (ret)
+ break;
+
+ offs += cnt;
+ }
+}
+
+/* These seed values have been extracted from Allwinner's BSP */
+static const u16 sunxi_nfc_randomizer_page_seeds[] = {
+ 0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
+ 0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
+ 0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
+ 0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
+ 0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
+ 0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
+ 0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
+ 0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
+ 0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
+ 0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
+ 0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
+ 0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
+ 0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
+ 0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
+ 0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
+ 0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
+};
+
+/*
+ * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
+ * have been generated using
+ * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
+ * the randomizer engine does internally before de/scrambling OOB data.
+ *
+ * Those tables are statically defined to avoid calculating randomizer state
+ * at runtime.
+ */
+static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
+ 0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
+ 0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
+ 0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
+ 0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
+ 0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
+ 0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
+ 0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
+ 0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
+ 0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
+ 0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
+ 0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
+ 0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
+ 0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
+ 0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
+ 0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
+ 0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
+};
+
+static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
+ 0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
+ 0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
+ 0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
+ 0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
+ 0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
+ 0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
+ 0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
+ 0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
+ 0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
+ 0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
+ 0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
+ 0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
+ 0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
+ 0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
+ 0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
+ 0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
+};
+
+static u16 sunxi_nfc_randomizer_step(u16 state, int count)
+{
+ state &= 0x7fff;
+
+ /*
+ * This loop is just a simple implementation of a Fibonacci LFSR using
+ * the x16 + x15 + 1 polynomial.
+ */
+ while (count--)
+ state = ((state >> 1) |
+ (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;
+
+ return state;
+}
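+
+/*
+ * A minimal sketch (illustrative only, not called by the driver) of how the
+ * two ECC seed tables above can be regenerated from the page seeds, per the
+ * formula documented before those tables.
+ */
+static void __maybe_unused sunxi_nfc_gen_ecc_seeds(u16 *dst, int step_size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds); i++)
+		dst[i] = sunxi_nfc_randomizer_step(
+				sunxi_nfc_randomizer_page_seeds[i],
+				(step_size * 8) + 15);
+}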
+
+static u16 sunxi_nfc_randomizer_state(struct nand_chip *nand, int page,
+ bool ecc)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
+ int mod = mtd_div_by_ws(mtd->erasesize, mtd);
+
+ if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
+ mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);
+
+ if (ecc) {
+ if (mtd->ecc_step_size == 512)
+ seeds = sunxi_nfc_randomizer_ecc512_seeds;
+ else
+ seeds = sunxi_nfc_randomizer_ecc1024_seeds;
+ }
+
+ return seeds[page % mod];
+}
+
+static void sunxi_nfc_randomizer_config(struct nand_chip *nand, int page,
+					bool ecc)
+{
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	u32 ecc_ctl;
+	u16 state;
+
+	if (!(nand->options & NAND_NEED_SCRAMBLING))
+		return;
+
+	state = sunxi_nfc_randomizer_state(nand, page, ecc);
+	ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
+	writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_enable(struct nand_chip *nand)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ if (!(nand->options & NAND_NEED_SCRAMBLING))
+ return;
+
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
+ nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_disable(struct nand_chip *nand)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ if (!(nand->options & NAND_NEED_SCRAMBLING))
+ return;
+
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
+ nfc->regs + NFC_REG_ECC_CTL);
+}
+
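+/*
+ * The bad block marker lives in the first user-data word of the page, so it
+ * must be XORed with the same LFSR stream the engine applies: byte 0 with
+ * the seed state, byte 1 with the state advanced by 8 steps.
+ */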
+static void sunxi_nfc_randomize_bbm(struct nand_chip *nand, int page, u8 *bbm)
+{
+ u16 state = sunxi_nfc_randomizer_state(nand, page, true);
+
+ bbm[0] ^= state;
+ bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
+}
+
+static void sunxi_nfc_randomizer_write_buf(struct nand_chip *nand,
+ const uint8_t *buf, int len,
+ bool ecc, int page)
+{
+ sunxi_nfc_randomizer_config(nand, page, ecc);
+ sunxi_nfc_randomizer_enable(nand);
+ sunxi_nfc_write_buf(nand, buf, len);
+ sunxi_nfc_randomizer_disable(nand);
+}
+
+static void sunxi_nfc_randomizer_read_buf(struct nand_chip *nand, uint8_t *buf,
+ int len, bool ecc, int page)
+{
+ sunxi_nfc_randomizer_config(nand, page, ecc);
+ sunxi_nfc_randomizer_enable(nand);
+ sunxi_nfc_read_buf(nand, buf, len);
+ sunxi_nfc_randomizer_disable(nand);
+}
+
+static void sunxi_nfc_hw_ecc_enable(struct nand_chip *nand)
+{
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ writel(sunxi_nand->ecc.ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_hw_ecc_disable(struct nand_chip *nand)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ writel(0, nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf)
+{
+ buf[0] = user_data;
+ buf[1] = user_data >> 8;
+ buf[2] = user_data >> 16;
+ buf[3] = user_data >> 24;
+}
+
+static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
+{
+ return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
+}
+
+static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct nand_chip *nand, u8 *oob,
+ int step, bool bbm, int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
+ oob);
+
+ /* De-randomize the Bad Block Marker. */
+ if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
+ sunxi_nfc_randomize_bbm(nand, page, oob);
+}
+
+static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct nand_chip *nand,
+ const u8 *oob, int step,
+ bool bbm, int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ u8 user_data[4];
+
+ /* Randomize the Bad Block Marker. */
+ if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
+ memcpy(user_data, oob, sizeof(user_data));
+ sunxi_nfc_randomize_bbm(nand, page, user_data);
+ oob = user_data;
+ }
+
+ writel(sunxi_nfc_buf_to_user_data(oob),
+ nfc->regs + NFC_REG_USER_DATA(step));
+}
+
+static void sunxi_nfc_hw_ecc_update_stats(struct nand_chip *nand,
+ unsigned int *max_bitflips, int ret)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ }
+}
+
+static int sunxi_nfc_hw_ecc_correct(struct nand_chip *nand, u8 *data, u8 *oob,
+ int step, u32 status, bool *erased)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ u32 tmp;
+
+ *erased = false;
+
+ if (status & NFC_ECC_ERR(step))
+ return -EBADMSG;
+
+ if (status & NFC_ECC_PAT_FOUND(step)) {
+ u8 pattern;
+
+ if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) {
+ pattern = 0x0;
+ } else {
+ pattern = 0xff;
+ *erased = true;
+ }
+
+ if (data)
+ memset(data, pattern, ecc->size);
+
+ if (oob)
+ memset(oob, pattern, ecc->bytes + 4);
+
+ return 0;
+ }
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step));
+
+ return NFC_ECC_ERR_CNT(step, tmp);
+}
+
+static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
+ u8 *data, int data_off,
+ u8 *oob, int oob_off,
+ int *cur_off,
+ unsigned int *max_bitflips,
+ bool bbm, bool oob_required, int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int raw_mode = 0;
+ bool erased;
+ int ret;
+
+ if (*cur_off != data_off)
+ nand_change_read_column_op(nand, data_off, NULL, 0, false);
+
+ sunxi_nfc_randomizer_read_buf(nand, NULL, ecc->size, false, page);
+
+ if (data_off + ecc->size != oob_off)
+ nand_change_read_column_op(nand, oob_off, NULL, 0, false);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ sunxi_nfc_randomizer_enable(nand);
+ writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
+ nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
+ sunxi_nfc_randomizer_disable(nand);
+ if (ret)
+ return ret;
+
+ *cur_off = oob_off + ecc->bytes + 4;
+
+ ret = sunxi_nfc_hw_ecc_correct(nand, data, oob_required ? oob : NULL, 0,
+ readl(nfc->regs + NFC_REG_ECC_ST),
+ &erased);
+ if (erased)
+ return 1;
+
+ if (ret < 0) {
+ /*
+ * Re-read the data with the randomizer disabled to identify
+ * bitflips in erased pages.
+ */
+ if (nand->options & NAND_NEED_SCRAMBLING)
+ nand_change_read_column_op(nand, data_off, data,
+ ecc->size, false);
+ else
+ memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
+ ecc->size);
+
+ nand_change_read_column_op(nand, oob_off, oob, ecc->bytes + 4,
+ false);
+
+ ret = nand_check_erased_ecc_chunk(data, ecc->size,
+ oob, ecc->bytes + 4,
+ NULL, 0, ecc->strength);
+ if (ret >= 0)
+ raw_mode = 1;
+ } else {
+ memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
+
+ if (oob_required) {
+ nand_change_read_column_op(nand, oob_off, NULL, 0,
+ false);
+ sunxi_nfc_randomizer_read_buf(nand, oob, ecc->bytes + 4,
+ true, page);
+
+ sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, 0,
+ bbm, page);
+ }
+ }
+
+ sunxi_nfc_hw_ecc_update_stats(nand, max_bitflips, ret);
+
+ return raw_mode;
+}
+
+static void sunxi_nfc_hw_ecc_read_extra_oob(struct nand_chip *nand,
+ u8 *oob, int *cur_off,
+ bool randomize, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int offset = ((ecc->bytes + 4) * ecc->steps);
+ int len = mtd->oobsize - offset;
+
+ if (len <= 0)
+ return;
+
+ if (!cur_off || *cur_off != offset)
+ nand_change_read_column_op(nand, mtd->writesize, NULL, 0,
+ false);
+
+ if (!randomize)
+ sunxi_nfc_read_buf(nand, oob + offset, len);
+ else
+ sunxi_nfc_randomizer_read_buf(nand, oob + offset, len,
+ false, page);
+
+ if (cur_off)
+ *cur_off = mtd->oobsize + mtd->writesize;
+}
+
+static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf,
+ int oob_required, int page,
+ int nchunks)
+{
+ bool randomized = nand->options & NAND_NEED_SCRAMBLING;
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ unsigned int max_bitflips = 0;
+ int ret, i, raw_mode = 0;
+ struct scatterlist sg;
+ u32 status, wait;
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, nchunks,
+ DMA_FROM_DEVICE, &sg);
+ if (ret)
+ return ret;
+
+ sunxi_nfc_hw_ecc_enable(nand);
+ sunxi_nfc_randomizer_config(nand, page, false);
+ sunxi_nfc_randomizer_enable(nand);
+
+ writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
+ NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);
+
+ wait = NFC_CMD_INT_FLAG;
+
+ if (nfc->caps->has_mdma)
+ wait |= NFC_DMA_INT_FLAG;
+ else
+ dma_async_issue_pending(nfc->dmac);
+
+ writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
+ nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, wait, false, 0);
+ if (ret && !nfc->caps->has_mdma)
+ dmaengine_terminate_all(nfc->dmac);
+
+ sunxi_nfc_randomizer_disable(nand);
+ sunxi_nfc_hw_ecc_disable(nand);
+
+ sunxi_nfc_dma_op_cleanup(nfc, DMA_FROM_DEVICE, &sg);
+
+ if (ret)
+ return ret;
+
+ status = readl(nfc->regs + NFC_REG_ECC_ST);
+
+ for (i = 0; i < nchunks; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = buf + data_off;
+ u8 *oob = nand->oob_poi + oob_off;
+ bool erased;
+
+ ret = sunxi_nfc_hw_ecc_correct(nand, randomized ? data : NULL,
+ oob_required ? oob : NULL,
+ i, status, &erased);
+
+ /* ECC errors are handled in the second loop. */
+ if (ret < 0)
+ continue;
+
+ if (oob_required && !erased) {
+ /* TODO: use DMA to retrieve OOB */
+ nand_change_read_column_op(nand,
+ mtd->writesize + oob_off,
+ oob, ecc->bytes + 4, false);
+
+ sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, i,
+ !i, page);
+ }
+
+ if (erased)
+ raw_mode = 1;
+
+ sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret);
+ }
+
+ if (status & NFC_ECC_ERR_MSK) {
+ for (i = 0; i < nchunks; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = buf + data_off;
+ u8 *oob = nand->oob_poi + oob_off;
+
+ if (!(status & NFC_ECC_ERR(i)))
+ continue;
+ /*
+ * Re-read the data with the randomizer disabled to
+ * identify bitflips in erased pages.
+ * TODO: use DMA to read page in raw mode
+ */
+ if (randomized)
+ nand_change_read_column_op(nand, data_off,
+ data, ecc->size,
+ false);
+
+ /* TODO: use DMA to retrieve OOB */
+ nand_change_read_column_op(nand,
+ mtd->writesize + oob_off,
+ oob, ecc->bytes + 4, false);
+
+ ret = nand_check_erased_ecc_chunk(data, ecc->size,
+ oob, ecc->bytes + 4,
+ NULL, 0,
+ ecc->strength);
+ if (ret >= 0)
+ raw_mode = 1;
+
+ sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret);
+ }
+ }
+
+ if (oob_required)
+ sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi,
+ NULL, !raw_mode,
+ page);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
+ const u8 *data, int data_off,
+ const u8 *oob, int oob_off,
+ int *cur_off, bool bbm,
+ int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int ret;
+
+ if (data_off != *cur_off)
+ nand_change_write_column_op(nand, data_off, NULL, 0, false);
+
+ sunxi_nfc_randomizer_write_buf(nand, data, ecc->size, false, page);
+
+ if (data_off + ecc->size != oob_off)
+ nand_change_write_column_op(nand, oob_off, NULL, 0, false);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ sunxi_nfc_randomizer_enable(nand);
+ sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);
+
+ writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
+ NFC_ACCESS_DIR | NFC_ECC_OP,
+ nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
+ sunxi_nfc_randomizer_disable(nand);
+ if (ret)
+ return ret;
+
+ *cur_off = oob_off + ecc->bytes + 4;
+
+ return 0;
+}
+
+static void sunxi_nfc_hw_ecc_write_extra_oob(struct nand_chip *nand,
+ u8 *oob, int *cur_off,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int offset = ((ecc->bytes + 4) * ecc->steps);
+ int len = mtd->oobsize - offset;
+
+ if (len <= 0)
+ return;
+
+ if (!cur_off || *cur_off != offset)
+ nand_change_write_column_op(nand, offset + mtd->writesize,
+ NULL, 0, false);
+
+ sunxi_nfc_randomizer_write_buf(nand, oob + offset, len, false, page);
+
+ if (cur_off)
+ *cur_off = mtd->oobsize + mtd->writesize;
+}
+
+static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *nand, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ unsigned int max_bitflips = 0;
+ int ret, i, cur_off = 0;
+ bool raw_mode = false;
+
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
+
+ sunxi_nfc_hw_ecc_enable(nand);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = buf + data_off;
+ u8 *oob = nand->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off, oob,
+ oob_off + mtd->writesize,
+ &cur_off, &max_bitflips,
+ !i, oob_required, page);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ raw_mode = true;
+ }
+
+ if (oob_required)
+ sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi, &cur_off,
+ !raw_mode, page);
+
+ sunxi_nfc_hw_ecc_disable(nand);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_ecc_read_page_dma(struct nand_chip *nand, u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
+
+ ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, oob_required, page,
+ nand->ecc.steps);
+ if (ret >= 0)
+ return ret;
+
+	/* Fall back to PIO mode */
+ return sunxi_nfc_hw_ecc_read_page(nand, buf, oob_required, page);
+}
+
+static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *nand,
+ u32 data_offs, u32 readlen,
+ u8 *bufpoi, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int ret, i, cur_off = 0;
+ unsigned int max_bitflips = 0;
+
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
+
+ sunxi_nfc_hw_ecc_enable(nand);
+
+ for (i = data_offs / ecc->size;
+ i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = bufpoi + data_off;
+ u8 *oob = nand->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off,
+ oob,
+ oob_off + mtd->writesize,
+ &cur_off, &max_bitflips, !i,
+ false, page);
+ if (ret < 0)
+ return ret;
+ }
+
+ sunxi_nfc_hw_ecc_disable(nand);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_ecc_read_subpage_dma(struct nand_chip *nand,
+ u32 data_offs, u32 readlen,
+ u8 *buf, int page)
+{
+ int nchunks = DIV_ROUND_UP(data_offs + readlen, nand->ecc.size);
+ int ret;
+
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
+
+ ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, false, page, nchunks);
+ if (ret >= 0)
+ return ret;
+
+	/* Fall back to PIO mode */
+ return sunxi_nfc_hw_ecc_read_subpage(nand, data_offs, readlen,
+ buf, page);
+}
+
+static int sunxi_nfc_hw_ecc_write_page(struct nand_chip *nand,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int ret, i, cur_off = 0;
+
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
+
+ sunxi_nfc_hw_ecc_enable(nand);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ const u8 *data = buf + data_off;
+ const u8 *oob = nand->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob,
+ oob_off + mtd->writesize,
+ &cur_off, !i, page);
+ if (ret)
+ return ret;
+ }
+
+ if (oob_required || (nand->options & NAND_NEED_SCRAMBLING))
+ sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi,
+ &cur_off, page);
+
+ sunxi_nfc_hw_ecc_disable(nand);
+
+ return nand_prog_page_end_op(nand);
+}
+
+static int sunxi_nfc_hw_ecc_write_subpage(struct nand_chip *nand,
+ u32 data_offs, u32 data_len,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int ret, i, cur_off = 0;
+
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
+
+ sunxi_nfc_hw_ecc_enable(nand);
+
+ for (i = data_offs / ecc->size;
+ i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ const u8 *data = buf + data_off;
+ const u8 *oob = nand->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob,
+ oob_off + mtd->writesize,
+ &cur_off, !i, page);
+ if (ret)
+ return ret;
+ }
+
+ sunxi_nfc_hw_ecc_disable(nand);
+
+ return nand_prog_page_end_op(nand);
+}
+
+static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand,
+ const u8 *buf,
+ int oob_required,
+ int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ struct scatterlist sg;
+ u32 wait;
+ int ret, i;
+
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, ecc->steps,
+ DMA_TO_DEVICE, &sg);
+ if (ret)
+ goto pio_fallback;
+
+ for (i = 0; i < ecc->steps; i++) {
+ const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));
+
+ sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, i, !i, page);
+ }
+
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
+
+ sunxi_nfc_hw_ecc_enable(nand);
+ sunxi_nfc_randomizer_config(nand, page, false);
+ sunxi_nfc_randomizer_enable(nand);
+
+ writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
+ nfc->regs + NFC_REG_WCMD_SET);
+
+ wait = NFC_CMD_INT_FLAG;
+
+ if (nfc->caps->has_mdma)
+ wait |= NFC_DMA_INT_FLAG;
+ else
+ dma_async_issue_pending(nfc->dmac);
+
+ writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
+ NFC_DATA_TRANS | NFC_ACCESS_DIR,
+ nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, wait, false, 0);
+ if (ret && !nfc->caps->has_mdma)
+ dmaengine_terminate_all(nfc->dmac);
+
+ sunxi_nfc_randomizer_disable(nand);
+ sunxi_nfc_hw_ecc_disable(nand);
+
+ sunxi_nfc_dma_op_cleanup(nfc, DMA_TO_DEVICE, &sg);
+
+ if (ret)
+ return ret;
+
+ if (oob_required || (nand->options & NAND_NEED_SCRAMBLING))
+		/* TODO: use DMA to transfer extra OOB bytes? */
+ sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi,
+ NULL, page);
+
+ return nand_prog_page_end_op(nand);
+
+pio_fallback:
+ return sunxi_nfc_hw_ecc_write_page(nand, buf, oob_required, page);
+}
+
+static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *nand, int page)
+{
+ u8 *buf = nand_get_data_buf(nand);
+
+ return nand->ecc.read_page(nand, buf, 1, page);
+}
+
+static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *nand, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ u8 *buf = nand_get_data_buf(nand);
+ int ret;
+
+ memset(buf, 0xff, mtd->writesize);
+ ret = nand->ecc.write_page(nand, buf, 1, page);
+ if (ret)
+ return ret;
+
+ /* Send command to program the OOB data */
+ return nand_prog_page_end_op(nand);
+}
+
+static const s32 tWB_lut[] = {6, 12, 16, 20};
+static const s32 tRHW_lut[] = {4, 8, 12, 20};
+
+static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
+ u32 clk_period)
+{
+ u32 clk_cycles = DIV_ROUND_UP(duration, clk_period);
+ int i;
+
+ for (i = 0; i < lut_size; i++) {
+ if (clk_cycles <= lut[i])
+ return i;
+ }
+
+ /* Doesn't fit */
+ return -EINVAL;
+}
+
+#define sunxi_nand_lookup_timing(l, p, c) \
+ _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
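+
+/*
+ * For example, with tWB_lut above and a 20000 ps clock period, a 100000 ps
+ * tWB needs DIV_ROUND_UP(100000, 20000) = 5 cycles; the first entry that
+ * fits is 6 cycles, so the lookup returns index 0.
+ */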
+
+static int sunxi_nfc_setup_interface(struct nand_chip *nand, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ const struct nand_sdr_timings *timings;
+ u32 min_clk_period = 0;
+ s32 tWB, tADL, tWHR, tRHW, tCAD;
+ long real_clk_rate;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
+ /* T1 <=> tCLS */
+ if (timings->tCLS_min > min_clk_period)
+ min_clk_period = timings->tCLS_min;
+
+ /* T2 <=> tCLH */
+ if (timings->tCLH_min > min_clk_period)
+ min_clk_period = timings->tCLH_min;
+
+ /* T3 <=> tCS */
+ if (timings->tCS_min > min_clk_period)
+ min_clk_period = timings->tCS_min;
+
+ /* T4 <=> tCH */
+ if (timings->tCH_min > min_clk_period)
+ min_clk_period = timings->tCH_min;
+
+ /* T5 <=> tWP */
+ if (timings->tWP_min > min_clk_period)
+ min_clk_period = timings->tWP_min;
+
+ /* T6 <=> tWH */
+ if (timings->tWH_min > min_clk_period)
+ min_clk_period = timings->tWH_min;
+
+ /* T7 <=> tALS */
+ if (timings->tALS_min > min_clk_period)
+ min_clk_period = timings->tALS_min;
+
+ /* T8 <=> tDS */
+ if (timings->tDS_min > min_clk_period)
+ min_clk_period = timings->tDS_min;
+
+ /* T9 <=> tDH */
+ if (timings->tDH_min > min_clk_period)
+ min_clk_period = timings->tDH_min;
+
+ /* T10 <=> tRR */
+ if (timings->tRR_min > (min_clk_period * 3))
+ min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3);
+
+ /* T11 <=> tALH */
+ if (timings->tALH_min > min_clk_period)
+ min_clk_period = timings->tALH_min;
+
+ /* T12 <=> tRP */
+ if (timings->tRP_min > min_clk_period)
+ min_clk_period = timings->tRP_min;
+
+ /* T13 <=> tREH */
+ if (timings->tREH_min > min_clk_period)
+ min_clk_period = timings->tREH_min;
+
+ /* T14 <=> tRC */
+ if (timings->tRC_min > (min_clk_period * 2))
+ min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2);
+
+ /* T15 <=> tWC */
+ if (timings->tWC_min > (min_clk_period * 2))
+ min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
+
+ /* T16 - T19 + tCAD */
+ if (timings->tWB_max > (min_clk_period * 20))
+ min_clk_period = DIV_ROUND_UP(timings->tWB_max, 20);
+
+ if (timings->tADL_min > (min_clk_period * 32))
+ min_clk_period = DIV_ROUND_UP(timings->tADL_min, 32);
+
+ if (timings->tWHR_min > (min_clk_period * 32))
+ min_clk_period = DIV_ROUND_UP(timings->tWHR_min, 32);
+
+ if (timings->tRHW_min > (min_clk_period * 20))
+ min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20);
+
+ /*
+ * In non-EDO, tREA should be less than tRP to guarantee that the
+ * controller does not sample the IO lines too early. Unfortunately,
+ * the sunxi NAND controller does not allow us to have different
+	 * values for tRP and tREH (tRP = tREH = tRC / 2).
+ *
+ * We have 2 options to overcome this limitation:
+ *
+ * 1/ Extend tRC to fulfil the tREA <= tRC / 2 constraint
+ * 2/ Use EDO mode (only works if timings->tRLOH > 0)
+ */
+ if (timings->tREA_max > min_clk_period && !timings->tRLOH_min)
+ min_clk_period = timings->tREA_max;
+
+ tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
+ min_clk_period);
+ if (tWB < 0) {
+ dev_err(nfc->dev, "unsupported tWB\n");
+ return tWB;
+ }
+
+ tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3;
+ if (tADL > 3) {
+ dev_err(nfc->dev, "unsupported tADL\n");
+ return -EINVAL;
+ }
+
+ tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3;
+ if (tWHR > 3) {
+ dev_err(nfc->dev, "unsupported tWHR\n");
+ return -EINVAL;
+ }
+
+ tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min,
+ min_clk_period);
+ if (tRHW < 0) {
+ dev_err(nfc->dev, "unsupported tRHW\n");
+ return tRHW;
+ }
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ /*
+	 * TODO: according to the ONFI specs this value only applies to DDR NAND,
+ * but Allwinner seems to set this to 0x7. Mimic them for now.
+ */
+ tCAD = 0x7;
+
+ /* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
+ sunxi_nand->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
+
+ /* Convert min_clk_period from picoseconds to nanoseconds */
+ min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
+
+ /*
+	 * Unlike what is stated in the Allwinner datasheet, the clk_rate should
+ * be set to (1 / min_clk_period), and not (2 / min_clk_period).
+ * This new formula was verified with a scope and validated by
+ * Allwinner engineers.
+ */
+ sunxi_nand->clk_rate = NSEC_PER_SEC / min_clk_period;
+ real_clk_rate = clk_round_rate(nfc->mod_clk, sunxi_nand->clk_rate);
+ if (real_clk_rate <= 0) {
+ dev_err(nfc->dev, "Unable to round clk %lu\n",
+ sunxi_nand->clk_rate);
+ return -EINVAL;
+ }
+
+ sunxi_nand->timing_ctl = 0;
+
+ /*
+ * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
+ * output cycle timings shall be used if the host drives tRC less than
+ * 30 ns. We should also use EDO mode if tREA is bigger than tRP.
+ */
+ min_clk_period = NSEC_PER_SEC / real_clk_rate;
+ if (min_clk_period * 2 < 30 || min_clk_period * 1000 < timings->tREA_max)
+ sunxi_nand->timing_ctl = NFC_TIMING_CTL_EDO;
+
+ return 0;
+}
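+
+/*
+ * Worked example with assumed numbers: if the constraints above yield
+ * min_clk_period = 25000 ps, the requested clk_rate is 10^9 / 25 = 40 MHz.
+ * If the clock rounds to exactly 40 MHz, min_clk_period recomputes to 25 ns,
+ * so tRC = 2 * 25 ns = 50 ns >= 30 ns and EDO is only selected if tREA_max
+ * exceeds 25000 ps.
+ */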
+
+static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+
+ if (section >= ecc->steps)
+ return -ERANGE;
+
+ oobregion->offset = section * (ecc->bytes + 4) + 4;
+ oobregion->length = ecc->bytes;
+
+ return 0;
+}
+
+static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+
+ if (section > ecc->steps)
+ return -ERANGE;
+
+ /*
+ * The first 2 bytes are used for BB markers, hence we
+ * only have 2 bytes available in the first user data
+ * section.
+ */
+ if (!section && ecc->engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ oobregion->offset = 2;
+ oobregion->length = 2;
+
+ return 0;
+ }
+
+ /*
+ * The controller does not provide access to OOB bytes
+ * past the end of the ECC data.
+ */
+ if (section == ecc->steps && ecc->engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return -ERANGE;
+
+ oobregion->offset = section * (ecc->bytes + 4);
+
+ if (section < ecc->steps)
+ oobregion->length = 4;
+ else
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = {
+ .ecc = sunxi_nand_ooblayout_ecc,
+ .free = sunxi_nand_ooblayout_free,
+};
+
+static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
+ struct nand_ecc_ctrl *ecc,
+ struct device_node *np)
+{
+ static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_device *nanddev = mtd_to_nanddev(mtd);
+ int nsectors;
+ int i;
+
+ if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
+ int bytes;
+
+ ecc->size = 1024;
+ nsectors = mtd->writesize / ecc->size;
+
+ /* Reserve 2 bytes for the BBM */
+ bytes = (mtd->oobsize - 2) / nsectors;
+
+		/* 4 non-ECC bytes are added before each ECC byte section */
+ bytes -= 4;
+
+ /* and bytes has to be even. */
+ if (bytes % 2)
+ bytes--;
+
+ ecc->strength = bytes * 8 / fls(8 * ecc->size);
+
+ for (i = 0; i < ARRAY_SIZE(strengths); i++) {
+ if (strengths[i] > ecc->strength)
+ break;
+ }
+
+ if (!i)
+ ecc->strength = 0;
+ else
+ ecc->strength = strengths[i - 1];
+ }
+
+ if (ecc->size != 512 && ecc->size != 1024)
+ return -EINVAL;
+
+	/* Prefer 1k ECC chunks over 512-byte ones */
+ if (ecc->size == 512 && mtd->writesize > 512) {
+ ecc->size = 1024;
+ ecc->strength *= 2;
+ }
+
+	/* Round the requested ECC strength up to the closest supported value */
+ for (i = 0; i < ARRAY_SIZE(strengths); i++) {
+ if (ecc->strength <= strengths[i]) {
+ /*
+ * Update ecc->strength value with the actual strength
+ * that will be used by the ECC engine.
+ */
+ ecc->strength = strengths[i];
+ break;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(strengths)) {
+ dev_err(nfc->dev, "unsupported strength\n");
+ return -ENOTSUPP;
+ }
+
+	/* HW ECC always requests ECC bytes for 1024-byte blocks */
+ ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);
+
+	/* HW ECC always works with an even number of ECC bytes */
+ ecc->bytes = ALIGN(ecc->bytes, 2);
+
+ nsectors = mtd->writesize / ecc->size;
+
+ if (mtd->oobsize < ((ecc->bytes + 4) * nsectors))
+ return -EINVAL;
+
+ ecc->read_oob = sunxi_nfc_hw_ecc_read_oob;
+ ecc->write_oob = sunxi_nfc_hw_ecc_write_oob;
+ mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops);
+
+ if (nfc->dmac || nfc->caps->has_mdma) {
+ ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
+ ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
+ ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
+ nand->options |= NAND_USES_DMA;
+ } else {
+ ecc->read_page = sunxi_nfc_hw_ecc_read_page;
+ ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
+ ecc->write_page = sunxi_nfc_hw_ecc_write_page;
+ }
+
+ /* TODO: support DMA for raw accesses and subpage write */
+ ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
+ ecc->read_oob_raw = nand_read_oob_std;
+ ecc->write_oob_raw = nand_write_oob_std;
+
+ sunxi_nand->ecc.ecc_ctl = NFC_ECC_MODE(i) | NFC_ECC_EXCEPTION |
+ NFC_ECC_PIPELINE | NFC_ECC_EN;
+
+ if (ecc->size == 512)
+ sunxi_nand->ecc.ecc_ctl |= NFC_ECC_BLOCK_512;
+
+ return 0;
+}
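+
+/*
+ * Worked example for the strength maximization above, assuming a 4 KiB page
+ * with 256 OOB bytes: nsectors = 4, bytes = (256 - 2) / 4 - 4 = 59, rounded
+ * down to 58 to stay even; strength = 58 * 8 / fls(8192) = 464 / 14 = 33,
+ * which snaps down to the supported value 32. ecc->bytes then becomes
+ * DIV_ROUND_UP(32 * 14, 8) = 56, and (56 + 4) * 4 = 240 <= 256 fits.
+ */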
+
+static int sunxi_nand_attach_chip(struct nand_chip *nand)
+{
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&nand->base);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ struct device_node *np = nand_get_flash_node(nand);
+ int ret;
+
+ if (nand->bbt_options & NAND_BBT_USE_FLASH)
+ nand->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (nand->options & NAND_NEED_SCRAMBLING)
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+
+ nand->options |= NAND_SUBPAGE_READ;
+
+ if (!ecc->size) {
+ ecc->size = requirements->step_size;
+ ecc->strength = requirements->strength;
+ }
+
+ if (!ecc->size || !ecc->strength)
+ return -EINVAL;
+
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = sunxi_nand_hw_ecc_ctrl_init(nand, ecc, np);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sunxi_nfc_exec_subop(struct nand_chip *nand,
+ const struct nand_subop *subop)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ u32 cmd = 0, extcmd = 0, cnt = 0, addrs[2] = { };
+ unsigned int i, j, remaining, start;
+ void *inbuf = NULL;
+ int ret;
+
+ for (i = 0; i < subop->ninstrs; i++) {
+ const struct nand_op_instr *instr = &subop->instrs[i];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (cmd & NFC_SEND_CMD1) {
+ if (WARN_ON(cmd & NFC_SEND_CMD2))
+ return -EINVAL;
+
+ cmd |= NFC_SEND_CMD2;
+ extcmd |= instr->ctx.cmd.opcode;
+ } else {
+ cmd |= NFC_SEND_CMD1 |
+ NFC_CMD(instr->ctx.cmd.opcode);
+ }
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ remaining = nand_subop_get_num_addr_cyc(subop, i);
+ start = nand_subop_get_addr_start_off(subop, i);
+ for (j = 0; j < 8 && j + start < remaining; j++) {
+ u32 addr = instr->ctx.addr.addrs[j + start];
+
+				addrs[j / 4] |= addr << ((j % 4) * 8);
+ }
+
+ if (j)
+ cmd |= NFC_SEND_ADR | NFC_ADR_NUM(j);
+
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ start = nand_subop_get_data_start_off(subop, i);
+ remaining = nand_subop_get_data_len(subop, i);
+ cnt = min_t(u32, remaining, NFC_SRAM_SIZE);
+ cmd |= NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
+
+ if (instr->type == NAND_OP_DATA_OUT_INSTR) {
+ cmd |= NFC_ACCESS_DIR;
+ memcpy_toio(nfc->regs + NFC_RAM0_BASE,
+ instr->ctx.data.buf.out + start,
+ cnt);
+ } else {
+ inbuf = instr->ctx.data.buf.in + start;
+ }
+
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ cmd |= NFC_WAIT_FLAG;
+ break;
+ }
+ }
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ if (cmd & NFC_SEND_ADR) {
+ writel(addrs[0], nfc->regs + NFC_REG_ADDR_LOW);
+ writel(addrs[1], nfc->regs + NFC_REG_ADDR_HIGH);
+ }
+
+ if (cmd & NFC_SEND_CMD2)
+ writel(extcmd,
+ nfc->regs +
+ (cmd & NFC_ACCESS_DIR ?
+ NFC_REG_WCMD_SET : NFC_REG_RCMD_SET));
+
+ if (cmd & NFC_DATA_TRANS)
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+
+ writel(cmd, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG,
+ !(cmd & NFC_WAIT_FLAG) && cnt < 64,
+ 0);
+ if (ret)
+ return ret;
+
+ if (inbuf)
+ memcpy_fromio(inbuf, nfc->regs + NFC_RAM0_BASE, cnt);
+
+ return 0;
+}
+
+static int sunxi_nfc_soft_waitrdy(struct nand_chip *nand,
+ const struct nand_subop *subop)
+{
+ return nand_soft_waitrdy(nand,
+ subop->instrs[0].ctx.waitrdy.timeout_ms);
+}
+
+static const struct nand_op_parser sunxi_nfc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)),
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+);
+
+static const struct nand_op_parser sunxi_nfc_norb_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)),
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_soft_waitrdy,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+);
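+
+/*
+ * For reference, a typical page read issued by the core (CMD 0x00, five
+ * address cycles, CMD 0x30, wait for R/B, DATA_IN) matches the first
+ * pattern of sunxi_nfc_op_parser above and is executed as a single
+ * sunxi_nfc_exec_subop() call with NFC_SEND_CMD1, NFC_SEND_ADR,
+ * NFC_SEND_CMD2, NFC_WAIT_FLAG and NFC_DATA_TRANS all set.
+ */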
+
+static int sunxi_nfc_exec_op(struct nand_chip *nand,
+ const struct nand_operation *op, bool check_only)
+{
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ const struct nand_op_parser *parser;
+
+ if (!check_only)
+ sunxi_nfc_select_chip(nand, op->cs);
+
+ if (sunxi_nand->sels[op->cs].rb >= 0)
+ parser = &sunxi_nfc_op_parser;
+ else
+ parser = &sunxi_nfc_norb_op_parser;
+
+ return nand_op_parser_exec_op(nand, parser, op, check_only);
+}
+
+static const struct nand_controller_ops sunxi_nand_controller_ops = {
+ .attach_chip = sunxi_nand_attach_chip,
+ .setup_interface = sunxi_nfc_setup_interface,
+ .exec_op = sunxi_nfc_exec_op,
+};
+
+static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
+{
+ struct sunxi_nand_chip *sunxi_nand;
+ struct nand_chip *chip;
+ int ret;
+
+ while (!list_empty(&nfc->chips)) {
+ sunxi_nand = list_first_entry(&nfc->chips,
+ struct sunxi_nand_chip,
+ node);
+ chip = &sunxi_nand->nand;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&sunxi_nand->node);
+ }
+}
+
+static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
+ struct device_node *np)
+{
+ struct sunxi_nand_chip *sunxi_nand;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ int nsels;
+ int ret;
+ int i;
+ u32 tmp;
+
+ if (!of_get_property(np, "reg", &nsels))
+ return -EINVAL;
+
+ nsels /= sizeof(u32);
+ if (!nsels) {
+ dev_err(dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ sunxi_nand = devm_kzalloc(dev, struct_size(sunxi_nand, sels, nsels),
+ GFP_KERNEL);
+ if (!sunxi_nand)
+ return -ENOMEM;
+
+ sunxi_nand->nsels = nsels;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &tmp);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (tmp > NFC_MAX_CS) {
+ dev_err(dev,
+ "invalid reg value: %u (max CS = 7)\n",
+ tmp);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+			dev_err(dev, "CS %u already assigned\n", tmp);
+ return -EINVAL;
+ }
+
+ sunxi_nand->sels[i].cs = tmp;
+
+ if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
+ tmp < 2)
+ sunxi_nand->sels[i].rb = tmp;
+ else
+ sunxi_nand->sels[i].rb = -1;
+ }
+
+ nand = &sunxi_nand->nand;
+ /* Default tR value specified in the ONFI spec (chapter 4.15.1) */
+ nand->controller = &nfc->controller;
+ nand->controller->ops = &sunxi_nand_controller_ops;
+
+ /*
+ * Set the ECC mode to the default value in case nothing is specified
+ * in the DT.
+ */
+ nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ nand_set_flash_node(nand, np);
+
+ mtd = nand_to_mtd(nand);
+ mtd->dev.parent = dev;
+
+ ret = nand_scan(nand, nsels);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register mtd device: %d\n", ret);
+ nand_cleanup(nand);
+ return ret;
+ }
+
+ list_add_tail(&sunxi_nand->node, &nfc->chips);
+
+ return 0;
+}
+
+static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int ret;
+
+ for_each_child_of_node(np, nand_np) {
+ ret = sunxi_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ sunxi_nand_chips_cleanup(nfc);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int sunxi_nfc_dma_init(struct sunxi_nfc *nfc, struct resource *r)
+{
+ int ret;
+
+ if (nfc->caps->has_mdma)
+ return 0;
+
+ nfc->dmac = dma_request_chan(nfc->dev, "rxtx");
+ if (IS_ERR(nfc->dmac)) {
+ ret = PTR_ERR(nfc->dmac);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ /* Ignore errors to fall back to PIO mode */
+ dev_warn(nfc->dev, "failed to request rxtx DMA channel: %d\n", ret);
+ nfc->dmac = NULL;
+ } else {
+ struct dma_slave_config dmac_cfg = { };
+
+ dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data;
+ dmac_cfg.dst_addr = dmac_cfg.src_addr;
+ dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
+ dmac_cfg.src_maxburst = nfc->caps->dma_maxburst;
+ dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst;
+ dmaengine_slave_config(nfc->dmac, &dmac_cfg);
+ }
+ return 0;
+}
+
+static int sunxi_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ struct sunxi_nfc *nfc;
+ int irq;
+ int ret;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = dev;
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ nfc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ nfc->ahb_clk = devm_clk_get_enabled(dev, "ahb");
+ if (IS_ERR(nfc->ahb_clk)) {
+ dev_err(dev, "failed to retrieve ahb clk\n");
+ return PTR_ERR(nfc->ahb_clk);
+ }
+
+ nfc->mod_clk = devm_clk_get_enabled(dev, "mod");
+ if (IS_ERR(nfc->mod_clk)) {
+ dev_err(dev, "failed to retrieve mod clk\n");
+ return PTR_ERR(nfc->mod_clk);
+ }
+
+ nfc->reset = devm_reset_control_get_optional_exclusive(dev, "ahb");
+ if (IS_ERR(nfc->reset))
+ return PTR_ERR(nfc->reset);
+
+ ret = reset_control_deassert(nfc->reset);
+ if (ret) {
+ dev_err(dev, "reset err %d\n", ret);
+ return ret;
+ }
+
+ nfc->caps = of_device_get_match_data(&pdev->dev);
+ if (!nfc->caps) {
+ ret = -EINVAL;
+ goto out_ahb_reset_reassert;
+ }
+
+ ret = sunxi_nfc_rst(nfc);
+ if (ret)
+ goto out_ahb_reset_reassert;
+
+ writel(0, nfc->regs + NFC_REG_INT);
+ ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
+ 0, "sunxi-nand", nfc);
+ if (ret)
+ goto out_ahb_reset_reassert;
+
+	ret = sunxi_nfc_dma_init(nfc, r);
+ if (ret)
+ goto out_ahb_reset_reassert;
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = sunxi_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init nand chips\n");
+ goto out_release_dmac;
+ }
+
+ return 0;
+
+out_release_dmac:
+ if (nfc->dmac)
+ dma_release_channel(nfc->dmac);
+out_ahb_reset_reassert:
+ reset_control_assert(nfc->reset);
+
+ return ret;
+}
+
+static void sunxi_nfc_remove(struct platform_device *pdev)
+{
+ struct sunxi_nfc *nfc = platform_get_drvdata(pdev);
+
+ sunxi_nand_chips_cleanup(nfc);
+
+ reset_control_assert(nfc->reset);
+
+ if (nfc->dmac)
+ dma_release_channel(nfc->dmac);
+}
+
+static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = {
+ .reg_io_data = NFC_REG_A10_IO_DATA,
+ .dma_maxburst = 4,
+};
+
+static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = {
+ .has_mdma = true,
+ .reg_io_data = NFC_REG_A23_IO_DATA,
+ .dma_maxburst = 8,
+};
+
+static const struct of_device_id sunxi_nfc_ids[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-nand",
+ .data = &sunxi_nfc_a10_caps,
+ },
+ {
+ .compatible = "allwinner,sun8i-a23-nand-controller",
+ .data = &sunxi_nfc_a23_caps,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);
+
+static struct platform_driver sunxi_nfc_driver = {
+ .driver = {
+ .name = "sunxi_nand",
+ .of_match_table = sunxi_nfc_ids,
+ },
+ .probe = sunxi_nfc_probe,
+ .remove_new = sunxi_nfc_remove,
+};
+module_platform_driver(sunxi_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Boris BREZILLON");
+MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
+MODULE_ALIAS("platform:sunxi_nand");
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
new file mode 100644
index 0000000000..a553e3ac8f
--- /dev/null
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -0,0 +1,1290 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Stefan Agner <stefan@agner.ch>
+ * Copyright (C) 2014-2015 Lucas Stach <dev@lynxeye.de>
+ * Copyright (C) 2012 Avionic Design GmbH
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/common.h>
+
+#define COMMAND 0x00
+#define COMMAND_GO BIT(31)
+#define COMMAND_CLE BIT(30)
+#define COMMAND_ALE BIT(29)
+#define COMMAND_PIO BIT(28)
+#define COMMAND_TX BIT(27)
+#define COMMAND_RX BIT(26)
+#define COMMAND_SEC_CMD BIT(25)
+#define COMMAND_AFT_DAT BIT(24)
+#define COMMAND_TRANS_SIZE(size) ((((size) - 1) & 0xf) << 20)
+#define COMMAND_A_VALID BIT(19)
+#define COMMAND_B_VALID BIT(18)
+#define COMMAND_RD_STATUS_CHK BIT(17)
+#define COMMAND_RBSY_CHK BIT(16)
+#define COMMAND_CE(x) BIT(8 + ((x) & 0x7))
+#define COMMAND_CLE_SIZE(size) ((((size) - 1) & 0x3) << 4)
+#define COMMAND_ALE_SIZE(size) ((((size) - 1) & 0xf) << 0)
+
+#define STATUS 0x04
+
+#define ISR 0x08
+#define ISR_CORRFAIL_ERR BIT(24)
+#define ISR_UND BIT(7)
+#define ISR_OVR BIT(6)
+#define ISR_CMD_DONE BIT(5)
+#define ISR_ECC_ERR BIT(4)
+
+#define IER 0x0c
+#define IER_ERR_TRIG_VAL(x) (((x) & 0xf) << 16)
+#define IER_UND BIT(7)
+#define IER_OVR BIT(6)
+#define IER_CMD_DONE BIT(5)
+#define IER_ECC_ERR BIT(4)
+#define IER_GIE BIT(0)
+
+#define CONFIG 0x10
+#define CONFIG_HW_ECC BIT(31)
+#define CONFIG_ECC_SEL BIT(30)
+#define CONFIG_ERR_COR BIT(29)
+#define CONFIG_PIPE_EN BIT(28)
+#define CONFIG_TVAL_4 (0 << 24)
+#define CONFIG_TVAL_6 (1 << 24)
+#define CONFIG_TVAL_8 (2 << 24)
+#define CONFIG_SKIP_SPARE BIT(23)
+#define CONFIG_BUS_WIDTH_16 BIT(21)
+#define CONFIG_COM_BSY BIT(20)
+#define CONFIG_PS_256 (0 << 16)
+#define CONFIG_PS_512 (1 << 16)
+#define CONFIG_PS_1024 (2 << 16)
+#define CONFIG_PS_2048 (3 << 16)
+#define CONFIG_PS_4096 (4 << 16)
+#define CONFIG_SKIP_SPARE_SIZE_4 (0 << 14)
+#define CONFIG_SKIP_SPARE_SIZE_8 (1 << 14)
+#define CONFIG_SKIP_SPARE_SIZE_12 (2 << 14)
+#define CONFIG_SKIP_SPARE_SIZE_16 (3 << 14)
+#define CONFIG_TAG_BYTE_SIZE(x) ((x) & 0xff)
+
+#define TIMING_1 0x14
+#define TIMING_TRP_RESP(x) (((x) & 0xf) << 28)
+#define TIMING_TWB(x) (((x) & 0xf) << 24)
+#define TIMING_TCR_TAR_TRR(x) (((x) & 0xf) << 20)
+#define TIMING_TWHR(x) (((x) & 0xf) << 16)
+#define TIMING_TCS(x) (((x) & 0x3) << 14)
+#define TIMING_TWH(x) (((x) & 0x3) << 12)
+#define TIMING_TWP(x) (((x) & 0xf) << 8)
+#define TIMING_TRH(x) (((x) & 0x3) << 4)
+#define TIMING_TRP(x) (((x) & 0xf) << 0)
+
+#define RESP 0x18
+
+#define TIMING_2 0x1c
+#define TIMING_TADL(x) ((x) & 0xf)
+
+#define CMD_REG1 0x20
+#define CMD_REG2 0x24
+#define ADDR_REG1 0x28
+#define ADDR_REG2 0x2c
+
+#define DMA_MST_CTRL 0x30
+#define DMA_MST_CTRL_GO BIT(31)
+#define DMA_MST_CTRL_IN (0 << 30)
+#define DMA_MST_CTRL_OUT BIT(30)
+#define DMA_MST_CTRL_PERF_EN BIT(29)
+#define DMA_MST_CTRL_IE_DONE BIT(28)
+#define DMA_MST_CTRL_REUSE BIT(27)
+#define DMA_MST_CTRL_BURST_1 (2 << 24)
+#define DMA_MST_CTRL_BURST_4 (3 << 24)
+#define DMA_MST_CTRL_BURST_8 (4 << 24)
+#define DMA_MST_CTRL_BURST_16 (5 << 24)
+#define DMA_MST_CTRL_IS_DONE BIT(20)
+#define DMA_MST_CTRL_EN_A BIT(2)
+#define DMA_MST_CTRL_EN_B BIT(1)
+
+#define DMA_CFG_A 0x34
+#define DMA_CFG_B 0x38
+
+#define FIFO_CTRL 0x3c
+#define FIFO_CTRL_CLR_ALL BIT(3)
+
+#define DATA_PTR 0x40
+#define TAG_PTR 0x44
+#define ECC_PTR 0x48
+
+#define DEC_STATUS 0x4c
+#define DEC_STATUS_A_ECC_FAIL BIT(1)
+#define DEC_STATUS_ERR_COUNT_MASK 0x00ff0000
+#define DEC_STATUS_ERR_COUNT_SHIFT 16
+
+#define HWSTATUS_CMD 0x50
+#define HWSTATUS_MASK 0x54
+#define HWSTATUS_RDSTATUS_MASK(x) (((x) & 0xff) << 24)
+#define HWSTATUS_RDSTATUS_VALUE(x) (((x) & 0xff) << 16)
+#define HWSTATUS_RBSY_MASK(x) (((x) & 0xff) << 8)
+#define HWSTATUS_RBSY_VALUE(x) (((x) & 0xff) << 0)
+
+#define BCH_CONFIG 0xcc
+#define BCH_ENABLE BIT(0)
+#define BCH_TVAL_4 (0 << 4)
+#define BCH_TVAL_8 (1 << 4)
+#define BCH_TVAL_14 (2 << 4)
+#define BCH_TVAL_16 (3 << 4)
+
+#define DEC_STAT_RESULT 0xd0
+#define DEC_STAT_BUF 0xd4
+#define DEC_STAT_BUF_FAIL_SEC_FLAG_MASK 0xff000000
+#define DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT 24
+#define DEC_STAT_BUF_CORR_SEC_FLAG_MASK 0x00ff0000
+#define DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT 16
+#define DEC_STAT_BUF_MAX_CORR_CNT_MASK 0x00001f00
+#define DEC_STAT_BUF_MAX_CORR_CNT_SHIFT 8
+
+#define OFFSET(val, off) ((val) < (off) ? 0 : (val) - (off))
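+
+/*
+ * Clamped subtraction for the timing fields below: the controller adds a
+ * fixed number of cycles ('off') of its own, so max(val - off, 0) is
+ * programmed (an inference from how OFFSET() is used in
+ * tegra_nand_setup_timing()).
+ */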
+
+#define SKIP_SPARE_BYTES 4
+#define BITS_PER_STEP_RS 18
+#define BITS_PER_STEP_BCH 13
+
+#define INT_MASK (IER_UND | IER_OVR | IER_CMD_DONE | IER_GIE)
+#define HWSTATUS_CMD_DEFAULT NAND_STATUS_READY
+#define HWSTATUS_MASK_DEFAULT (HWSTATUS_RDSTATUS_MASK(1) | \
+ HWSTATUS_RDSTATUS_VALUE(0) | \
+ HWSTATUS_RBSY_MASK(NAND_STATUS_READY) | \
+ HWSTATUS_RBSY_VALUE(NAND_STATUS_READY))
+
+struct tegra_nand_controller {
+ struct nand_controller controller;
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ struct completion command_complete;
+ struct completion dma_complete;
+ bool last_read_error;
+ int cur_cs;
+ struct nand_chip *chip;
+};
+
+struct tegra_nand_chip {
+ struct nand_chip chip;
+ struct gpio_desc *wp_gpio;
+ struct mtd_oob_region ecc;
+ u32 config;
+ u32 config_ecc;
+ u32 bch_config;
+ int cs[1];
+};
+
+static inline struct tegra_nand_controller *
+ to_tegra_ctrl(struct nand_controller *hw_ctrl)
+{
+ return container_of(hw_ctrl, struct tegra_nand_controller, controller);
+}
+
+static inline struct tegra_nand_chip *to_tegra_chip(struct nand_chip *chip)
+{
+ return container_of(chip, struct tegra_nand_chip, chip);
+}
+
+static int tegra_nand_ooblayout_rs_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_RS * chip->ecc.strength,
+ BITS_PER_BYTE);
+
+ if (section > 0)
+ return -ERANGE;
+
+ oobregion->offset = SKIP_SPARE_BYTES;
+ oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);
+
+ return 0;
+}
+
+static int tegra_nand_ooblayout_no_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ return -ERANGE;
+}
+
+static const struct mtd_ooblayout_ops tegra_nand_oob_rs_ops = {
+ .ecc = tegra_nand_ooblayout_rs_ecc,
+ .free = tegra_nand_ooblayout_no_free,
+};
+
+static int tegra_nand_ooblayout_bch_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_BCH * chip->ecc.strength,
+ BITS_PER_BYTE);
+
+ if (section > 0)
+ return -ERANGE;
+
+ oobregion->offset = SKIP_SPARE_BYTES;
+ oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops tegra_nand_oob_bch_ops = {
+ .ecc = tegra_nand_ooblayout_bch_ecc,
+ .free = tegra_nand_ooblayout_no_free,
+};
+
+static irqreturn_t tegra_nand_irq(int irq, void *data)
+{
+ struct tegra_nand_controller *ctrl = data;
+ u32 isr, dma;
+
+ isr = readl_relaxed(ctrl->regs + ISR);
+ dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
+ dev_dbg(ctrl->dev, "isr %08x\n", isr);
+
+ if (!isr && !(dma & DMA_MST_CTRL_IS_DONE))
+ return IRQ_NONE;
+
+ /*
+	 * The bit name is somewhat misleading: this is also set when
+ * HW ECC was successful. The data sheet states:
+ * Correctable OR Un-correctable errors occurred in the DMA transfer...
+ */
+ if (isr & ISR_CORRFAIL_ERR)
+ ctrl->last_read_error = true;
+
+ if (isr & ISR_CMD_DONE)
+ complete(&ctrl->command_complete);
+
+ if (isr & ISR_UND)
+ dev_err(ctrl->dev, "FIFO underrun\n");
+
+ if (isr & ISR_OVR)
+ dev_err(ctrl->dev, "FIFO overrun\n");
+
+ /* handle DMA interrupts */
+ if (dma & DMA_MST_CTRL_IS_DONE) {
+ writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
+ complete(&ctrl->dma_complete);
+ }
+
+ /* clear interrupts */
+ writel_relaxed(isr, ctrl->regs + ISR);
+
+ return IRQ_HANDLED;
+}
+
+static const char * const tegra_nand_reg_names[] = {
+ "COMMAND",
+ "STATUS",
+ "ISR",
+ "IER",
+ "CONFIG",
+ "TIMING",
+ NULL,
+ "TIMING2",
+ "CMD_REG1",
+ "CMD_REG2",
+ "ADDR_REG1",
+ "ADDR_REG2",
+ "DMA_MST_CTRL",
+ "DMA_CFG_A",
+ "DMA_CFG_B",
+ "FIFO_CTRL",
+};
+
+static void tegra_nand_dump_reg(struct tegra_nand_controller *ctrl)
+{
+ u32 reg;
+ int i;
+
+ dev_err(ctrl->dev, "Tegra NAND controller register dump\n");
+ for (i = 0; i < ARRAY_SIZE(tegra_nand_reg_names); i++) {
+ const char *reg_name = tegra_nand_reg_names[i];
+
+ if (!reg_name)
+ continue;
+
+ reg = readl_relaxed(ctrl->regs + (i * 4));
+ dev_err(ctrl->dev, "%s: 0x%08x\n", reg_name, reg);
+ }
+}
+
+static void tegra_nand_controller_abort(struct tegra_nand_controller *ctrl)
+{
+ u32 isr, dma;
+
+ disable_irq(ctrl->irq);
+
+ /* Abort current command/DMA operation */
+ writel_relaxed(0, ctrl->regs + DMA_MST_CTRL);
+ writel_relaxed(0, ctrl->regs + COMMAND);
+
+ /* clear interrupts */
+ isr = readl_relaxed(ctrl->regs + ISR);
+ writel_relaxed(isr, ctrl->regs + ISR);
+ dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
+ writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
+
+ reinit_completion(&ctrl->command_complete);
+ reinit_completion(&ctrl->dma_complete);
+
+ enable_irq(ctrl->irq);
+}
+
+static int tegra_nand_cmd(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ const struct nand_op_instr *instr_data_in = NULL;
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ unsigned int op_id, size = 0, offset = 0;
+ bool first_cmd = true;
+ u32 reg, cmd = 0;
+ int ret;
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int naddrs, i;
+ const u8 *addrs;
+ u32 addr1 = 0, addr2 = 0;
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (first_cmd) {
+ cmd |= COMMAND_CLE;
+ writel_relaxed(instr->ctx.cmd.opcode,
+ ctrl->regs + CMD_REG1);
+ } else {
+ cmd |= COMMAND_SEC_CMD;
+ writel_relaxed(instr->ctx.cmd.opcode,
+ ctrl->regs + CMD_REG2);
+ }
+ first_cmd = false;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(naddrs);
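+			/*
+			 * Pack up to eight address cycles: the first four
+			 * bytes go into ADDR_REG1, the rest into ADDR_REG2,
+			 * least-significant byte first.
+			 */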
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ addr1 |= *addrs++ << (BITS_PER_BYTE * i);
+ naddrs -= i;
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ addr2 |= *addrs++ << (BITS_PER_BYTE * i);
+
+ writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
+ writel_relaxed(addr2, ctrl->regs + ADDR_REG2);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ size = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+
+ cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
+ COMMAND_RX | COMMAND_A_VALID;
+
+ instr_data_in = instr;
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ size = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+
+ cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
+ COMMAND_TX | COMMAND_A_VALID;
+ memcpy(&reg, instr->ctx.data.buf.out + offset, size);
+
+ writel_relaxed(reg, ctrl->regs + RESP);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ cmd |= COMMAND_RBSY_CHK;
+ break;
+ }
+ }
+
+ cmd |= COMMAND_GO | COMMAND_CE(ctrl->cur_cs);
+ writel_relaxed(cmd, ctrl->regs + COMMAND);
+ ret = wait_for_completion_timeout(&ctrl->command_complete,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(ctrl->dev, "COMMAND timeout\n");
+ tegra_nand_dump_reg(ctrl);
+ tegra_nand_controller_abort(ctrl);
+ return -ETIMEDOUT;
+ }
+
+ if (instr_data_in) {
+ reg = readl_relaxed(ctrl->regs + RESP);
+ memcpy(instr_data_in->ctx.data.buf.in + offset, &reg, size);
+ }
+
+ return 0;
+}
+
+static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 4)),
+ NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
+ );
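+
+/*
+ * Each pattern above matches what one COMMAND register cycle can carry:
+ * up to two command bytes, up to eight address cycles, an optional
+ * ready/busy wait, and at most four bytes of PIO data via the RESP
+ * register.
+ */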
+
+static void tegra_nand_select_target(struct nand_chip *chip,
+ unsigned int die_nr)
+{
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+
+ ctrl->cur_cs = nand->cs[die_nr];
+}
+
+static int tegra_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ if (!check_only)
+ tegra_nand_select_target(chip, op->cs);
+
+ return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
+ check_only);
+}
+
+static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
+ struct nand_chip *chip, bool enable)
+{
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_BCH && enable)
+ writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
+ else
+ writel_relaxed(0, ctrl->regs + BCH_CONFIG);
+
+ if (enable)
+ writel_relaxed(nand->config_ecc, ctrl->regs + CONFIG);
+ else
+ writel_relaxed(nand->config, ctrl->regs + CONFIG);
+}
+
+static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
+ void *buf, void *oob_buf, int oob_len, int page,
+ bool read)
+{
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ dma_addr_t dma_addr = 0, dma_addr_oob = 0;
+ u32 addr1, cmd, dma_ctrl;
+ int ret;
+
+ tegra_nand_select_target(chip, chip->cur_cs);
+
+ if (read) {
+ writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
+ writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
+ } else {
+ writel_relaxed(NAND_CMD_SEQIN, ctrl->regs + CMD_REG1);
+ writel_relaxed(NAND_CMD_PAGEPROG, ctrl->regs + CMD_REG2);
+ }
+ cmd = COMMAND_CLE | COMMAND_SEC_CMD;
+
+	/* Lower 16 bits are the column address, 0 by default */
+ addr1 = page << 16;
+
+ if (!buf)
+ addr1 |= mtd->writesize;
+ writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
+
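+	/* Two column cycles plus two or three row cycles (NAND_ROW_ADDR_3) */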
+ if (chip->options & NAND_ROW_ADDR_3) {
+ writel_relaxed(page >> 16, ctrl->regs + ADDR_REG2);
+ cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(5);
+ } else {
+ cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(4);
+ }
+
+ if (buf) {
+ dma_addr = dma_map_single(ctrl->dev, buf, mtd->writesize, dir);
+ ret = dma_mapping_error(ctrl->dev, dma_addr);
+ if (ret) {
+ dev_err(ctrl->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ writel_relaxed(mtd->writesize - 1, ctrl->regs + DMA_CFG_A);
+ writel_relaxed(dma_addr, ctrl->regs + DATA_PTR);
+ }
+
+ if (oob_buf) {
+ dma_addr_oob = dma_map_single(ctrl->dev, oob_buf, mtd->oobsize,
+ dir);
+ ret = dma_mapping_error(ctrl->dev, dma_addr_oob);
+ if (ret) {
+ dev_err(ctrl->dev, "dma mapping error\n");
+ ret = -EINVAL;
+ goto err_unmap_dma_page;
+ }
+
+ writel_relaxed(oob_len - 1, ctrl->regs + DMA_CFG_B);
+ writel_relaxed(dma_addr_oob, ctrl->regs + TAG_PTR);
+ }
+
+ dma_ctrl = DMA_MST_CTRL_GO | DMA_MST_CTRL_PERF_EN |
+ DMA_MST_CTRL_IE_DONE | DMA_MST_CTRL_IS_DONE |
+ DMA_MST_CTRL_BURST_16;
+
+ if (buf)
+ dma_ctrl |= DMA_MST_CTRL_EN_A;
+ if (oob_buf)
+ dma_ctrl |= DMA_MST_CTRL_EN_B;
+
+ if (read)
+ dma_ctrl |= DMA_MST_CTRL_IN | DMA_MST_CTRL_REUSE;
+ else
+ dma_ctrl |= DMA_MST_CTRL_OUT;
+
+ writel_relaxed(dma_ctrl, ctrl->regs + DMA_MST_CTRL);
+
+ cmd |= COMMAND_GO | COMMAND_RBSY_CHK | COMMAND_TRANS_SIZE(9) |
+ COMMAND_CE(ctrl->cur_cs);
+
+ if (buf)
+ cmd |= COMMAND_A_VALID;
+ if (oob_buf)
+ cmd |= COMMAND_B_VALID;
+
+ if (read)
+ cmd |= COMMAND_RX;
+ else
+ cmd |= COMMAND_TX | COMMAND_AFT_DAT;
+
+ writel_relaxed(cmd, ctrl->regs + COMMAND);
+
+ ret = wait_for_completion_timeout(&ctrl->command_complete,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(ctrl->dev, "COMMAND timeout\n");
+ tegra_nand_dump_reg(ctrl);
+ tegra_nand_controller_abort(ctrl);
+ ret = -ETIMEDOUT;
+ goto err_unmap_dma;
+ }
+
+ ret = wait_for_completion_timeout(&ctrl->dma_complete,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(ctrl->dev, "DMA timeout\n");
+ tegra_nand_dump_reg(ctrl);
+ tegra_nand_controller_abort(ctrl);
+ ret = -ETIMEDOUT;
+ goto err_unmap_dma;
+ }
+ ret = 0;
+
+err_unmap_dma:
+ if (oob_buf)
+ dma_unmap_single(ctrl->dev, dma_addr_oob, mtd->oobsize, dir);
+err_unmap_dma_page:
+ if (buf)
+ dma_unmap_single(ctrl->dev, dma_addr, mtd->writesize, dir);
+
+ return ret;
+}
+
+static int tegra_nand_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+
+ return tegra_nand_page_xfer(mtd, chip, buf, oob_buf,
+ mtd->oobsize, page, true);
+}
+
+static int tegra_nand_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+
+ return tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
+ mtd->oobsize, page, false);
+}
+
+static int tegra_nand_read_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
+ mtd->oobsize, page, true);
+}
+
+static int tegra_nand_write_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
+ mtd->oobsize, page, false);
+}
+
+static int tegra_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+ u32 dec_stat, max_corr_cnt;
+ unsigned long fail_sec_flag;
+ int ret;
+
+ tegra_nand_hw_ecc(ctrl, chip, true);
+ ret = tegra_nand_page_xfer(mtd, chip, buf, oob_buf, 0, page, true);
+ tegra_nand_hw_ecc(ctrl, chip, false);
+ if (ret)
+ return ret;
+
+ /* No correctable or un-correctable errors, page must have 0 bitflips */
+ if (!ctrl->last_read_error)
+ return 0;
+
+ /*
+ * Correctable or un-correctable errors occurred. Use DEC_STAT_BUF
+ * which contains information for all ECC selections.
+ *
+ * Note that since we do not use Command Queues DEC_RESULT does not
+ * state the number of pages we can read from the DEC_STAT_BUF. But
+ * since CORRFAIL_ERR did occur during page read we do have a valid
+ * result in DEC_STAT_BUF.
+ */
+ ctrl->last_read_error = false;
+ dec_stat = readl_relaxed(ctrl->regs + DEC_STAT_BUF);
+
+ fail_sec_flag = (dec_stat & DEC_STAT_BUF_FAIL_SEC_FLAG_MASK) >>
+ DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT;
+
+ max_corr_cnt = (dec_stat & DEC_STAT_BUF_MAX_CORR_CNT_MASK) >>
+ DEC_STAT_BUF_MAX_CORR_CNT_SHIFT;
+
+ if (fail_sec_flag) {
+ int bit, max_bitflips = 0;
+
+ /*
+ * Since we do not support subpage writes, a complete page
+ * is either written or not. We can take a shortcut here by
+		 * checking whether any of the sectors has been successfully
+		 * read. If at least one sector has been read successfully,
+		 * the page must have been written previously. It cannot
+ * be an erased page.
+ *
+ * E.g. controller might return fail_sec_flag with 0x4, which
+ * would mean only the third sector failed to correct. The
+ * page must have been written and the third sector is really
+ * not correctable anymore.
+ */
+ if (fail_sec_flag ^ GENMASK(chip->ecc.steps - 1, 0)) {
+ mtd->ecc_stats.failed += hweight8(fail_sec_flag);
+ return max_corr_cnt;
+ }
+
+ /*
+ * All sectors failed to correct, but the ECC isn't smart
+ * enough to figure out if a page is really just erased.
+ * Read OOB data and check whether data/OOB is completely
+ * erased or if error correction just failed for all sub-
+ * pages.
+ */
+ ret = tegra_nand_read_oob(chip, page);
+ if (ret < 0)
+ return ret;
+
+ for_each_set_bit(bit, &fail_sec_flag, chip->ecc.steps) {
+ u8 *data = buf + (chip->ecc.size * bit);
+ u8 *oob = chip->oob_poi + nand->ecc.offset +
+ (chip->ecc.bytes * bit);
+
+ ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
+ oob, chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max(ret, max_bitflips);
+ }
+ }
+
+ return max_t(unsigned int, max_corr_cnt, max_bitflips);
+ } else {
+ int corr_sec_flag;
+
+ corr_sec_flag = (dec_stat & DEC_STAT_BUF_CORR_SEC_FLAG_MASK) >>
+ DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT;
+
+ /*
+ * The value returned in the register is the maximum of
+ * bitflips encountered in any of the ECC regions. As there is
+		 * no way to get the number of bitflips in a specific region,
+ * we are not able to deliver correct stats but instead
+ * overestimate the number of corrected bitflips by assuming
+ * that all regions where errors have been corrected
+ * encountered the maximum number of bitflips.
+ */
+ mtd->ecc_stats.corrected += max_corr_cnt * hweight8(corr_sec_flag);
+
+ return max_corr_cnt;
+ }
+}
+
+static int tegra_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+ int ret;
+
+ tegra_nand_hw_ecc(ctrl, chip, true);
+ ret = tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
+ 0, page, false);
+ tegra_nand_hw_ecc(ctrl, chip, false);
+
+ return ret;
+}
+
+static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
+ const struct nand_sdr_timings *timings)
+{
+ /*
+ * The period (and all other timings in this function) is in ps,
+	 * so we need to take care here to avoid integer overflows.
+ */
+ unsigned int rate = clk_get_rate(ctrl->clk) / 1000000;
+ unsigned int period = DIV_ROUND_UP(1000000, rate);
+ u32 val, reg = 0;
+
+ val = DIV_ROUND_UP(max3(timings->tAR_min, timings->tRR_min,
+ timings->tRC_min), period);
+ reg |= TIMING_TCR_TAR_TRR(OFFSET(val, 3));
+
+ val = DIV_ROUND_UP(max(max(timings->tCS_min, timings->tCH_min),
+ max(timings->tALS_min, timings->tALH_min)),
+ period);
+ reg |= TIMING_TCS(OFFSET(val, 2));
+
+ val = DIV_ROUND_UP(max(timings->tRP_min, timings->tREA_max) + 6000,
+ period);
+ reg |= TIMING_TRP(OFFSET(val, 1)) | TIMING_TRP_RESP(OFFSET(val, 1));
+
+ reg |= TIMING_TWB(OFFSET(DIV_ROUND_UP(timings->tWB_max, period), 1));
+ reg |= TIMING_TWHR(OFFSET(DIV_ROUND_UP(timings->tWHR_min, period), 1));
+ reg |= TIMING_TWH(OFFSET(DIV_ROUND_UP(timings->tWH_min, period), 1));
+ reg |= TIMING_TWP(OFFSET(DIV_ROUND_UP(timings->tWP_min, period), 1));
+ reg |= TIMING_TRH(OFFSET(DIV_ROUND_UP(timings->tREH_min, period), 1));
+
+ writel_relaxed(reg, ctrl->regs + TIMING_1);
+
+ val = DIV_ROUND_UP(timings->tADL_min, period);
+ reg = TIMING_TADL(OFFSET(val, 3));
+
+ writel_relaxed(reg, ctrl->regs + TIMING_2);
+}
+
+static int tegra_nand_setup_interface(struct nand_chip *chip, int csline,
+ const struct nand_interface_config *conf)
+{
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ const struct nand_sdr_timings *timings;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ tegra_nand_setup_timing(ctrl, timings);
+
+ return 0;
+}
+
+static const int rs_strength_bootable[] = { 4 };
+static const int rs_strength[] = { 4, 6, 8 };
+static const int bch_strength_bootable[] = { 8, 16 };
+static const int bch_strength[] = { 4, 8, 14, 16 };
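+
+/*
+ * Boot media are limited to the strengths the boot ROM can handle,
+ * hence the separate "bootable" tables (an assumption based on the
+ * NAND_IS_BOOT_MEDIUM check below).
+ */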
+
+static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
+ int strength_len, int bits_per_step,
+ int oobsize)
+{
+ struct nand_device *base = mtd_to_nanddev(nand_to_mtd(chip));
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(base);
+ bool maximize = base->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH;
+ int i;
+
+ /*
+	 * Loop through the available strengths, backwards in case we try
+	 * to maximize the BCH strength.
+ */
+ for (i = 0; i < strength_len; i++) {
+ int strength_sel, bytes_per_step, bytes_per_page;
+
+ if (maximize) {
+ strength_sel = strength[strength_len - i - 1];
+ } else {
+ strength_sel = strength[i];
+
+ if (strength_sel < requirements->strength)
+ continue;
+ }
+
+ bytes_per_step = DIV_ROUND_UP(bits_per_step * strength_sel,
+ BITS_PER_BYTE);
+ bytes_per_page = round_up(bytes_per_step * chip->ecc.steps, 4);
+
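+		/*
+		 * Worked example: BCH-8 on a 2K page gives four steps of
+		 * DIV_ROUND_UP(13 * 8, 8) = 13 bytes, i.e. 52 bytes per
+		 * page, which fits a 64 byte OOB minus the 4 skipped
+		 * spare bytes (52 < 60).
+		 */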
+ /* Check whether strength fits OOB */
+ if (bytes_per_page < (oobsize - SKIP_SPARE_BYTES))
+ return strength_sel;
+ }
+
+ return -EINVAL;
+}
+
+static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
+{
+ const int *strength;
+ int strength_len, bits_per_step;
+
+ switch (chip->ecc.algo) {
+ case NAND_ECC_ALGO_RS:
+ bits_per_step = BITS_PER_STEP_RS;
+ if (chip->options & NAND_IS_BOOT_MEDIUM) {
+ strength = rs_strength_bootable;
+ strength_len = ARRAY_SIZE(rs_strength_bootable);
+ } else {
+ strength = rs_strength;
+ strength_len = ARRAY_SIZE(rs_strength);
+ }
+ break;
+ case NAND_ECC_ALGO_BCH:
+ bits_per_step = BITS_PER_STEP_BCH;
+ if (chip->options & NAND_IS_BOOT_MEDIUM) {
+ strength = bch_strength_bootable;
+ strength_len = ARRAY_SIZE(bch_strength_bootable);
+ } else {
+ strength = bch_strength;
+ strength_len = ARRAY_SIZE(bch_strength);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tegra_nand_get_strength(chip, strength, strength_len,
+ bits_per_step, oobsize);
+}
+
+static int tegra_nand_attach_chip(struct nand_chip *chip)
+{
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bits_per_step;
+ int ret;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.size = 512;
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ if (requirements->step_size != 512) {
+ dev_err(ctrl->dev, "Unsupported step size %d\n",
+ requirements->step_size);
+ return -EINVAL;
+ }
+
+ chip->ecc.read_page = tegra_nand_read_page_hwecc;
+ chip->ecc.write_page = tegra_nand_write_page_hwecc;
+ chip->ecc.read_page_raw = tegra_nand_read_page_raw;
+ chip->ecc.write_page_raw = tegra_nand_write_page_raw;
+ chip->ecc.read_oob = tegra_nand_read_oob;
+ chip->ecc.write_oob = tegra_nand_write_oob;
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ nand->config |= CONFIG_BUS_WIDTH_16;
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
+ if (mtd->writesize < 2048)
+ chip->ecc.algo = NAND_ECC_ALGO_RS;
+ else
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
+ }
+
+ if (chip->ecc.algo == NAND_ECC_ALGO_BCH && mtd->writesize < 2048) {
+ dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
+ return -EINVAL;
+ }
+
+ if (!chip->ecc.strength) {
+ ret = tegra_nand_select_strength(chip, mtd->oobsize);
+ if (ret < 0) {
+ dev_err(ctrl->dev,
+ "No valid strength found, minimum %d\n",
+ requirements->strength);
+ return ret;
+ }
+
+ chip->ecc.strength = ret;
+ }
+
+ nand->config_ecc = CONFIG_PIPE_EN | CONFIG_SKIP_SPARE |
+ CONFIG_SKIP_SPARE_SIZE_4;
+
+ switch (chip->ecc.algo) {
+ case NAND_ECC_ALGO_RS:
+ bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
+ mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
+ nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
+ CONFIG_ERR_COR;
+ switch (chip->ecc.strength) {
+ case 4:
+ nand->config_ecc |= CONFIG_TVAL_4;
+ break;
+ case 6:
+ nand->config_ecc |= CONFIG_TVAL_6;
+ break;
+ case 8:
+ nand->config_ecc |= CONFIG_TVAL_8;
+ break;
+ default:
+ dev_err(ctrl->dev, "ECC strength %d not supported\n",
+ chip->ecc.strength);
+ return -EINVAL;
+ }
+ break;
+ case NAND_ECC_ALGO_BCH:
+ bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
+ mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
+ nand->bch_config = BCH_ENABLE;
+ switch (chip->ecc.strength) {
+ case 4:
+ nand->bch_config |= BCH_TVAL_4;
+ break;
+ case 8:
+ nand->bch_config |= BCH_TVAL_8;
+ break;
+ case 14:
+ nand->bch_config |= BCH_TVAL_14;
+ break;
+ case 16:
+ nand->bch_config |= BCH_TVAL_16;
+ break;
+ default:
+ dev_err(ctrl->dev, "ECC strength %d not supported\n",
+ chip->ecc.strength);
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(ctrl->dev, "ECC algorithm not supported\n");
+ return -EINVAL;
+ }
+
+ dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
+ chip->ecc.algo == NAND_ECC_ALGO_BCH ? "BCH" : "RS",
+ chip->ecc.strength);
+
+ chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);
+
+ switch (mtd->writesize) {
+ case 256:
+ nand->config |= CONFIG_PS_256;
+ break;
+ case 512:
+ nand->config |= CONFIG_PS_512;
+ break;
+ case 1024:
+ nand->config |= CONFIG_PS_1024;
+ break;
+ case 2048:
+ nand->config |= CONFIG_PS_2048;
+ break;
+ case 4096:
+ nand->config |= CONFIG_PS_4096;
+ break;
+ default:
+ dev_err(ctrl->dev, "Unsupported writesize %d\n",
+ mtd->writesize);
+ return -ENODEV;
+ }
+
+ /* Store complete configuration for HW ECC in config_ecc */
+ nand->config_ecc |= nand->config;
+
+ /* Non-HW ECC read/writes complete OOB */
+ nand->config |= CONFIG_TAG_BYTE_SIZE(mtd->oobsize - 1);
+ writel_relaxed(nand->config, ctrl->regs + CONFIG);
+
+ return 0;
+}
+
+static const struct nand_controller_ops tegra_nand_controller_ops = {
+ .attach_chip = &tegra_nand_attach_chip,
+ .exec_op = tegra_nand_exec_op,
+ .setup_interface = tegra_nand_setup_interface,
+};
+
+static int tegra_nand_chips_init(struct device *dev,
+ struct tegra_nand_controller *ctrl)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *np_nand;
+ int nsels, nchips = of_get_child_count(np);
+ struct tegra_nand_chip *nand;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int ret;
+ u32 cs;
+
+ if (nchips != 1) {
+ dev_err(dev, "Currently only one NAND chip supported\n");
+ return -EINVAL;
+ }
+
+ np_nand = of_get_next_child(np, NULL);
+
+ nsels = of_property_count_elems_of_size(np_nand, "reg", sizeof(u32));
+ if (nsels != 1) {
+ dev_err(dev, "Missing/invalid reg property\n");
+ return -EINVAL;
+ }
+
+ /* Retrieve CS id, currently only single die NAND supported */
+ ret = of_property_read_u32(np_nand, "reg", &cs);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n", ret);
+ return ret;
+ }
+
+ nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
+ if (!nand)
+ return -ENOMEM;
+
+ nand->cs[0] = cs;
+
+	nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
+ if (IS_ERR(nand->wp_gpio)) {
+ ret = PTR_ERR(nand->wp_gpio);
+ dev_err(dev, "Failed to request WP GPIO: %d\n", ret);
+ return ret;
+ }
+
+ chip = &nand->chip;
+ chip->controller = &ctrl->controller;
+
+ mtd = nand_to_mtd(chip);
+
+ mtd->dev.parent = dev;
+ mtd->owner = THIS_MODULE;
+
+ nand_set_flash_node(chip, np_nand);
+
+ if (!mtd->name)
+ mtd->name = "tegra_nand";
+
+ chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA;
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return ret;
+
+ mtd_ooblayout_ecc(mtd, 0, &nand->ecc);
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "Failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ ctrl->chip = chip;
+
+ return 0;
+}
+
+static int tegra_nand_probe(struct platform_device *pdev)
+{
+ struct reset_control *rst;
+ struct tegra_nand_controller *ctrl;
+ int err = 0;
+
+ ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ctrl);
+ nand_controller_init(&ctrl->controller);
+ ctrl->controller.ops = &tegra_nand_controller_ops;
+
+ ctrl->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctrl->regs))
+ return PTR_ERR(ctrl->regs);
+
+ rst = devm_reset_control_get(&pdev->dev, "nand");
+ if (IS_ERR(rst))
+ return PTR_ERR(rst);
+
+ ctrl->clk = devm_clk_get(&pdev->dev, "nand");
+ if (IS_ERR(ctrl->clk))
+ return PTR_ERR(ctrl->clk);
+
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
+
+ /*
+ * This driver doesn't support active power management yet,
+ * so we will simply keep device resumed.
+ */
+ pm_runtime_enable(&pdev->dev);
+ err = pm_runtime_resume_and_get(&pdev->dev);
+ if (err)
+ goto err_dis_pm;
+
+ err = reset_control_reset(rst);
+ if (err) {
+ dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
+ goto err_put_pm;
+ }
+
+ writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
+ writel_relaxed(HWSTATUS_MASK_DEFAULT, ctrl->regs + HWSTATUS_MASK);
+ writel_relaxed(INT_MASK, ctrl->regs + IER);
+
+ init_completion(&ctrl->command_complete);
+ init_completion(&ctrl->dma_complete);
+
+ ctrl->irq = platform_get_irq(pdev, 0);
+ if (ctrl->irq < 0) {
+ err = ctrl->irq;
+ goto err_put_pm;
+ }
+ err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
+ dev_name(&pdev->dev), ctrl);
+ if (err) {
+ dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
+ goto err_put_pm;
+ }
+
+ writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);
+
+ err = tegra_nand_chips_init(ctrl->dev, ctrl);
+ if (err)
+ goto err_put_pm;
+
+ return 0;
+
+err_put_pm:
+ pm_runtime_put_sync_suspend(ctrl->dev);
+ pm_runtime_force_suspend(ctrl->dev);
+err_dis_pm:
+ pm_runtime_disable(&pdev->dev);
+ return err;
+}
+
+static void tegra_nand_remove(struct platform_device *pdev)
+{
+ struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
+ struct nand_chip *chip = ctrl->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ WARN_ON(mtd_device_unregister(mtd));
+
+ nand_cleanup(chip);
+
+ pm_runtime_put_sync_suspend(ctrl->dev);
+ pm_runtime_force_suspend(ctrl->dev);
+}
+
+static int __maybe_unused tegra_nand_runtime_resume(struct device *dev)
+{
+ struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(ctrl->clk);
+ if (err) {
+ dev_err(dev, "Failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_nand_runtime_suspend(struct device *dev)
+{
+ struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(ctrl->clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_nand_pm = {
+ SET_RUNTIME_PM_OPS(tegra_nand_runtime_suspend, tegra_nand_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id tegra_nand_of_match[] = {
+ { .compatible = "nvidia,tegra20-nand" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tegra_nand_of_match);
+
+static struct platform_driver tegra_nand_driver = {
+ .driver = {
+ .name = "tegra-nand",
+ .of_match_table = tegra_nand_of_match,
+ .pm = &tegra_nand_pm,
+ },
+ .probe = tegra_nand_probe,
+ .remove_new = tegra_nand_remove,
+};
+module_platform_driver(tegra_nand_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra NAND driver");
+MODULE_AUTHOR("Thierry Reding <thierry.reding@nvidia.com>");
+MODULE_AUTHOR("Lucas Stach <dev@lynxeye.de>");
+MODULE_AUTHOR("Stefan Agner <stefan@agner.ch>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
new file mode 100644
index 0000000000..eddcc0728a
--- /dev/null
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TXx9 NAND flash memory controller driver
+ * Based on RBTX49xx patch from CELF patch archive.
+ *
+ * (C) Copyright TOSHIBA CORPORATION 2004-2007
+ * All Rights Reserved.
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <linux/platform_data/txx9/ndfmc.h>
+
+/* TXX9 NDFMC Registers */
+#define TXX9_NDFDTR 0x00
+#define TXX9_NDFMCR 0x04
+#define TXX9_NDFSR 0x08
+#define TXX9_NDFISR 0x0c
+#define TXX9_NDFIMR 0x10
+#define TXX9_NDFSPR 0x14
+#define TXX9_NDFRSTR 0x18 /* not TX4939 */
+
+/* NDFMCR : NDFMC Mode Control */
+#define TXX9_NDFMCR_WE 0x80
+#define TXX9_NDFMCR_ECC_ALL 0x60
+#define TXX9_NDFMCR_ECC_RESET 0x60
+#define TXX9_NDFMCR_ECC_READ 0x40
+#define TXX9_NDFMCR_ECC_ON 0x20
+#define TXX9_NDFMCR_ECC_OFF 0x00
+#define TXX9_NDFMCR_CE 0x10
+#define TXX9_NDFMCR_BSPRT 0x04 /* TX4925/TX4926 only */
+#define TXX9_NDFMCR_ALE 0x02
+#define TXX9_NDFMCR_CLE 0x01
+/* TX4939 only */
+#define TXX9_NDFMCR_X16 0x0400
+#define TXX9_NDFMCR_DMAREQ_MASK 0x0300
+#define TXX9_NDFMCR_DMAREQ_NODMA 0x0000
+#define TXX9_NDFMCR_DMAREQ_128 0x0100
+#define TXX9_NDFMCR_DMAREQ_256 0x0200
+#define TXX9_NDFMCR_DMAREQ_512 0x0300
+#define TXX9_NDFMCR_CS_MASK 0x0c
+#define TXX9_NDFMCR_CS(ch) ((ch) << 2)
+
+/* NDFSR : NDFMC Status */
+#define TXX9_NDFSR_BUSY 0x80
+/* TX4939 only */
+#define TXX9_NDFSR_DMARUN 0x40
+
+/* NDFRSTR : NDFMC Reset */
+#define TXX9_NDFRSTR_RST 0x01
+
+struct txx9ndfmc_priv {
+ struct platform_device *dev;
+ struct nand_chip chip;
+ int cs;
+ const char *mtdname;
+};
+
+#define MAX_TXX9NDFMC_DEV 4
+struct txx9ndfmc_drvdata {
+ struct mtd_info *mtds[MAX_TXX9NDFMC_DEV];
+ void __iomem *base;
+ unsigned char hold; /* in gbusclock */
+ unsigned char spw; /* in gbusclock */
+ struct nand_controller controller;
+};
+
+static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct txx9ndfmc_priv *txx9_priv = nand_get_controller_data(chip);
+ return txx9_priv->dev;
+}
+
+static void __iomem *ndregaddr(struct platform_device *dev, unsigned int reg)
+{
+ struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+
+ return drvdata->base + (reg << plat->shift);
+}
+
+static u32 txx9ndfmc_read(struct platform_device *dev, unsigned int reg)
+{
+ return __raw_readl(ndregaddr(dev, reg));
+}
+
+static void txx9ndfmc_write(struct platform_device *dev,
+ u32 val, unsigned int reg)
+{
+ __raw_writel(val, ndregaddr(dev, reg));
+}
+
+static uint8_t txx9ndfmc_read_byte(struct nand_chip *chip)
+{
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
+
+ return txx9ndfmc_read(dev, TXX9_NDFDTR);
+}
+
+static void txx9ndfmc_write_buf(struct nand_chip *chip, const uint8_t *buf,
+ int len)
+{
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
+ void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_WE, TXX9_NDFMCR);
+ while (len--)
+ __raw_writel(*buf++, ndfdtr);
+ txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
+}
+
+static void txx9ndfmc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
+ void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
+
+ while (len--)
+ *buf++ = __raw_readl(ndfdtr);
+}
+
+static void txx9ndfmc_cmd_ctrl(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ struct txx9ndfmc_priv *txx9_priv = nand_get_controller_data(chip);
+ struct platform_device *dev = txx9_priv->dev;
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ mcr &= ~(TXX9_NDFMCR_CLE | TXX9_NDFMCR_ALE | TXX9_NDFMCR_CE);
+ mcr |= ctrl & NAND_CLE ? TXX9_NDFMCR_CLE : 0;
+ mcr |= ctrl & NAND_ALE ? TXX9_NDFMCR_ALE : 0;
+ /* TXX9_NDFMCR_CE bit is 0:high 1:low */
+ mcr |= ctrl & NAND_NCE ? TXX9_NDFMCR_CE : 0;
+ if (txx9_priv->cs >= 0 && (ctrl & NAND_NCE)) {
+ mcr &= ~TXX9_NDFMCR_CS_MASK;
+ mcr |= TXX9_NDFMCR_CS(txx9_priv->cs);
+ }
+ txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
+ }
+ if (cmd != NAND_CMD_NONE)
+ txx9ndfmc_write(dev, cmd & 0xff, TXX9_NDFDTR);
+ if (plat->flags & NDFMC_PLAT_FLAG_DUMMYWRITE) {
+ /* dummy write to update external latch */
+ if ((ctrl & NAND_CTRL_CHANGE) && cmd == NAND_CMD_NONE)
+ txx9ndfmc_write(dev, 0, TXX9_NDFDTR);
+ }
+}
+
+static int txx9ndfmc_dev_ready(struct nand_chip *chip)
+{
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
+
+ return !(txx9ndfmc_read(dev, TXX9_NDFSR) & TXX9_NDFSR_BUSY);
+}
+
+static int txx9ndfmc_calculate_ecc(struct nand_chip *chip, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
+ int eccbytes;
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ mcr &= ~TXX9_NDFMCR_ECC_ALL;
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_READ, TXX9_NDFMCR);
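+	/*
+	 * The controller streams the three Hamming bytes per 256-byte step
+	 * in a different order than the software ECC layout, hence the
+	 * [1], [0], [2] shuffle below.
+	 */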
+ for (eccbytes = chip->ecc.bytes; eccbytes > 0; eccbytes -= 3) {
+ ecc_code[1] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ ecc_code[0] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ ecc_code[2] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ ecc_code += 3;
+ }
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
+ return 0;
+}
+
+static int txx9ndfmc_correct_data(struct nand_chip *chip, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ int eccsize;
+ int corrected = 0;
+ int stat;
+
+ for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) {
+ stat = rawnand_sw_hamming_correct(chip, buf, read_ecc,
+ calc_ecc);
+ if (stat < 0)
+ return stat;
+ corrected += stat;
+ buf += 256;
+ read_ecc += 3;
+ calc_ecc += 3;
+ }
+ return corrected;
+}
+
+static void txx9ndfmc_enable_hwecc(struct nand_chip *chip, int mode)
+{
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
+ u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+ mcr &= ~TXX9_NDFMCR_ECC_ALL;
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_RESET, TXX9_NDFMCR);
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
+ txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_ON, TXX9_NDFMCR);
+}
+
+static void txx9ndfmc_initialize(struct platform_device *dev)
+{
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+ struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
+ int tmout = 100;
+
+	/* Chips without NDFRSTR are reset by the NDFSPR write below */
+	if (!(plat->flags & NDFMC_PLAT_FLAG_NO_RSTR)) {
+		/* reset NDFMC */
+		txx9ndfmc_write(dev,
+				txx9ndfmc_read(dev, TXX9_NDFRSTR) |
+				TXX9_NDFRSTR_RST,
+				TXX9_NDFRSTR);
+		while (txx9ndfmc_read(dev, TXX9_NDFRSTR) & TXX9_NDFRSTR_RST) {
+			if (--tmout == 0) {
+				dev_err(&dev->dev, "reset failed.\n");
+				break;
+			}
+			udelay(1);
+		}
+	}
+ /* setup Hold Time, Strobe Pulse Width */
+ txx9ndfmc_write(dev, (drvdata->hold << 4) | drvdata->spw, TXX9_NDFSPR);
+ txx9ndfmc_write(dev,
+ (plat->flags & NDFMC_PLAT_FLAG_USE_BSPRT) ?
+ TXX9_NDFMCR_BSPRT : 0, TXX9_NDFMCR);
+}
+
+#define TXX9NDFMC_NS_TO_CYC(gbusclk, ns) \
+ DIV_ROUND_UP((ns) * DIV_ROUND_UP(gbusclk, 1000), 1000000)
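+
+/*
+ * Example: 20 ns at gbusclk = 66 MHz gives DIV_ROUND_UP(66000000, 1000) =
+ * 66000 kHz, and DIV_ROUND_UP(20 * 66000, 1000000) = 2 cycles.
+ */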
+
+static int txx9ndfmc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ chip->ecc.strength = 1;
+
+ if (mtd->writesize >= 512) {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
+ } else {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ }
+
+ chip->ecc.calculate = txx9ndfmc_calculate_ecc;
+ chip->ecc.correct = txx9ndfmc_correct_data;
+ chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
+
+ return 0;
+}
+
+static const struct nand_controller_ops txx9ndfmc_controller_ops = {
+ .attach_chip = txx9ndfmc_attach_chip,
+};
+
+static int __init txx9ndfmc_probe(struct platform_device *dev)
+{
+ struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+ int hold, spw;
+ int i;
+ struct txx9ndfmc_drvdata *drvdata;
+ unsigned long gbusclk = plat->gbus_clock;
+
+ drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+ drvdata->base = devm_platform_ioremap_resource(dev, 0);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
+
+ hold = plat->hold ?: 20; /* tDH */
+ spw = plat->spw ?: 90; /* max(tREADID, tWP, tRP) */
+
+ hold = TXX9NDFMC_NS_TO_CYC(gbusclk, hold);
+ spw = TXX9NDFMC_NS_TO_CYC(gbusclk, spw);
+ if (plat->flags & NDFMC_PLAT_FLAG_HOLDADD)
+ hold -= 2; /* actual hold time : (HOLD + 2) BUSCLK */
+ spw -= 1; /* actual wait time : (SPW + 1) BUSCLK */
+ hold = clamp(hold, 1, 15);
+ drvdata->hold = hold;
+ spw = clamp(spw, 1, 15);
+ drvdata->spw = spw;
+ dev_info(&dev->dev, "CLK:%ldMHz HOLD:%d SPW:%d\n",
+ (gbusclk + 500000) / 1000000, hold, spw);
+
+ nand_controller_init(&drvdata->controller);
+ drvdata->controller.ops = &txx9ndfmc_controller_ops;
+
+ platform_set_drvdata(dev, drvdata);
+ txx9ndfmc_initialize(dev);
+
+ for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
+ struct txx9ndfmc_priv *txx9_priv;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+
+ if (!(plat->ch_mask & (1 << i)))
+ continue;
+ txx9_priv = kzalloc(sizeof(struct txx9ndfmc_priv),
+ GFP_KERNEL);
+ if (!txx9_priv)
+ continue;
+ chip = &txx9_priv->chip;
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = &dev->dev;
+
+ chip->legacy.read_byte = txx9ndfmc_read_byte;
+ chip->legacy.read_buf = txx9ndfmc_read_buf;
+ chip->legacy.write_buf = txx9ndfmc_write_buf;
+ chip->legacy.cmd_ctrl = txx9ndfmc_cmd_ctrl;
+ chip->legacy.dev_ready = txx9ndfmc_dev_ready;
+ chip->legacy.chip_delay = 100;
+ chip->controller = &drvdata->controller;
+
+ nand_set_controller_data(chip, txx9_priv);
+ txx9_priv->dev = dev;
+
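+		/* With several channels enabled, suffix each MTD name with its CS */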
+ if (plat->ch_mask != 1) {
+ txx9_priv->cs = i;
+ txx9_priv->mtdname = kasprintf(GFP_KERNEL, "%s.%u",
+ dev_name(&dev->dev), i);
+ } else {
+ txx9_priv->cs = -1;
+ txx9_priv->mtdname = kstrdup(dev_name(&dev->dev),
+ GFP_KERNEL);
+ }
+ if (!txx9_priv->mtdname) {
+ kfree(txx9_priv);
+ dev_err(&dev->dev, "Unable to allocate MTD name.\n");
+ continue;
+ }
+ if (plat->wide_mask & (1 << i))
+ chip->options |= NAND_BUSWIDTH_16;
+
+ if (nand_scan(chip, 1)) {
+ kfree(txx9_priv->mtdname);
+ kfree(txx9_priv);
+ continue;
+ }
+ mtd->name = txx9_priv->mtdname;
+
+ mtd_device_register(mtd, NULL, 0);
+ drvdata->mtds[i] = mtd;
+ }
+
+ return 0;
+}
+
+static int __exit txx9ndfmc_remove(struct platform_device *dev)
+{
+ struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
+ int ret, i;
+
+ if (!drvdata)
+ return 0;
+ for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
+ struct mtd_info *mtd = drvdata->mtds[i];
+ struct nand_chip *chip;
+ struct txx9ndfmc_priv *txx9_priv;
+
+ if (!mtd)
+ continue;
+ chip = mtd_to_nand(mtd);
+ txx9_priv = nand_get_controller_data(chip);
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ kfree(txx9_priv->mtdname);
+ kfree(txx9_priv);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int txx9ndfmc_resume(struct platform_device *dev)
+{
+ if (platform_get_drvdata(dev))
+ txx9ndfmc_initialize(dev);
+ return 0;
+}
+#else
+#define txx9ndfmc_resume NULL
+#endif
+
+static struct platform_driver txx9ndfmc_driver = {
+ .remove = __exit_p(txx9ndfmc_remove),
+ .resume = txx9ndfmc_resume,
+ .driver = {
+ .name = "txx9ndfmc",
+ },
+};
+
+module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
+MODULE_ALIAS("platform:txx9ndfmc");
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
new file mode 100644
index 0000000000..3f783b8f76
--- /dev/null
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -0,0 +1,953 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2009-2015 Freescale Semiconductor, Inc. and others
+ *
+ * Description: MPC5125, VF610, MCF54418 and Kinetis K70 NAND driver.
+ * Jason ported to M54418TWR and MVFA5 (VF610).
+ * Authors: Stefan Agner <stefan.agner@toradex.com>
+ * Bill Pringlemeir <bpringlemeir@nbsps.com>
+ * Shaohui Xie <b21989@freescale.com>
+ * Jason Jin <Jason.jin@freescale.com>
+ *
+ * Based on original driver mpc5121_nfc.c.
+ *
+ * Limitations:
+ * - Untested on MPC5125 and M54418.
+ * - DMA and pipelining not used.
+ * - 2K pages or less.
+ * - HW ECC: Only 2K page with 64+ OOB.
+ * - HW ECC: Only 24 and 32-bit error correction implemented.
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
+
+#define DRV_NAME "vf610_nfc"
+
+/* Register Offsets */
+#define NFC_FLASH_CMD1 0x3F00
+#define NFC_FLASH_CMD2 0x3F04
+#define NFC_COL_ADDR 0x3F08
+#define NFC_ROW_ADDR 0x3F0c
+#define NFC_ROW_ADDR_INC 0x3F14
+#define NFC_FLASH_STATUS1 0x3F18
+#define NFC_FLASH_STATUS2 0x3F1c
+#define NFC_CACHE_SWAP 0x3F28
+#define NFC_SECTOR_SIZE 0x3F2c
+#define NFC_FLASH_CONFIG 0x3F30
+#define NFC_IRQ_STATUS 0x3F38
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n) ((n) * 0x1000)
+
+#define PAGE_2K 0x0800
+#define OOB_64 0x0040
+#define OOB_MAX 0x0100
+
+/* NFC_CMD2[CODE] controller cycle bit masks */
+#define COMMAND_CMD_BYTE1 BIT(14)
+#define COMMAND_CAR_BYTE1 BIT(13)
+#define COMMAND_CAR_BYTE2 BIT(12)
+#define COMMAND_RAR_BYTE1 BIT(11)
+#define COMMAND_RAR_BYTE2 BIT(10)
+#define COMMAND_RAR_BYTE3 BIT(9)
+#define COMMAND_NADDR_BYTES(x) GENMASK(13, 13 - (x) + 1)
+#define COMMAND_WRITE_DATA BIT(8)
+#define COMMAND_CMD_BYTE2 BIT(7)
+#define COMMAND_RB_HANDSHAKE BIT(6)
+#define COMMAND_READ_DATA BIT(5)
+#define COMMAND_CMD_BYTE3 BIT(4)
+#define COMMAND_READ_STATUS BIT(3)
+#define COMMAND_READ_ID BIT(2)
+
+/* NFC ECC mode define */
+#define ECC_BYPASS 0
+#define ECC_45_BYTE 6
+#define ECC_60_BYTE 7
+
+/*** Register Mask and bit definitions */
+
+/* NFC_FLASH_CMD1 Field */
+#define CMD_BYTE2_MASK 0xFF000000
+#define CMD_BYTE2_SHIFT 24
+
+/* NFC_FLASH_CM2 Field */
+#define CMD_BYTE1_MASK 0xFF000000
+#define CMD_BYTE1_SHIFT 24
+#define CMD_CODE_MASK 0x00FFFF00
+#define CMD_CODE_SHIFT 8
+#define BUFNO_MASK 0x00000006
+#define BUFNO_SHIFT 1
+#define START_BIT BIT(0)
+
+/* NFC_COL_ADDR Field */
+#define COL_ADDR_MASK 0x0000FFFF
+#define COL_ADDR_SHIFT 0
+#define COL_ADDR(pos, val) (((val) & 0xFF) << (8 * (pos)))
+
+/* NFC_ROW_ADDR Field */
+#define ROW_ADDR_MASK 0x00FFFFFF
+#define ROW_ADDR_SHIFT 0
+#define ROW_ADDR(pos, val) (((val) & 0xFF) << (8 * (pos)))
+
+#define ROW_ADDR_CHIP_SEL_RB_MASK 0xF0000000
+#define ROW_ADDR_CHIP_SEL_RB_SHIFT 28
+#define ROW_ADDR_CHIP_SEL_MASK 0x0F000000
+#define ROW_ADDR_CHIP_SEL_SHIFT 24
+
+/* NFC_FLASH_STATUS2 Field */
+#define STATUS_BYTE1_MASK 0x000000FF
+
+/* NFC_FLASH_CONFIG Field */
+#define CONFIG_ECC_SRAM_ADDR_MASK 0x7FC00000
+#define CONFIG_ECC_SRAM_ADDR_SHIFT 22
+#define CONFIG_ECC_SRAM_REQ_BIT BIT(21)
+#define CONFIG_DMA_REQ_BIT BIT(20)
+#define CONFIG_ECC_MODE_MASK 0x000E0000
+#define CONFIG_ECC_MODE_SHIFT 17
+#define CONFIG_FAST_FLASH_BIT BIT(16)
+#define CONFIG_16BIT BIT(7)
+#define CONFIG_BOOT_MODE_BIT BIT(6)
+#define CONFIG_ADDR_AUTO_INCR_BIT BIT(5)
+#define CONFIG_BUFNO_AUTO_INCR_BIT BIT(4)
+#define CONFIG_PAGE_CNT_MASK 0xF
+#define CONFIG_PAGE_CNT_SHIFT 0
+
+/* NFC_IRQ_STATUS Field */
+#define IDLE_IRQ_BIT BIT(29)
+#define IDLE_EN_BIT BIT(20)
+#define CMD_DONE_CLEAR_BIT BIT(18)
+#define IDLE_CLEAR_BIT BIT(17)
+
+/*
+ * ECC status - seems to consume 8 bytes (double word). The documented
+ * status byte is located in the lowest byte of the second word (which is
+ * the 4th or 7th byte depending on endianness).
+ * Calculate an offset to store the ECC status at the end of the buffer.
+ */
+#define ECC_SRAM_ADDR (PAGE_2K + OOB_MAX - 8)
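+/* With a 2K page and 256 byte OOB buffer this is offset 0x8f8 */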
+
+#define ECC_STATUS 0x4
+#define ECC_STATUS_MASK 0x80
+#define ECC_STATUS_ERR_COUNT 0x3F
+
+enum vf610_nfc_variant {
+ NFC_VFC610 = 1,
+};
+
+struct vf610_nfc {
+ struct nand_controller base;
+ struct nand_chip chip;
+ struct device *dev;
+ void __iomem *regs;
+ struct completion cmd_done;
+ /* Status and ID are in alternate locations. */
+ enum vf610_nfc_variant variant;
+ struct clk *clk;
+ /*
+	 * Indicates that user data is accessed (full page/oob). This
+	 * tells the driver whether to swap byte endianness.
+ * See comments in vf610_nfc_rd_from_sram/vf610_nfc_wr_to_sram.
+ */
+ bool data_access;
+ u32 ecc_mode;
+};
+
+static inline struct vf610_nfc *chip_to_nfc(struct nand_chip *chip)
+{
+ return container_of(chip, struct vf610_nfc, chip);
+}
+
+static inline u32 vf610_nfc_read(struct vf610_nfc *nfc, uint reg)
+{
+ return readl(nfc->regs + reg);
+}
+
+static inline void vf610_nfc_write(struct vf610_nfc *nfc, uint reg, u32 val)
+{
+ writel(val, nfc->regs + reg);
+}
+
+static inline void vf610_nfc_set(struct vf610_nfc *nfc, uint reg, u32 bits)
+{
+ vf610_nfc_write(nfc, reg, vf610_nfc_read(nfc, reg) | bits);
+}
+
+static inline void vf610_nfc_clear(struct vf610_nfc *nfc, uint reg, u32 bits)
+{
+ vf610_nfc_write(nfc, reg, vf610_nfc_read(nfc, reg) & ~bits);
+}
+
+static inline void vf610_nfc_set_field(struct vf610_nfc *nfc, u32 reg,
+ u32 mask, u32 shift, u32 val)
+{
+ vf610_nfc_write(nfc, reg,
+ (vf610_nfc_read(nfc, reg) & (~mask)) | val << shift);
+}
+
+static inline bool vf610_nfc_kernel_is_little_endian(void)
+{
+#ifdef __LITTLE_ENDIAN
+ return true;
+#else
+ return false;
+#endif
+}
+
+/*
+ * Read accessor for internal SRAM buffer
+ * @dst: destination address in regular memory
+ * @src: source address in SRAM buffer
+ * @len: bytes to copy
+ * @fix_endian: Fix endianness if required
+ *
+ * Use this accessor for the internal SRAM buffers. On the ARM
+ * Freescale Vybrid SoC it's known that the driver can treat
+ * the SRAM buffer as if it's memory. Other platforms might need
+ * to treat the buffers differently.
+ *
+ * The controller stores bytes from the NAND chip internally in big
+ * endianness. On little endian platforms such as Vybrid this leads
+ * to reversed byte order.
+ * For performance reason (and earlier probably due to unawareness)
+ * the driver avoids correcting endianness where it has control over
+ * write and read side (e.g. page wise data access).
+ */
+static inline void vf610_nfc_rd_from_sram(void *dst, const void __iomem *src,
+ size_t len, bool fix_endian)
+{
+ if (vf610_nfc_kernel_is_little_endian() && fix_endian) {
+ unsigned int i;
+
+ for (i = 0; i < len; i += 4) {
+ u32 val = swab32(__raw_readl(src + i));
+
+ memcpy(dst + i, &val, min(sizeof(val), len - i));
+ }
+ } else {
+ memcpy_fromio(dst, src, len);
+ }
+}
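+/*
+ * Worked example (illustrative only): assume the NAND chip delivered the
+ * bytes 11 22 33 44. Due to the controller's big-endian storage, a
+ * little-endian CPU sees them byte-wise as 44 33 22 11; with fix_endian
+ * set, the __raw_readl()/swab32() pair above restores 11 22 33 44 in dst.
+ */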
+
+/*
+ * Write accessor for internal SRAM buffer
+ * @dst: destination address in SRAM buffer
+ * @src: source address in regular memory
+ * @len: bytes to copy
+ * @fix_endian: Fix endianness if required
+ *
+ * Use this accessor for the internal SRAM buffers. On the ARM
+ * Freescale Vybrid SoC it's known that the driver can treat
+ * the SRAM buffer as if it were memory. Other platforms might
+ * need to treat the buffers differently.
+ *
+ * The controller stores bytes from the NAND chip internally in big
+ * endianness. On little endian platforms such as Vybrid this leads
+ * to reversed byte order.
+ * For performance reasons (and earlier probably due to unawareness)
+ * the driver avoids correcting endianness where it has control over
+ * both the write and read side (e.g. page-wise data access).
+ */
+static inline void vf610_nfc_wr_to_sram(void __iomem *dst, const void *src,
+ size_t len, bool fix_endian)
+{
+ if (vf610_nfc_kernel_is_little_endian() && fix_endian) {
+ unsigned int i;
+
+ for (i = 0; i < len; i += 4) {
+ u32 val;
+
+ memcpy(&val, src + i, min(sizeof(val), len - i));
+ __raw_writel(swab32(val), dst + i);
+ }
+ } else {
+ memcpy_toio(dst, src, len);
+ }
+}
+
+/* Clear flags for upcoming command */
+static inline void vf610_nfc_clear_status(struct vf610_nfc *nfc)
+{
+ u32 tmp = vf610_nfc_read(nfc, NFC_IRQ_STATUS);
+
+ tmp |= CMD_DONE_CLEAR_BIT | IDLE_CLEAR_BIT;
+ vf610_nfc_write(nfc, NFC_IRQ_STATUS, tmp);
+}
+
+static void vf610_nfc_done(struct vf610_nfc *nfc)
+{
+ unsigned long timeout = msecs_to_jiffies(100);
+
+	/*
+	 * A barrier is needed after this write: it must complete
+	 * before the next register is read for the first time.
+	 * vf610_nfc_set implies such a barrier by using writel
+	 * to write to the register.
+	 */
+ vf610_nfc_set(nfc, NFC_IRQ_STATUS, IDLE_EN_BIT);
+ vf610_nfc_set(nfc, NFC_FLASH_CMD2, START_BIT);
+
+ if (!wait_for_completion_timeout(&nfc->cmd_done, timeout))
+ dev_warn(nfc->dev, "Timeout while waiting for BUSY.\n");
+
+ vf610_nfc_clear_status(nfc);
+}
+
+static irqreturn_t vf610_nfc_irq(int irq, void *data)
+{
+ struct vf610_nfc *nfc = data;
+
+ vf610_nfc_clear(nfc, NFC_IRQ_STATUS, IDLE_EN_BIT);
+ complete(&nfc->cmd_done);
+
+ return IRQ_HANDLED;
+}
+
+static inline void vf610_nfc_ecc_mode(struct vf610_nfc *nfc, int ecc_mode)
+{
+ vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG,
+ CONFIG_ECC_MODE_MASK,
+ CONFIG_ECC_MODE_SHIFT, ecc_mode);
+}
+
+static inline void vf610_nfc_run(struct vf610_nfc *nfc, u32 col, u32 row,
+ u32 cmd1, u32 cmd2, u32 trfr_sz)
+{
+ vf610_nfc_set_field(nfc, NFC_COL_ADDR, COL_ADDR_MASK,
+ COL_ADDR_SHIFT, col);
+
+ vf610_nfc_set_field(nfc, NFC_ROW_ADDR, ROW_ADDR_MASK,
+ ROW_ADDR_SHIFT, row);
+
+ vf610_nfc_write(nfc, NFC_SECTOR_SIZE, trfr_sz);
+ vf610_nfc_write(nfc, NFC_FLASH_CMD1, cmd1);
+ vf610_nfc_write(nfc, NFC_FLASH_CMD2, cmd2);
+
+ dev_dbg(nfc->dev,
+ "col 0x%04x, row 0x%08x, cmd1 0x%08x, cmd2 0x%08x, len %d\n",
+ col, row, cmd1, cmd2, trfr_sz);
+
+ vf610_nfc_done(nfc);
+}
+
+static inline const struct nand_op_instr *
+vf610_get_next_instr(const struct nand_subop *subop, int *op_id)
+{
+ if (*op_id + 1 >= subop->ninstrs)
+ return NULL;
+
+ (*op_id)++;
+
+ return &subop->instrs[*op_id];
+}
+
+static int vf610_nfc_cmd(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ int op_id = -1, trfr_sz = 0, offset = 0;
+ u32 col = 0, row = 0, cmd1 = 0, cmd2 = 0, code = 0;
+ bool force8bit = false;
+
+ /*
+ * Some ops are optional, but the hardware requires the operations
+ * to be in this exact order.
+ * The op parser enforces the order and makes sure that there isn't
+ * a read and write element in a single operation.
+ */
+ instr = vf610_get_next_instr(subop, &op_id);
+ if (!instr)
+ return -EINVAL;
+
+ if (instr && instr->type == NAND_OP_CMD_INSTR) {
+ cmd2 |= instr->ctx.cmd.opcode << CMD_BYTE1_SHIFT;
+ code |= COMMAND_CMD_BYTE1;
+
+ instr = vf610_get_next_instr(subop, &op_id);
+ }
+
+ if (instr && instr->type == NAND_OP_ADDR_INSTR) {
+ int naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ int i = nand_subop_get_addr_start_off(subop, op_id);
+
+ for (; i < naddrs; i++) {
+ u8 val = instr->ctx.addr.addrs[i];
+
+ if (i < 2)
+ col |= COL_ADDR(i, val);
+ else
+ row |= ROW_ADDR(i - 2, val);
+ }
+ code |= COMMAND_NADDR_BYTES(naddrs);
+
+ instr = vf610_get_next_instr(subop, &op_id);
+ }
+
+ if (instr && instr->type == NAND_OP_DATA_OUT_INSTR) {
+ trfr_sz = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ force8bit = instr->ctx.data.force_8bit;
+
+ /*
+ * Don't fix endianness on page access for historical reasons.
+ * See comment in vf610_nfc_wr_to_sram
+ */
+ vf610_nfc_wr_to_sram(nfc->regs + NFC_MAIN_AREA(0) + offset,
+ instr->ctx.data.buf.out + offset,
+ trfr_sz, !nfc->data_access);
+ code |= COMMAND_WRITE_DATA;
+
+ instr = vf610_get_next_instr(subop, &op_id);
+ }
+
+ if (instr && instr->type == NAND_OP_CMD_INSTR) {
+ cmd1 |= instr->ctx.cmd.opcode << CMD_BYTE2_SHIFT;
+ code |= COMMAND_CMD_BYTE2;
+
+ instr = vf610_get_next_instr(subop, &op_id);
+ }
+
+ if (instr && instr->type == NAND_OP_WAITRDY_INSTR) {
+ code |= COMMAND_RB_HANDSHAKE;
+
+ instr = vf610_get_next_instr(subop, &op_id);
+ }
+
+ if (instr && instr->type == NAND_OP_DATA_IN_INSTR) {
+ trfr_sz = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ force8bit = instr->ctx.data.force_8bit;
+
+ code |= COMMAND_READ_DATA;
+ }
+
+ if (force8bit && (chip->options & NAND_BUSWIDTH_16))
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+
+ cmd2 |= code << CMD_CODE_SHIFT;
+
+ vf610_nfc_run(nfc, col, row, cmd1, cmd2, trfr_sz);
+
+ if (instr && instr->type == NAND_OP_DATA_IN_INSTR) {
+ /*
+ * Don't fix endianness on page access for historical reasons.
+ * See comment in vf610_nfc_rd_from_sram
+ */
+ vf610_nfc_rd_from_sram(instr->ctx.data.buf.in + offset,
+ nfc->regs + NFC_MAIN_AREA(0) + offset,
+ trfr_sz, !nfc->data_access);
+ }
+
+ if (force8bit && (chip->options & NAND_BUSWIDTH_16))
+ vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+
+ return 0;
+}
+
+static const struct nand_op_parser vf610_nfc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(vf610_nfc_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, PAGE_2K + OOB_MAX),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(vf610_nfc_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, PAGE_2K + OOB_MAX)),
+ );
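+/*
+ * For orientation (informal, derived from the page helpers below): a
+ * program sequence (SEQIN, up to 5 address cycles, data out, PAGEPROG,
+ * wait) matches the first pattern, while a READ0/READSTART page read with
+ * trailing data-in matches the second.
+ */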
+
+/*
+ * This function supports Vybrid only (MPC5125 would have full RB and four CS)
+ */
+static void vf610_nfc_select_target(struct nand_chip *chip, unsigned int cs)
+{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ u32 tmp;
+
+ /* Vybrid only (MPC5125 would have full RB and four CS) */
+ if (nfc->variant != NFC_VFC610)
+ return;
+
+ tmp = vf610_nfc_read(nfc, NFC_ROW_ADDR);
+ tmp &= ~(ROW_ADDR_CHIP_SEL_RB_MASK | ROW_ADDR_CHIP_SEL_MASK);
+ tmp |= 1 << ROW_ADDR_CHIP_SEL_RB_SHIFT;
+ tmp |= BIT(cs) << ROW_ADDR_CHIP_SEL_SHIFT;
+
+ vf610_nfc_write(nfc, NFC_ROW_ADDR, tmp);
+}
+
+static int vf610_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ if (!check_only)
+ vf610_nfc_select_target(chip, op->cs);
+
+ return nand_op_parser_exec_op(chip, &vf610_nfc_op_parser, op,
+ check_only);
+}
+
+static inline int vf610_nfc_correct_data(struct nand_chip *chip, uint8_t *dat,
+ uint8_t *oob, int page)
+{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 ecc_status_off = NFC_MAIN_AREA(0) + ECC_SRAM_ADDR + ECC_STATUS;
+ u8 ecc_status;
+ u8 ecc_count;
+ int flips_threshold = nfc->chip.ecc.strength / 2;
+
+ ecc_status = vf610_nfc_read(nfc, ecc_status_off) & 0xff;
+ ecc_count = ecc_status & ECC_STATUS_ERR_COUNT;
+
+ if (!(ecc_status & ECC_STATUS_MASK))
+ return ecc_count;
+
+ nfc->data_access = true;
+ nand_read_oob_op(&nfc->chip, page, 0, oob, mtd->oobsize);
+ nfc->data_access = false;
+
+ /*
+ * On an erased page, bit count (including OOB) should be zero or
+	 * at least less than half of the ECC strength.
+ */
+ return nand_check_erased_ecc_chunk(dat, nfc->chip.ecc.size, oob,
+ mtd->oobsize, NULL, 0,
+ flips_threshold);
+}
+
+static void vf610_nfc_fill_row(struct nand_chip *chip, int page, u32 *code,
+ u32 *row)
+{
+ *row = ROW_ADDR(0, page & 0xff) | ROW_ADDR(1, page >> 8);
+ *code |= COMMAND_RAR_BYTE1 | COMMAND_RAR_BYTE2;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ *row |= ROW_ADDR(2, page >> 16);
+ *code |= COMMAND_RAR_BYTE3;
+ }
+}
+
+static int vf610_nfc_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int trfr_sz = mtd->writesize + mtd->oobsize;
+ u32 row = 0, cmd1 = 0, cmd2 = 0, code = 0;
+ int stat;
+
+ vf610_nfc_select_target(chip, chip->cur_cs);
+
+ cmd2 |= NAND_CMD_READ0 << CMD_BYTE1_SHIFT;
+ code |= COMMAND_CMD_BYTE1 | COMMAND_CAR_BYTE1 | COMMAND_CAR_BYTE2;
+
+ vf610_nfc_fill_row(chip, page, &code, &row);
+
+ cmd1 |= NAND_CMD_READSTART << CMD_BYTE2_SHIFT;
+ code |= COMMAND_CMD_BYTE2 | COMMAND_RB_HANDSHAKE | COMMAND_READ_DATA;
+
+ cmd2 |= code << CMD_CODE_SHIFT;
+
+ vf610_nfc_ecc_mode(nfc, nfc->ecc_mode);
+ vf610_nfc_run(nfc, 0, row, cmd1, cmd2, trfr_sz);
+ vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
+
+ /*
+ * Don't fix endianness on page access for historical reasons.
+ * See comment in vf610_nfc_rd_from_sram
+ */
+ vf610_nfc_rd_from_sram(buf, nfc->regs + NFC_MAIN_AREA(0),
+ mtd->writesize, false);
+ if (oob_required)
+ vf610_nfc_rd_from_sram(chip->oob_poi,
+ nfc->regs + NFC_MAIN_AREA(0) +
+ mtd->writesize,
+ mtd->oobsize, false);
+
+ stat = vf610_nfc_correct_data(chip, buf, chip->oob_poi, page);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ return 0;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ return stat;
+ }
+}
+
+static int vf610_nfc_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int trfr_sz = mtd->writesize + mtd->oobsize;
+ u32 row = 0, cmd1 = 0, cmd2 = 0, code = 0;
+ u8 status;
+ int ret;
+
+ vf610_nfc_select_target(chip, chip->cur_cs);
+
+ cmd2 |= NAND_CMD_SEQIN << CMD_BYTE1_SHIFT;
+ code |= COMMAND_CMD_BYTE1 | COMMAND_CAR_BYTE1 | COMMAND_CAR_BYTE2;
+
+ vf610_nfc_fill_row(chip, page, &code, &row);
+
+ cmd1 |= NAND_CMD_PAGEPROG << CMD_BYTE2_SHIFT;
+ code |= COMMAND_CMD_BYTE2 | COMMAND_WRITE_DATA;
+
+ /*
+ * Don't fix endianness on page access for historical reasons.
+ * See comment in vf610_nfc_wr_to_sram
+ */
+ vf610_nfc_wr_to_sram(nfc->regs + NFC_MAIN_AREA(0), buf,
+ mtd->writesize, false);
+
+ code |= COMMAND_RB_HANDSHAKE;
+ cmd2 |= code << CMD_CODE_SHIFT;
+
+ vf610_nfc_ecc_mode(nfc, nfc->ecc_mode);
+ vf610_nfc_run(nfc, 0, row, cmd1, cmd2, trfr_sz);
+ vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+static int vf610_nfc_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ int ret;
+
+ nfc->data_access = true;
+ ret = nand_read_page_raw(chip, buf, oob_required, page);
+ nfc->data_access = false;
+
+ return ret;
+}
+
+static int vf610_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ nfc->data_access = true;
+ ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ if (!ret && oob_required)
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ nfc->data_access = false;
+
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int vf610_nfc_read_oob(struct nand_chip *chip, int page)
+{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ int ret;
+
+ nfc->data_access = true;
+ ret = nand_read_oob_std(chip, page);
+ nfc->data_access = false;
+
+ return ret;
+}
+
+static int vf610_nfc_write_oob(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ int ret;
+
+ nfc->data_access = true;
+ ret = nand_prog_page_begin_op(chip, page, mtd->writesize,
+ chip->oob_poi, mtd->oobsize);
+ nfc->data_access = false;
+
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
+}
+
+static const struct of_device_id vf610_nfc_dt_ids[] = {
+ { .compatible = "fsl,vf610-nfc", .data = (void *)NFC_VFC610 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vf610_nfc_dt_ids);
+
+static void vf610_nfc_preinit_controller(struct vf610_nfc *nfc)
+{
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_ADDR_AUTO_INCR_BIT);
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_BUFNO_AUTO_INCR_BIT);
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_BOOT_MODE_BIT);
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_DMA_REQ_BIT);
+ vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_FAST_FLASH_BIT);
+ vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
+
+ /* Disable virtual pages, only one elementary transfer unit */
+ vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG, CONFIG_PAGE_CNT_MASK,
+ CONFIG_PAGE_CNT_SHIFT, 1);
+}
+
+static void vf610_nfc_init_controller(struct vf610_nfc *nfc)
+{
+ if (nfc->chip.options & NAND_BUSWIDTH_16)
+ vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+ else
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+
+ if (nfc->chip.ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ /* Set ECC status offset in SRAM */
+ vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG,
+ CONFIG_ECC_SRAM_ADDR_MASK,
+ CONFIG_ECC_SRAM_ADDR_SHIFT,
+ ECC_SRAM_ADDR >> 3);
+
+ /* Enable ECC status in SRAM */
+ vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_ECC_SRAM_REQ_BIT);
+ }
+}
+
+static int vf610_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+
+ vf610_nfc_init_controller(nfc);
+
+ /* Bad block options. */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ /* Single buffer only, max 256 OOB minus ECC status */
+ if (mtd->writesize + mtd->oobsize > PAGE_2K + OOB_MAX - 8) {
+ dev_err(nfc->dev, "Unsupported flash page size\n");
+ return -ENXIO;
+ }
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+ return 0;
+
+ if (mtd->writesize != PAGE_2K && mtd->oobsize < 64) {
+ dev_err(nfc->dev, "Unsupported flash with hwecc\n");
+ return -ENXIO;
+ }
+
+ if (chip->ecc.size != mtd->writesize) {
+ dev_err(nfc->dev, "Step size needs to be page size\n");
+ return -ENXIO;
+ }
+
+ /* Only 64 byte ECC layouts known */
+ if (mtd->oobsize > 64)
+ mtd->oobsize = 64;
+
+ /* Use default large page ECC layout defined in NAND core */
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+ if (chip->ecc.strength == 32) {
+ nfc->ecc_mode = ECC_60_BYTE;
+ chip->ecc.bytes = 60;
+ } else if (chip->ecc.strength == 24) {
+ nfc->ecc_mode = ECC_45_BYTE;
+ chip->ecc.bytes = 45;
+ } else {
+ dev_err(nfc->dev, "Unsupported ECC strength\n");
+ return -ENXIO;
+ }
+
+ chip->ecc.read_page = vf610_nfc_read_page;
+ chip->ecc.write_page = vf610_nfc_write_page;
+ chip->ecc.read_page_raw = vf610_nfc_read_page_raw;
+ chip->ecc.write_page_raw = vf610_nfc_write_page_raw;
+ chip->ecc.read_oob = vf610_nfc_read_oob;
+ chip->ecc.write_oob = vf610_nfc_write_oob;
+
+ chip->ecc.size = PAGE_2K;
+
+ return 0;
+}
+
+static const struct nand_controller_ops vf610_nfc_controller_ops = {
+ .attach_chip = vf610_nfc_attach_chip,
+ .exec_op = vf610_nfc_exec_op,
+};
+
+static int vf610_nfc_probe(struct platform_device *pdev)
+{
+ struct vf610_nfc *nfc;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct device_node *child;
+ const struct of_device_id *of_id;
+ int err;
+ int irq;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = &pdev->dev;
+ chip = &nfc->chip;
+ mtd = nand_to_mtd(chip);
+
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = nfc->dev;
+ mtd->name = DRV_NAME;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ nfc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ nfc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(nfc->clk)) {
+ dev_err(nfc->dev, "Unable to get and enable clock!\n");
+ return PTR_ERR(nfc->clk);
+ }
+
+ of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
+ if (!of_id)
+ return -ENODEV;
+
+ nfc->variant = (uintptr_t)of_id->data;
+
+ for_each_available_child_of_node(nfc->dev->of_node, child) {
+ if (of_device_is_compatible(child, "fsl,vf610-nfc-nandcs")) {
+
+ if (nand_get_flash_node(chip)) {
+ dev_err(nfc->dev,
+ "Only one NAND chip supported!\n");
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ nand_set_flash_node(chip, child);
+ }
+ }
+
+ if (!nand_get_flash_node(chip)) {
+ dev_err(nfc->dev, "NAND chip sub-node missing!\n");
+ return -ENODEV;
+ }
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+ init_completion(&nfc->cmd_done);
+
+ err = devm_request_irq(nfc->dev, irq, vf610_nfc_irq, 0, DRV_NAME, nfc);
+ if (err) {
+ dev_err(nfc->dev, "Error requesting IRQ!\n");
+ return err;
+ }
+
+ vf610_nfc_preinit_controller(nfc);
+
+ nand_controller_init(&nfc->base);
+ nfc->base.ops = &vf610_nfc_controller_ops;
+ chip->controller = &nfc->base;
+
+ /* Scan the NAND chip */
+ err = nand_scan(chip, 1);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, nfc);
+
+ /* Register device in MTD */
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto err_cleanup_nand;
+ return 0;
+
+err_cleanup_nand:
+ nand_cleanup(chip);
+ return err;
+}
+
+static void vf610_nfc_remove(struct platform_device *pdev)
+{
+ struct vf610_nfc *nfc = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &nfc->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vf610_nfc_suspend(struct device *dev)
+{
+ struct vf610_nfc *nfc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(nfc->clk);
+ return 0;
+}
+
+static int vf610_nfc_resume(struct device *dev)
+{
+ struct vf610_nfc *nfc = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(nfc->clk);
+ if (err)
+ return err;
+
+ vf610_nfc_preinit_controller(nfc);
+ vf610_nfc_init_controller(nfc);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(vf610_nfc_pm_ops, vf610_nfc_suspend, vf610_nfc_resume);
+
+static struct platform_driver vf610_nfc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = vf610_nfc_dt_ids,
+ .pm = &vf610_nfc_pm_ops,
+ },
+ .probe = vf610_nfc_probe,
+ .remove_new = vf610_nfc_remove,
+};
+
+module_platform_driver(vf610_nfc_driver);
+
+MODULE_AUTHOR("Stefan Agner <stefan.agner@toradex.com>");
+MODULE_DESCRIPTION("Freescale VF610/MPC5125 NFC MTD NAND driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
new file mode 100644
index 0000000000..51d802a165
--- /dev/null
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright © 2012 John Crispin <john@phrozen.org>
+ * Copyright © 2016 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/mtd/rawnand.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <lantiq_soc.h>
+
+/* nand registers */
+#define EBU_ADDSEL1 0x24
+#define EBU_NAND_CON 0xB0
+#define EBU_NAND_WAIT 0xB4
+#define NAND_WAIT_RD BIT(0) /* NAND flash status output */
+#define NAND_WAIT_WR_C BIT(3) /* NAND Write/Read complete */
+#define EBU_NAND_ECC0 0xB8
+#define EBU_NAND_ECC_AC 0xBC
+
+/*
+ * nand commands
+ * The pins of the NAND chip are selected based on the address bits of the
+ * "register" read and write. There are no special registers, but an
+ * address range and the lower address bits are used to activate the
+ * correct line. For example, when bit (1 << 2) is set in the address,
+ * the ALE pin will be activated.
+ */
+#define NAND_CMD_ALE BIT(2) /* address latch enable */
+#define NAND_CMD_CLE BIT(3) /* command latch enable */
+#define NAND_CMD_CS BIT(4) /* chip select */
+#define NAND_CMD_SE BIT(5) /* spare area access latch */
+#define NAND_CMD_WP BIT(6) /* write protect */
+#define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE)
+#define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE)
+#define NAND_WRITE_DATA (NAND_CMD_CS)
+#define NAND_READ_DATA (NAND_CMD_CS)
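+/*
+ * Example of the scheme above (illustrative): a command byte is written to
+ * nandaddr + NAND_WRITE_CMD, i.e. with CS (bit 4) and CLE (bit 3) set in
+ * the low address bits, so those pins are asserted for that one access.
+ */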
+
+/* we need to tell the EBU which address we mapped the NAND to */
+#define ADDSEL1_MASK(x) (x << 4)
+#define ADDSEL1_REGEN 1
+
+/* we need to tell the EBU that we have nand attached and set it up properly */
+#define BUSCON1_SETUP (1 << 22)
+#define BUSCON1_BCGEN_RES (0x3 << 12)
+#define BUSCON1_WAITWRC2 (2 << 8)
+#define BUSCON1_WAITRDC2 (2 << 6)
+#define BUSCON1_HOLDC1 (1 << 4)
+#define BUSCON1_RECOVC1 (1 << 2)
+#define BUSCON1_CMULT4 1
+
+#define NAND_CON_CE (1 << 20)
+#define NAND_CON_OUT_CS1 (1 << 10)
+#define NAND_CON_IN_CS1 (1 << 8)
+#define NAND_CON_PRE_P (1 << 7)
+#define NAND_CON_WP_P (1 << 6)
+#define NAND_CON_SE_P (1 << 5)
+#define NAND_CON_CS_P (1 << 4)
+#define NAND_CON_CSMUX (1 << 1)
+#define NAND_CON_NANDM 1
+
+struct xway_nand_data {
+ struct nand_controller controller;
+ struct nand_chip chip;
+ unsigned long csflags;
+ void __iomem *nandaddr;
+};
+
+static u8 xway_readb(struct mtd_info *mtd, int op)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct xway_nand_data *data = nand_get_controller_data(chip);
+
+ return readb(data->nandaddr + op);
+}
+
+static void xway_writeb(struct mtd_info *mtd, int op, u8 value)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct xway_nand_data *data = nand_get_controller_data(chip);
+
+ writeb(value, data->nandaddr + op);
+}
+
+static void xway_select_chip(struct nand_chip *chip, int select)
+{
+ struct xway_nand_data *data = nand_get_controller_data(chip);
+
+ switch (select) {
+ case -1:
+ ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
+ ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+ spin_unlock_irqrestore(&ebu_lock, data->csflags);
+ break;
+ case 0:
+ spin_lock_irqsave(&ebu_lock, data->csflags);
+ ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
+ ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void xway_cmd_ctrl(struct nand_chip *chip, int cmd, unsigned int ctrl)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ xway_writeb(mtd, NAND_WRITE_CMD, cmd);
+ else if (ctrl & NAND_ALE)
+ xway_writeb(mtd, NAND_WRITE_ADDR, cmd);
+
+ while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ ;
+}
+
+static int xway_dev_ready(struct nand_chip *chip)
+{
+ return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD;
+}
+
+static unsigned char xway_read_byte(struct nand_chip *chip)
+{
+ return xway_readb(nand_to_mtd(chip), NAND_READ_DATA);
+}
+
+static void xway_read_buf(struct nand_chip *chip, u_char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = xway_readb(nand_to_mtd(chip), NAND_WRITE_DATA);
+}
+
+static void xway_write_buf(struct nand_chip *chip, const u_char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ xway_writeb(nand_to_mtd(chip), NAND_WRITE_DATA, buf[i]);
+}
+
+static int xway_attach_chip(struct nand_chip *chip)
+{
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+ return 0;
+}
+
+static const struct nand_controller_ops xway_nand_ops = {
+ .attach_chip = xway_attach_chip,
+};
+
+/*
+ * Probe for the NAND device.
+ */
+static int xway_nand_probe(struct platform_device *pdev)
+{
+ struct xway_nand_data *data;
+ struct mtd_info *mtd;
+ int err;
+ u32 cs;
+ u32 cs_flag = 0;
+
+ /* Allocate memory for the device structure (and zero it) */
+ data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->nandaddr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->nandaddr))
+ return PTR_ERR(data->nandaddr);
+
+ nand_set_flash_node(&data->chip, pdev->dev.of_node);
+ mtd = nand_to_mtd(&data->chip);
+ mtd->dev.parent = &pdev->dev;
+
+ data->chip.legacy.cmd_ctrl = xway_cmd_ctrl;
+ data->chip.legacy.dev_ready = xway_dev_ready;
+ data->chip.legacy.select_chip = xway_select_chip;
+ data->chip.legacy.write_buf = xway_write_buf;
+ data->chip.legacy.read_buf = xway_read_buf;
+ data->chip.legacy.read_byte = xway_read_byte;
+ data->chip.legacy.chip_delay = 30;
+
+ nand_controller_init(&data->controller);
+ data->controller.ops = &xway_nand_ops;
+ data->chip.controller = &data->controller;
+
+ platform_set_drvdata(pdev, data);
+ nand_set_controller_data(&data->chip, data);
+
+	/* Load our CS from the DT: either we find a valid 1 or default to 0 */
+ err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs);
+ if (!err && cs == 1)
+ cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
+
+ /* setup the EBU to run in NAND mode on our base addr */
+ ltq_ebu_w32(CPHYSADDR(data->nandaddr)
+ | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
+
+ ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
+ | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
+ | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
+
+ ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
+ | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
+ | cs_flag, EBU_NAND_CON);
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(&data->chip, 1);
+ if (err)
+ return err;
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ nand_cleanup(&data->chip);
+
+ return err;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static void xway_nand_remove(struct platform_device *pdev)
+{
+ struct xway_nand_data *data = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &data->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+}
+
+static const struct of_device_id xway_nand_match[] = {
+ { .compatible = "lantiq,nand-xway" },
+ {},
+};
+
+static struct platform_driver xway_nand_driver = {
+ .probe = xway_nand_probe,
+ .remove_new = xway_nand_remove,
+ .driver = {
+ .name = "lantiq,nand-xway",
+ .of_match_table = xway_nand_match,
+ },
+};
+
+builtin_platform_driver(xway_nand_driver);
diff --git a/drivers/mtd/nand/spi/Kconfig b/drivers/mtd/nand/spi/Kconfig
new file mode 100644
index 0000000000..3d7649a2dd
--- /dev/null
+++ b/drivers/mtd/nand/spi/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig MTD_SPI_NAND
+ tristate "SPI NAND device Support"
+ select MTD_NAND_CORE
+ select MTD_NAND_ECC
+ depends on SPI_MASTER
+ select SPI_MEM
+ help
+ This is the framework for the SPI NAND device drivers.
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
new file mode 100644
index 0000000000..cd8b66bf77
--- /dev/null
+++ b/drivers/mtd/nand/spi/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+spinand-objs := core.o alliancememory.o ato.o esmt.o gigadevice.o macronix.o
+spinand-objs += micron.o paragon.o toshiba.o winbond.o xtx.o
+obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/alliancememory.c b/drivers/mtd/nand/spi/alliancememory.c
new file mode 100644
index 0000000000..7936ea546b
--- /dev/null
+++ b/drivers/mtd/nand/spi/alliancememory.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Mario Kicherer <dev@kicherer.org>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_ALLIANCEMEMORY 0x52
+
+#define AM_STATUS_ECC_BITMASK (3 << 4)
+
+#define AM_STATUS_ECC_NONE_DETECTED (0 << 4)
+#define AM_STATUS_ECC_CORRECTED (1 << 4)
+#define AM_STATUS_ECC_ERRORED (2 << 4)
+#define AM_STATUS_ECC_MAX_CORRECTED (3 << 4)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int am_get_eccsize(struct mtd_info *mtd)
+{
+ if (mtd->oobsize == 64)
+ return 0x20;
+ else if (mtd->oobsize == 128)
+ return 0x38;
+ else if (mtd->oobsize == 256)
+ return 0x70;
+ else
+ return -EINVAL;
+}
+
+static int am_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ int ecc_bytes;
+
+ ecc_bytes = am_get_eccsize(mtd);
+ if (ecc_bytes < 0)
+ return ecc_bytes;
+
+ region->offset = mtd->oobsize - ecc_bytes;
+ region->length = ecc_bytes;
+
+ return 0;
+}
+
+static int am_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ int ecc_bytes;
+
+ if (section)
+ return -ERANGE;
+
+ ecc_bytes = am_get_eccsize(mtd);
+ if (ecc_bytes < 0)
+ return ecc_bytes;
+
+ /*
+ * It is unclear how many bytes are used for the bad block marker. We
+ * reserve the common two bytes here.
+ *
+ * The free area in this kind of flash is divided into chunks where the
+ * first 4 bytes of each chunk are unprotected. The number of chunks
+ * depends on the specific model. The models with 4096+256 bytes pages
+ * have 8 chunks, the others 4 chunks.
+ */
+
+ region->offset = 2;
+ region->length = mtd->oobsize - 2 - ecc_bytes;
+
+ return 0;
+}
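+/*
+ * Worked example (illustrative): on a part with 128 bytes of OOB,
+ * am_get_eccsize() returns 0x38 (56), so the ECC region spans bytes
+ * 72..127 and the free region bytes 2..71 (70 bytes).
+ */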
+
+static const struct mtd_ooblayout_ops am_ooblayout = {
+ .ecc = am_ooblayout_ecc,
+ .free = am_ooblayout_free,
+};
+
+static int am_ecc_get_status(struct spinand_device *spinand, u8 status)
+{
+ switch (status & AM_STATUS_ECC_BITMASK) {
+ case AM_STATUS_ECC_NONE_DETECTED:
+ return 0;
+
+ case AM_STATUS_ECC_CORRECTED:
+ /*
+		 * use oobsize to determine the flash model and the maximum
+		 * number of correctable errors, and return maximum - 1 by
+		 * convention
+ */
+ if (spinand->base.mtd.oobsize == 64)
+ return 3;
+ else
+ return 7;
+
+ case AM_STATUS_ECC_ERRORED:
+ return -EBADMSG;
+
+ case AM_STATUS_ECC_MAX_CORRECTED:
+ /*
+		 * use oobsize to determine the flash model and the maximum
+		 * number of correctable errors
+ */
+ if (spinand->base.mtd.oobsize == 64)
+ return 4;
+ else
+ return 8;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct spinand_info alliancememory_spinand_table[] = {
+ SPINAND_INFO("AS5F34G04SND",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x2f),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&am_ooblayout,
+ am_ecc_get_status)),
+};
+
+static const struct spinand_manufacturer_ops alliancememory_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer alliancememory_spinand_manufacturer = {
+ .id = SPINAND_MFR_ALLIANCEMEMORY,
+ .name = "AllianceMemory",
+ .chips = alliancememory_spinand_table,
+ .nchips = ARRAY_SIZE(alliancememory_spinand_table),
+ .ops = &alliancememory_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c
new file mode 100644
index 0000000000..82b377c068
--- /dev/null
+++ b/drivers/mtd/nand/spi/ato.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Aidan MacDonald
+ *
+ * Author: Aidan MacDonald <aidanmacdonald.0x0@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+
+#define SPINAND_MFR_ATO 0x9b
+
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+
+static int ato25d1ga_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 8;
+ region->length = 8;
+ return 0;
+}
+
+static int ato25d1ga_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ if (section) {
+ region->offset = (16 * section);
+ region->length = 8;
+ } else {
+ /* first byte of section 0 is reserved for the BBM */
+ region->offset = 1;
+ region->length = 7;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops ato25d1ga_ooblayout = {
+ .ecc = ato25d1ga_ooblayout_ecc,
+ .free = ato25d1ga_ooblayout_free,
+};
+
+
+static const struct spinand_info ato_spinand_table[] = {
+ SPINAND_INFO("ATO25D1GA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x12),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&ato25d1ga_ooblayout, NULL)),
+};
+
+static const struct spinand_manufacturer_ops ato_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer ato_spinand_manufacturer = {
+ .id = SPINAND_MFR_ATO,
+ .name = "ATO",
+ .chips = ato_spinand_table,
+ .nchips = ARRAY_SIZE(ato_spinand_table),
+ .ops = &ato_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
new file mode 100644
index 0000000000..393ff37f0d
--- /dev/null
+++ b/drivers/mtd/nand/spi/core.c
@@ -0,0 +1,1406 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2017 Micron Technology, Inc.
+ *
+ * Authors:
+ * Peter Pan <peterpandong@micron.com>
+ * Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#define pr_fmt(fmt) "spi-nand: " fmt
+
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/spinand.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
+{
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
+ spinand->scratchbuf);
+ int ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ *val = *spinand->scratchbuf;
+ return 0;
+}
+
+static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
+{
+ struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
+ spinand->scratchbuf);
+
+ *spinand->scratchbuf = val;
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int spinand_read_status(struct spinand_device *spinand, u8 *status)
+{
+ return spinand_read_reg_op(spinand, REG_STATUS, status);
+}
+
+static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ if (WARN_ON(spinand->cur_target < 0 ||
+ spinand->cur_target >= nand->memorg.ntargets))
+ return -EINVAL;
+
+ *cfg = spinand->cfg_cache[spinand->cur_target];
+ return 0;
+}
+
+static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret;
+
+ if (WARN_ON(spinand->cur_target < 0 ||
+ spinand->cur_target >= nand->memorg.ntargets))
+ return -EINVAL;
+
+ if (spinand->cfg_cache[spinand->cur_target] == cfg)
+ return 0;
+
+ ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
+ if (ret)
+ return ret;
+
+ spinand->cfg_cache[spinand->cur_target] = cfg;
+ return 0;
+}
+
+/**
+ * spinand_upd_cfg() - Update the configuration register
+ * @spinand: the spinand device
+ * @mask: the mask encoding the bits to update in the config reg
+ * @val: the new value to apply
+ *
+ * Update the configuration register.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
+{
+ int ret;
+ u8 cfg;
+
+ ret = spinand_get_cfg(spinand, &cfg);
+ if (ret)
+ return ret;
+
+ cfg &= ~mask;
+ cfg |= val;
+
+ return spinand_set_cfg(spinand, cfg);
+}
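+/*
+ * Typical usage (informal; see spinand_ecc_enable() and
+ * spinand_init_quad_enable() below): pass the field mask and either the
+ * mask itself or 0 as @val, e.g.
+ * spinand_upd_cfg(spinand, CFG_ECC_ENABLE, CFG_ECC_ENABLE).
+ */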
+
+/**
+ * spinand_select_target() - Select a specific NAND target/die
+ * @spinand: the spinand device
+ * @target: the target/die to select
+ *
+ * Select a new target/die. If the chip only has one die, this function is
+ * a NOOP.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_select_target(struct spinand_device *spinand, unsigned int target)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret;
+
+ if (WARN_ON(target >= nand->memorg.ntargets))
+ return -EINVAL;
+
+ if (spinand->cur_target == target)
+ return 0;
+
+ if (nand->memorg.ntargets == 1) {
+ spinand->cur_target = target;
+ return 0;
+ }
+
+ ret = spinand->select_target(spinand, target);
+ if (ret)
+ return ret;
+
+ spinand->cur_target = target;
+ return 0;
+}
+
+static int spinand_read_cfg(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int target;
+ int ret;
+
+ for (target = 0; target < nand->memorg.ntargets; target++) {
+ ret = spinand_select_target(spinand, target);
+ if (ret)
+ return ret;
+
+ /*
+ * We use spinand_read_reg_op() instead of spinand_get_cfg()
+ * here to bypass the config cache.
+ */
+ ret = spinand_read_reg_op(spinand, REG_CFG,
+ &spinand->cfg_cache[target]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int spinand_init_cfg_cache(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct device *dev = &spinand->spimem->spi->dev;
+
+ spinand->cfg_cache = devm_kcalloc(dev,
+ nand->memorg.ntargets,
+ sizeof(*spinand->cfg_cache),
+ GFP_KERNEL);
+ if (!spinand->cfg_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int spinand_init_quad_enable(struct spinand_device *spinand)
+{
+ bool enable = false;
+
+ if (!(spinand->flags & SPINAND_HAS_QE_BIT))
+ return 0;
+
+ if (spinand->op_templates.read_cache->data.buswidth == 4 ||
+ spinand->op_templates.write_cache->data.buswidth == 4 ||
+ spinand->op_templates.update_cache->data.buswidth == 4)
+ enable = true;
+
+ return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
+ enable ? CFG_QUAD_ENABLE : 0);
+}
+
+static int spinand_ecc_enable(struct spinand_device *spinand,
+ bool enable)
+{
+ return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
+ enable ? CFG_ECC_ENABLE : 0);
+}
+
+static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ if (spinand->eccinfo.get_status)
+ return spinand->eccinfo.get_status(spinand, status);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ /*
+ * We have no way to know exactly how many bitflips have been
+ * fixed, so let's return the maximum possible value so that
+ * wear-leveling layers move the data immediately.
+ */
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
+ .ecc = spinand_noecc_ooblayout_ecc,
+ .free = spinand_noecc_ooblayout_free,
+};
+
+static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct spinand_ondie_ecc_conf *engine_conf;
+
+ nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
+ nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
+ nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;
+
+ engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
+ if (!engine_conf)
+ return -ENOMEM;
+
+ nand->ecc.ctx.priv = engine_conf;
+
+ if (spinand->eccinfo.ooblayout)
+ mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
+ else
+ mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
+
+ return 0;
+}
+
+static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
+{
+ kfree(nand->ecc.ctx.priv);
+}
+
+static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ bool enable = (req->mode != MTD_OPS_RAW);
+
+ memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
+
+ /* Only enable or disable the engine */
+ return spinand_ecc_enable(spinand, enable);
+}
+
+static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ int ret;
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ /* Nothing to do when finishing a page write */
+ if (req->type == NAND_PAGE_WRITE)
+ return 0;
+
+ /* Finish a page read: check the status, report errors/bitflips */
+ ret = spinand_check_ecc_status(spinand, engine_conf->status);
+ if (ret == -EBADMSG)
+ mtd->ecc_stats.failed++;
+ else if (ret > 0)
+ mtd->ecc_stats.corrected += ret;
+
+ return ret;
+}
+
+static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
+ .init_ctx = spinand_ondie_ecc_init_ctx,
+ .cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
+ .prepare_io_req = spinand_ondie_ecc_prepare_io_req,
+ .finish_io_req = spinand_ondie_ecc_finish_io_req,
+};
+
+static struct nand_ecc_engine spinand_ondie_ecc_engine = {
+ .ops = &spinand_ondie_ecc_engine_ops,
+};
+
+static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
+{
+ struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
+
+ if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
+ engine_conf)
+ engine_conf->status = status;
+}
+
+static int spinand_write_enable_op(struct spinand_device *spinand)
+{
+ struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int spinand_load_page_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int row = nanddev_pos_to_row(nand, &req->pos);
+ struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int spinand_read_from_cache_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ struct spi_mem_dirmap_desc *rdesc;
+ unsigned int nbytes = 0;
+ void *buf = NULL;
+ u16 column = 0;
+ ssize_t ret;
+
+ if (req->datalen) {
+ buf = spinand->databuf;
+ nbytes = nanddev_page_size(nand);
+ column = 0;
+ }
+
+ if (req->ooblen) {
+ nbytes += nanddev_per_page_oobsize(nand);
+ if (!buf) {
+ buf = spinand->oobbuf;
+ column = nanddev_page_size(nand);
+ }
+ }
+
+ if (req->mode == MTD_OPS_RAW)
+ rdesc = spinand->dirmaps[req->pos.plane].rdesc;
+ else
+ rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;
+
+ while (nbytes) {
+ ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
+ if (ret < 0)
+ return ret;
+
+ if (!ret || ret > nbytes)
+ return -EIO;
+
+ nbytes -= ret;
+ column += ret;
+ buf += ret;
+ }
+
+ if (req->datalen)
+ memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
+ req->datalen);
+
+ if (req->ooblen) {
+ if (req->mode == MTD_OPS_AUTO_OOB)
+ mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
+ spinand->oobbuf,
+ req->ooboffs,
+ req->ooblen);
+ else
+ memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
+ req->ooblen);
+ }
+
+ return 0;
+}
+
+static int spinand_write_to_cache_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ struct spi_mem_dirmap_desc *wdesc;
+ unsigned int nbytes, column = 0;
+ void *buf = spinand->databuf;
+ ssize_t ret;
+
+ /*
+ * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
+ * the cache content to 0xFF (depends on vendor implementation), so we
+ * must fill the page cache entirely even if we only want to program
+ * the data portion of the page, otherwise we might corrupt the BBM or
+ * user data previously programmed in OOB area.
+ *
+	 * Only reset the data buffer manually; the OOB buffer is prepared by
+	 * the ECC engine's ->prepare_io_req() callback.
+ */
+ nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+ memset(spinand->databuf, 0xff, nanddev_page_size(nand));
+
+ if (req->datalen)
+ memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
+ req->datalen);
+
+ if (req->ooblen) {
+ if (req->mode == MTD_OPS_AUTO_OOB)
+ mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
+ spinand->oobbuf,
+ req->ooboffs,
+ req->ooblen);
+ else
+ memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
+ req->ooblen);
+ }
+
+ if (req->mode == MTD_OPS_RAW)
+ wdesc = spinand->dirmaps[req->pos.plane].wdesc;
+ else
+ wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;
+
+ while (nbytes) {
+ ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
+ if (ret < 0)
+ return ret;
+
+ if (!ret || ret > nbytes)
+ return -EIO;
+
+ nbytes -= ret;
+ column += ret;
+ buf += ret;
+ }
+
+ return 0;
+}
+
+static int spinand_program_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int row = nanddev_pos_to_row(nand, &req->pos);
+ struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int spinand_erase_op(struct spinand_device *spinand,
+ const struct nand_pos *pos)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int row = nanddev_pos_to_row(nand, pos);
+ struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int spinand_wait(struct spinand_device *spinand,
+ unsigned long initial_delay_us,
+ unsigned long poll_delay_us,
+ u8 *s)
+{
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
+ spinand->scratchbuf);
+ u8 status;
+ int ret;
+
+ ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
+ initial_delay_us,
+ poll_delay_us,
+ SPINAND_WAITRDY_TIMEOUT_MS);
+ if (ret)
+ return ret;
+
+ status = *spinand->scratchbuf;
+ if (!(status & STATUS_BUSY))
+ goto out;
+
+ /*
+ * Extra read, just in case the STATUS_READY bit has changed
+ * since our last check
+ */
+ ret = spinand_read_status(spinand, &status);
+ if (ret)
+ return ret;
+
+out:
+ if (s)
+ *s = status;
+
+ return status & STATUS_BUSY ? -ETIMEDOUT : 0;
+}
+
+static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
+ u8 ndummy, u8 *buf)
+{
+ struct spi_mem_op op = SPINAND_READID_OP(
+ naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
+ int ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (!ret)
+ memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
+
+ return ret;
+}
+
+static int spinand_reset_op(struct spinand_device *spinand)
+{
+ struct spi_mem_op op = SPINAND_RESET_OP;
+ int ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ return spinand_wait(spinand,
+ SPINAND_RESET_INITIAL_DELAY_US,
+ SPINAND_RESET_POLL_DELAY_US,
+ NULL);
+}
+
+static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
+{
+ return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
+}
+
+static int spinand_read_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ u8 status;
+ int ret;
+
+ ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
+ if (ret)
+ return ret;
+
+ ret = spinand_load_page_op(spinand, req);
+ if (ret)
+ return ret;
+
+ ret = spinand_wait(spinand,
+ SPINAND_READ_INITIAL_DELAY_US,
+ SPINAND_READ_POLL_DELAY_US,
+ &status);
+ if (ret < 0)
+ return ret;
+
+ spinand_ondie_ecc_save_status(nand, status);
+
+ ret = spinand_read_from_cache_op(spinand, req);
+ if (ret)
+ return ret;
+
+ return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
+}
+
+static int spinand_write_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ u8 status;
+ int ret;
+
+ ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
+ if (ret)
+ return ret;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_write_to_cache_op(spinand, req);
+ if (ret)
+ return ret;
+
+ ret = spinand_program_op(spinand, req);
+ if (ret)
+ return ret;
+
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
+ if (!ret && (status & STATUS_PROG_FAILED))
+ return -EIO;
+
+ return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
+}
+
+static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct mtd_ecc_stats old_stats;
+ unsigned int max_bitflips = 0;
+ struct nand_io_iter iter;
+ bool disable_ecc = false;
+ bool ecc_failed = false;
+ int ret = 0;
+
+ if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
+ disable_ecc = true;
+
+ mutex_lock(&spinand->lock);
+
+ old_stats = mtd->ecc_stats;
+
+ nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
+ if (disable_ecc)
+ iter.req.mode = MTD_OPS_RAW;
+
+ ret = spinand_select_target(spinand, iter.req.pos.target);
+ if (ret)
+ break;
+
+ ret = spinand_read_page(spinand, &iter.req);
+ if (ret < 0 && ret != -EBADMSG)
+ break;
+
+ if (ret == -EBADMSG)
+ ecc_failed = true;
+ else
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
+ ret = 0;
+ ops->retlen += iter.req.datalen;
+ ops->oobretlen += iter.req.ooblen;
+ }
+
+ if (ops->stats) {
+ ops->stats->uncorrectable_errors +=
+ mtd->ecc_stats.failed - old_stats.failed;
+ ops->stats->corrected_bitflips +=
+ mtd->ecc_stats.corrected - old_stats.corrected;
+ }
+
+ mutex_unlock(&spinand->lock);
+
+ if (ecc_failed && !ret)
+ ret = -EBADMSG;
+
+ return ret ? ret : max_bitflips;
+}
+
+static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_io_iter iter;
+ bool disable_ecc = false;
+ int ret = 0;
+
+ if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
+ disable_ecc = true;
+
+ mutex_lock(&spinand->lock);
+
+ nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
+ if (disable_ecc)
+ iter.req.mode = MTD_OPS_RAW;
+
+ ret = spinand_select_target(spinand, iter.req.pos.target);
+ if (ret)
+ break;
+
+ ret = spinand_write_page(spinand, &iter.req);
+ if (ret)
+ break;
+
+ ops->retlen += iter.req.datalen;
+ ops->oobretlen += iter.req.ooblen;
+ }
+
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 marker[2] = { };
+ struct nand_page_io_req req = {
+ .pos = *pos,
+ .ooblen = sizeof(marker),
+ .ooboffs = 0,
+ .oobbuf.in = marker,
+ .mode = MTD_OPS_RAW,
+ };
+
+ spinand_select_target(spinand, pos->target);
+ spinand_read_page(spinand, &req);
+ if (marker[0] != 0xff || marker[1] != 0xff)
+ return true;
+
+ return false;
+}
+
+static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_pos pos;
+ int ret;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+ mutex_lock(&spinand->lock);
+ ret = nanddev_isbad(nand, &pos);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 marker[2] = { };
+ struct nand_page_io_req req = {
+ .pos = *pos,
+ .ooboffs = 0,
+ .ooblen = sizeof(marker),
+ .oobbuf.out = marker,
+ .mode = MTD_OPS_RAW,
+ };
+ int ret;
+
+ ret = spinand_select_target(spinand, pos->target);
+ if (ret)
+ return ret;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ return spinand_write_page(spinand, &req);
+}
+
+static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_pos pos;
+ int ret;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+ mutex_lock(&spinand->lock);
+ ret = nanddev_markbad(nand, &pos);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 status;
+ int ret;
+
+ ret = spinand_select_target(spinand, pos->target);
+ if (ret)
+ return ret;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_erase_op(spinand, pos);
+ if (ret)
+ return ret;
+
+ ret = spinand_wait(spinand,
+ SPINAND_ERASE_INITIAL_DELAY_US,
+ SPINAND_ERASE_POLL_DELAY_US,
+ &status);
+
+ if (!ret && (status & STATUS_ERASE_FAILED))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int spinand_mtd_erase(struct mtd_info *mtd,
+ struct erase_info *einfo)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ mutex_lock(&spinand->lock);
+ ret = nanddev_mtd_erase(mtd, einfo);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_pos pos;
+ int ret;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+ mutex_lock(&spinand->lock);
+ ret = nanddev_isreserved(nand, &pos);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_create_dirmap(struct spinand_device *spinand,
+ unsigned int plane)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct spi_mem_dirmap_info info = {
+ .length = nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand),
+ };
+ struct spi_mem_dirmap_desc *desc;
+
+ /* The plane number is passed in MSB just above the column address */
+ info.offset = plane << fls(nand->memorg.pagesize);
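+	/*
+	 * e.g. (assuming a 2048-byte page): fls(2048) = 12, so plane 1 is
+	 * addressed at dirmap offset 1 << 12 = 0x1000.
+	 */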
+
+ info.op_tmpl = *spinand->op_templates.update_cache;
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].wdesc = desc;
+
+ info.op_tmpl = *spinand->op_templates.read_cache;
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].rdesc = desc;
+
+ if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) {
+ spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
+ spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;
+
+ return 0;
+ }
+
+ info.op_tmpl = *spinand->op_templates.update_cache;
+ info.op_tmpl.data.ecc = true;
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].wdesc_ecc = desc;
+
+ info.op_tmpl = *spinand->op_templates.read_cache;
+ info.op_tmpl.data.ecc = true;
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].rdesc_ecc = desc;
+
+ return 0;
+}
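
The plane index is encoded in the address bits just above the column address, so each plane gets a dirmap window large enough for one page plus its OOB area without overlapping its neighbour. Below is a minimal host-side sketch of that arithmetic; fls() is modelled locally and the 2048+64 geometry is assumed for illustration only.

#include <stdio.h>

/* Host-side model of the kernel's fls(): 1-based index of the highest set bit. */
static int fls_model(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int pagesize = 2048;	/* assumed page size */
	unsigned int oobsize = 64;	/* assumed OOB size */
	unsigned int plane;

	/* For pagesize = 2048, fls() = 12, so plane n maps to offset n << 12. */
	for (plane = 0; plane < 2; plane++)
		printf("plane %u -> dirmap offset 0x%x, window %u bytes\n",
		       plane, plane << fls_model(pagesize),
		       pagesize + oobsize);

	return 0;
}

With these numbers plane 1 starts at 0x1000 while a window is only 2112 bytes, so the per-plane mappings never collide.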
+
+static int spinand_create_dirmaps(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int i, ret;
+
+ spinand->dirmaps = devm_kcalloc(&spinand->spimem->spi->dev,
+ nand->memorg.planes_per_lun,
+ sizeof(*spinand->dirmaps),
+ GFP_KERNEL);
+ if (!spinand->dirmaps)
+ return -ENOMEM;
+
+ for (i = 0; i < nand->memorg.planes_per_lun; i++) {
+ ret = spinand_create_dirmap(spinand, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct nand_ops spinand_ops = {
+ .erase = spinand_erase,
+ .markbad = spinand_markbad,
+ .isbad = spinand_isbad,
+};
+
+static const struct spinand_manufacturer *spinand_manufacturers[] = {
+ &alliancememory_spinand_manufacturer,
+ &ato_spinand_manufacturer,
+ &esmt_c8_spinand_manufacturer,
+ &gigadevice_spinand_manufacturer,
+ &macronix_spinand_manufacturer,
+ &micron_spinand_manufacturer,
+ &paragon_spinand_manufacturer,
+ &toshiba_spinand_manufacturer,
+ &winbond_spinand_manufacturer,
+ &xtx_spinand_manufacturer,
+};
+
+static int spinand_manufacturer_match(struct spinand_device *spinand,
+ enum spinand_readid_method rdid_method)
+{
+ u8 *id = spinand->id.data;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
+ const struct spinand_manufacturer *manufacturer =
+ spinand_manufacturers[i];
+
+ if (id[0] != manufacturer->id)
+ continue;
+
+ ret = spinand_match_and_init(spinand,
+ manufacturer->chips,
+ manufacturer->nchips,
+ rdid_method);
+ if (ret < 0)
+ continue;
+
+ spinand->manufacturer = manufacturer;
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int spinand_id_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ ret = spinand_read_id_op(spinand, 0, 0, id);
+ if (ret)
+ return ret;
+ ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
+ if (!ret)
+ return 0;
+
+ ret = spinand_read_id_op(spinand, 1, 0, id);
+ if (ret)
+ return ret;
+ ret = spinand_manufacturer_match(spinand,
+ SPINAND_READID_METHOD_OPCODE_ADDR);
+ if (!ret)
+ return 0;
+
+ ret = spinand_read_id_op(spinand, 0, 1, id);
+ if (ret)
+ return ret;
+ ret = spinand_manufacturer_match(spinand,
+ SPINAND_READID_METHOD_OPCODE_DUMMY);
+
+ return ret;
+}
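
Three READ_ID layouts are tried in turn because vendors disagree on what sits between the opcode and the ID bytes: nothing, one address byte, or one dummy byte. A rough host-side sketch of the three wire formats follows; the 9Fh opcode and byte order are assumptions drawn from common SPI NAND datasheets, not from this file.

#include <stdio.h>

enum readid_method {
	READID_OPCODE,		/* opcode, then ID bytes */
	READID_OPCODE_ADDR,	/* opcode + one address byte, then ID bytes */
	READID_OPCODE_DUMMY,	/* opcode + one dummy byte, then ID bytes */
};

static void show_wire_format(enum readid_method m)
{
	switch (m) {
	case READID_OPCODE:
		printf("9F | id[0] id[1] ...\n");
		break;
	case READID_OPCODE_ADDR:
		printf("9F 00 | id[0] id[1] ...\n");
		break;
	case READID_OPCODE_DUMMY:
		printf("9F xx | id[0] id[1] ...\n");
		break;
	}
}

int main(void)
{
	int m;

	/* Same probing order as spinand_id_detect() above. */
	for (m = READID_OPCODE; m <= READID_OPCODE_DUMMY; m++)
		show_wire_format(m);

	return 0;
}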
+
+static int spinand_manufacturer_init(struct spinand_device *spinand)
+{
+ if (spinand->manufacturer->ops->init)
+ return spinand->manufacturer->ops->init(spinand);
+
+ return 0;
+}
+
+static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
+{
+ /* Release manufacturer private data */
+ if (spinand->manufacturer->ops->cleanup)
+ return spinand->manufacturer->ops->cleanup(spinand);
+}
+
+static const struct spi_mem_op *
+spinand_select_op_variant(struct spinand_device *spinand,
+ const struct spinand_op_variants *variants)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int i;
+
+ for (i = 0; i < variants->nops; i++) {
+ struct spi_mem_op op = variants->ops[i];
+ unsigned int nbytes;
+ int ret;
+
+ nbytes = nanddev_per_page_oobsize(nand) +
+ nanddev_page_size(nand);
+
+ while (nbytes) {
+ op.data.nbytes = nbytes;
+ ret = spi_mem_adjust_op_size(spinand->spimem, &op);
+ if (ret)
+ break;
+
+ if (!spi_mem_supports_op(spinand->spimem, &op))
+ break;
+
+ nbytes -= op.data.nbytes;
+ }
+
+ if (!nbytes)
+ return &variants->ops[i];
+ }
+
+ return NULL;
+}
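
A variant is kept only if the whole page + OOB payload can be transferred, possibly split into controller-sized chunks by spi_mem_adjust_op_size(); the first (fastest) surviving variant wins because the SPINAND_OP_VARIANTS lists are ordered fastest-first. Here is a simplified host-side model of that walk, with toy stand-ins for the two spi-mem helpers and an assumed 256-byte per-transfer cap.

#include <stdio.h>
#include <stdbool.h>

#define MAX_XFER 256	/* assumed controller limit per transfer */

/* Toy stand-in for spi_mem_adjust_op_size(): clamp to the controller cap. */
static int adjust_op_size(unsigned int *nbytes)
{
	if (*nbytes > MAX_XFER)
		*nbytes = MAX_XFER;
	return *nbytes ? 0 : -1;
}

/* Toy stand-in for spi_mem_supports_op(): this controller only does x1/x2. */
static bool supports_op(unsigned int buswidth)
{
	return buswidth <= 2;
}

static bool variant_ok(unsigned int buswidth, unsigned int total)
{
	unsigned int nbytes = total;

	while (nbytes) {
		unsigned int chunk = nbytes;

		if (adjust_op_size(&chunk) || !supports_op(buswidth))
			return false;
		nbytes -= chunk;
	}
	return true;
}

int main(void)
{
	/* Variants ordered fastest first, as in the tables in this file. */
	unsigned int widths[] = { 4, 2, 1 };
	unsigned int total = 2048 + 64;	/* page + OOB */
	unsigned int i;

	for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++) {
		if (variant_ok(widths[i], total)) {
			printf("selected x%u variant\n", widths[i]);
			return 0;
		}
	}
	printf("no variant supported\n");
	return 1;
}

With these assumptions the quad variant is rejected and the dual one is selected, split into nine 256-byte chunks.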
+
+/**
+ * spinand_match_and_init() - Try to find a match between a device ID and an
+ * entry in a spinand_info table
+ * @spinand: SPI NAND object
+ * @table: SPI NAND device description table
+ * @table_size: size of the device description table
+ * @rdid_method: read id method to match
+ *
+ * Match a device ID retrieved through the READ_ID command against the
+ * entries in the SPI NAND description table. If a match is found, the
+ * spinand object is initialized with the information provided by the
+ * matching spinand_info entry.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_match_and_init(struct spinand_device *spinand,
+ const struct spinand_info *table,
+ unsigned int table_size,
+ enum spinand_readid_method rdid_method)
+{
+ u8 *id = spinand->id.data;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int i;
+
+ for (i = 0; i < table_size; i++) {
+ const struct spinand_info *info = &table[i];
+ const struct spi_mem_op *op;
+
+ if (rdid_method != info->devid.method)
+ continue;
+
+ if (memcmp(id + 1, info->devid.id, info->devid.len))
+ continue;
+
+ nand->memorg = table[i].memorg;
+ nanddev_set_ecc_requirements(nand, &table[i].eccreq);
+ spinand->eccinfo = table[i].eccinfo;
+ spinand->flags = table[i].flags;
+ spinand->id.len = 1 + table[i].devid.len;
+ spinand->select_target = table[i].select_target;
+
+ op = spinand_select_op_variant(spinand,
+ info->op_variants.read_cache);
+ if (!op)
+ return -ENOTSUPP;
+
+ spinand->op_templates.read_cache = op;
+
+ op = spinand_select_op_variant(spinand,
+ info->op_variants.write_cache);
+ if (!op)
+ return -ENOTSUPP;
+
+ spinand->op_templates.write_cache = op;
+
+ op = spinand_select_op_variant(spinand,
+ info->op_variants.update_cache);
+ spinand->op_templates.update_cache = op;
+
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
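
Note that the comparison starts at id + 1: the leading manufacturer byte was already consumed by spinand_manufacturer_match(), so only the device-specific bytes are matched here, and the stored id.len accounts for both parts. A compact sketch of that comparison, with IDs invented purely for illustration:

#include <stdio.h>
#include <string.h>

struct devid {
	unsigned char id[3];	/* device-specific ID bytes */
	unsigned int len;
	const char *name;
};

int main(void)
{
	/* Raw READ_ID answer: manufacturer byte first, then device bytes. */
	unsigned char raw[] = { 0x2c, 0x24 };	/* invented example */
	struct devid table[] = {
		{ { 0x24 }, 1, "example-2Gb" },
		{ { 0x36 }, 1, "example-4Gb" },
	};
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		/* Skip raw[0]: the manufacturer byte was matched already. */
		if (!memcmp(raw + 1, table[i].id, table[i].len)) {
			printf("matched %s (id.len = %u)\n",
			       table[i].name, 1 + table[i].len);
			return 0;
		}
	}
	printf("no match\n");
	return 1;
}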
+
+static int spinand_detect(struct spinand_device *spinand)
+{
+ struct device *dev = &spinand->spimem->spi->dev;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret;
+
+ ret = spinand_reset_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_id_detect(spinand);
+ if (ret) {
+ dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
+ spinand->id.data);
+ return ret;
+ }
+
+ if (nand->memorg.ntargets > 1 && !spinand->select_target) {
+ dev_err(dev,
+ "SPI NANDs with more than one die must implement ->select_target()\n");
+ return -EINVAL;
+ }
+
+ dev_info(dev, "%s SPI NAND was found.\n", spinand->manufacturer->name);
+ dev_info(dev,
+ "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
+ nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
+ nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
+
+ return 0;
+}
+
+static int spinand_init_flash(struct spinand_device *spinand)
+{
+ struct device *dev = &spinand->spimem->spi->dev;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret, i;
+
+ ret = spinand_read_cfg(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_init_quad_enable(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
+ if (ret)
+ return ret;
+
+ ret = spinand_manufacturer_init(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to initialize the SPI NAND chip (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ /* After power up, all blocks are locked, so unlock them here. */
+ for (i = 0; i < nand->memorg.ntargets; i++) {
+ ret = spinand_select_target(spinand, i);
+ if (ret)
+ break;
+
+ ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ spinand_manufacturer_cleanup(spinand);
+
+ return ret;
+}
+
+static void spinand_mtd_resume(struct mtd_info *mtd)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ ret = spinand_reset_op(spinand);
+ if (ret)
+ return;
+
+ ret = spinand_init_flash(spinand);
+ if (ret)
+ return;
+
+ spinand_ecc_enable(spinand, false);
+}
+
+static int spinand_init(struct spinand_device *spinand)
+{
+ struct device *dev = &spinand->spimem->spi->dev;
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ int ret;
+
+ /*
+ * We need a scratch buffer because the spi_mem interface requires that
+ * the buffer passed in spi_mem_op->data.buf be DMA-able.
+ */
+ spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
+ if (!spinand->scratchbuf)
+ return -ENOMEM;
+
+ ret = spinand_detect(spinand);
+ if (ret)
+ goto err_free_bufs;
+
+ /*
+ * Use kzalloc() instead of devm_kzalloc() here, because some drivers
+ * may use this buffer for DMA access. Memory allocated by the devm_*
+ * helpers is not guaranteed to be DMA-safe aligned.
+ */
+ spinand->databuf = kzalloc(nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand),
+ GFP_KERNEL);
+ if (!spinand->databuf) {
+ ret = -ENOMEM;
+ goto err_free_bufs;
+ }
+
+ spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
+
+ ret = spinand_init_cfg_cache(spinand);
+ if (ret)
+ goto err_free_bufs;
+
+ ret = spinand_init_flash(spinand);
+ if (ret)
+ goto err_free_bufs;
+
+ ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
+ if (ret)
+ goto err_manuf_cleanup;
+
+ /* SPI-NAND default ECC engine is on-die */
+ nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
+ nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;
+
+ spinand_ecc_enable(spinand, false);
+ ret = nanddev_ecc_engine_init(nand);
+ if (ret)
+ goto err_cleanup_nanddev;
+
+ mtd->_read_oob = spinand_mtd_read;
+ mtd->_write_oob = spinand_mtd_write;
+ mtd->_block_isbad = spinand_mtd_block_isbad;
+ mtd->_block_markbad = spinand_mtd_block_markbad;
+ mtd->_block_isreserved = spinand_mtd_block_isreserved;
+ mtd->_erase = spinand_mtd_erase;
+ mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
+ mtd->_resume = spinand_mtd_resume;
+
+ if (nand->ecc.engine) {
+ ret = mtd_ooblayout_count_freebytes(mtd);
+ if (ret < 0)
+ goto err_cleanup_ecc_engine;
+ }
+
+ mtd->oobavail = ret;
+
+ /* Propagate ECC information to mtd_info */
+ mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
+ mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
+
+ ret = spinand_create_dirmaps(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to create direct mappings for read/write operations (err = %d)\n",
+ ret);
+ goto err_cleanup_ecc_engine;
+ }
+
+ return 0;
+
+err_cleanup_ecc_engine:
+ nanddev_ecc_engine_cleanup(nand);
+
+err_cleanup_nanddev:
+ nanddev_cleanup(nand);
+
+err_manuf_cleanup:
+ spinand_manufacturer_cleanup(spinand);
+
+err_free_bufs:
+ kfree(spinand->databuf);
+ kfree(spinand->scratchbuf);
+ return ret;
+}
+
+static void spinand_cleanup(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ nanddev_cleanup(nand);
+ spinand_manufacturer_cleanup(spinand);
+ kfree(spinand->databuf);
+ kfree(spinand->scratchbuf);
+}
+
+static int spinand_probe(struct spi_mem *mem)
+{
+ struct spinand_device *spinand;
+ struct mtd_info *mtd;
+ int ret;
+
+ spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
+ GFP_KERNEL);
+ if (!spinand)
+ return -ENOMEM;
+
+ spinand->spimem = mem;
+ spi_mem_set_drvdata(mem, spinand);
+ spinand_set_of_node(spinand, mem->spi->dev.of_node);
+ mutex_init(&spinand->lock);
+ mtd = spinand_to_mtd(spinand);
+ mtd->dev.parent = &mem->spi->dev;
+
+ ret = spinand_init(spinand);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err_spinand_cleanup;
+
+ return 0;
+
+err_spinand_cleanup:
+ spinand_cleanup(spinand);
+
+ return ret;
+}
+
+static int spinand_remove(struct spi_mem *mem)
+{
+ struct spinand_device *spinand;
+ struct mtd_info *mtd;
+ int ret;
+
+ spinand = spi_mem_get_drvdata(mem);
+ mtd = spinand_to_mtd(spinand);
+
+ ret = mtd_device_unregister(mtd);
+ if (ret)
+ return ret;
+
+ spinand_cleanup(spinand);
+
+ return 0;
+}
+
+static const struct spi_device_id spinand_ids[] = {
+ { .name = "spi-nand" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(spi, spinand_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id spinand_of_ids[] = {
+ { .compatible = "spi-nand" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, spinand_of_ids);
+#endif
+
+static struct spi_mem_driver spinand_drv = {
+ .spidrv = {
+ .id_table = spinand_ids,
+ .driver = {
+ .name = "spi-nand",
+ .of_match_table = of_match_ptr(spinand_of_ids),
+ },
+ },
+ .probe = spinand_probe,
+ .remove = spinand_remove,
+};
+module_spi_mem_driver(spinand_drv);
+
+MODULE_DESCRIPTION("SPI NAND framework");
+MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
new file mode 100644
index 0000000000..31c439a557
--- /dev/null
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author:
+ * Chuanhong Guo <gch981213@gmail.com> - the main driver logic
+ * Martin Kurbanov <mmkurbanov@sberdevices.ru> - OOB layout
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+/* ESMT uses the GigaDevice 0xc8 JEDEC ID on some SPI NANDs */
+#define SPINAND_MFR_ESMT_C8 0xc8
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+/*
+ * OOB spare area map (64 bytes)
+ *
+ * Bad Block Markers
+ * filled by HW and kernel Reserved
+ * | +-----------------------+-----------------------+
+ * | | | |
+ * | | OOB free data Area |non ECC protected |
+ * | +-------------|-----+-----------------|-----+-----------------|-----+
+ * | | | | | | | |
+ * +-|---|----------+--|-----|--------------+--|-----|--------------+--|-----|--------------+
+ * | | | section0 | | | section1 | | | section2 | | | section3 |
+ * +-v-+-v-+---+----+--v--+--v--+-----+-----+--v--+--v--+-----+-----+--v--+--v--+-----+-----+
+ * | | | | | | | | | | | | | | | | |
+ * |0:1|2:3|4:7|8:15|16:17|18:19|20:23|24:31|32:33|34:35|36:39|40:47|48:49|50:51|52:55|56:63|
+ * | | | | | | | | | | | | | | | | |
+ * +---+---+-^-+--^-+-----+-----+--^--+--^--+-----+-----+--^--+--^--+-----+-----+--^--+--^--+
+ * | | | | | | | |
+ * | +----------------|-----+-----------------|-----+-----------------|-----+
+ * | ECC Area|(Main + Spare) - filled|by ESMT NAND HW |
+ * | | | |
+ * +---------------------+-----------------------+-----------------------+
+ * OOB ECC protected Area - not used due to
+ * partial programming from some filesystems
+ * (like JFFS2 with cleanmarkers)
+ */
+
+#define ESMT_OOB_SECTION_COUNT 4
+#define ESMT_OOB_SECTION_SIZE(nand) \
+ (nanddev_per_page_oobsize(nand) / ESMT_OOB_SECTION_COUNT)
+#define ESMT_OOB_FREE_SIZE(nand) \
+ (ESMT_OOB_SECTION_SIZE(nand) / 2)
+#define ESMT_OOB_ECC_SIZE(nand) \
+ (ESMT_OOB_SECTION_SIZE(nand) - ESMT_OOB_FREE_SIZE(nand))
+#define ESMT_OOB_BBM_SIZE 2
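
For the 64-byte OOB shown in the diagram, these macros carve the spare area into four 16-byte sections of 8 free bytes followed by 8 ECC bytes; the free callback below then deliberately exposes only 2 of those free bytes per section. A quick host-side check of the section arithmetic, with the geometry assumed from the diagram above:

#include <stdio.h>

#define OOBSIZE		64
#define SECTIONS	4
#define SECTION_SIZE	(OOBSIZE / SECTIONS)		/* 16 */
#define FREE_SIZE	(SECTION_SIZE / 2)		/* 8 */

int main(void)
{
	int section;

	for (section = 0; section < SECTIONS; section++)
		printf("section %d: free [%d..%d], ecc [%d..%d]\n",
		       section,
		       section * SECTION_SIZE,
		       section * SECTION_SIZE + FREE_SIZE - 1,
		       section * SECTION_SIZE + FREE_SIZE,
		       (section + 1) * SECTION_SIZE - 1);

	return 0;
}

This reproduces the byte ranges in the diagram: free bytes at 0-7, 16-23, 32-39, 48-55 and HW ECC at 8-15, 24-31, 40-47, 56-63.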
+
+static int f50l1g41lb_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+
+ if (section >= ESMT_OOB_SECTION_COUNT)
+ return -ERANGE;
+
+ region->offset = section * ESMT_OOB_SECTION_SIZE(nand) +
+ ESMT_OOB_FREE_SIZE(nand);
+ region->length = ESMT_OOB_ECC_SIZE(nand);
+
+ return 0;
+}
+
+static int f50l1g41lb_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+
+ if (section >= ESMT_OOB_SECTION_COUNT)
+ return -ERANGE;
+
+ /*
+ * Reserve space for the bad block markers (section 0) and the
+ * reserved bytes (sections 1-3).
+ */
+ region->offset = section * ESMT_OOB_SECTION_SIZE(nand) + 2;
+
+ /* Use only the 2 bytes of each OOB section that are not ECC protected */
+ region->length = 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops f50l1g41lb_ooblayout = {
+ .ecc = f50l1g41lb_ooblayout_ecc,
+ .free = f50l1g41lb_ooblayout_free,
+};
+
+static const struct spinand_info esmt_c8_spinand_table[] = {
+ SPINAND_INFO("F50L1G41LB",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
+ SPINAND_INFO("F50D1G41LB",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
+ SPINAND_INFO("F50D2G41KA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
+};
+
+static const struct spinand_manufacturer_ops esmt_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer esmt_c8_spinand_manufacturer = {
+ .id = SPINAND_MFR_ESMT_C8,
+ .name = "ESMT",
+ .chips = esmt_c8_spinand_table,
+ .nchips = ARRAY_SIZE(esmt_c8_spinand_table),
+ .ops = &esmt_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
new file mode 100644
index 0000000000..987710e094
--- /dev/null
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author:
+ * Chuanhong Guo <gch981213@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_GIGADEVICE 0xC8
+
+#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4)
+#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4)
+
+#define GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS (1 << 4)
+#define GD5FXGQ5XE_STATUS_ECC_4_BITFLIPS (3 << 4)
+
+#define GD5FXGQXXEXXG_REG_STATUS2 0xf0
+
+#define GD5FXGQ4UXFXXG_STATUS_ECC_MASK (7 << 4)
+#define GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS (0 << 4)
+#define GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS (1 << 4)
+#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(read_cache_variants_f,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(read_cache_variants_1gq5,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(read_cache_variants_2gq5,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 8;
+ region->length = 8;
+
+ return 0;
+}
+
+static int gd5fxgq4xa_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ if (section) {
+ region->offset = 16 * section;
+ region->length = 8;
+ } else {
+ /* section 0 has one byte reserved for bad block mark */
+ region->offset = 1;
+ region->length = 7;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
+ .ecc = gd5fxgq4xa_ooblayout_ecc,
+ .free = gd5fxgq4xa_ooblayout_free,
+};
+
+static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
+ /* 1-7 bits are flipped. Return the maximum. */
+ return 7;
+
+ case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
+ return 8;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int gd5fxgqx_variant2_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 64;
+ region->length = 64;
+
+ return 0;
+}
+
+static int gd5fxgqx_variant2_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 1 byte for the BBM. */
+ region->offset = 1;
+ region->length = 63;
+
+ return 0;
+}
+
+/* Valid for Q4/Q5 and Q6 (untested) devices */
+static const struct mtd_ooblayout_ops gd5fxgqx_variant2_ooblayout = {
+ .ecc = gd5fxgqx_variant2_ooblayout_ecc,
+ .free = gd5fxgqx_variant2_ooblayout_free,
+};
+
+static int gd5fxgq4xc_ooblayout_256_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 128;
+ oobregion->length = 128;
+
+ return 0;
+}
+
+static int gd5fxgq4xc_ooblayout_256_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 1;
+ oobregion->length = 127;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops gd5fxgq4xc_oob_256_ops = {
+ .ecc = gd5fxgq4xc_ooblayout_256_ecc,
+ .free = gd5fxgq4xc_ooblayout_256_free,
+};
+
+static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ u8 status2;
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+ &status2);
+ int ret;
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
+ /*
+ * Read the status2 register to determine a more fine-grained
+ * bit error status.
+ */
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ /*
+ * 4 ... 7 bits are flipped (1..4 can't be detected, so
+ * report the maximum of 4 in this case).
+ *
+ * Bits are sorted this way (3...0): ECCS1, ECCS0, ECCSE1, ECCSE0.
+ */
+ return ((status & STATUS_ECC_MASK) >> 2) |
+ ((status2 & STATUS_ECC_MASK) >> 4);
+
+ case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
+ return 8;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
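
The returned count is assembled from two 2-bit fields: ECCS from the status register lands in bits 3:2 and ECCSE from status2 in bits 1:0, so the 1-7 bitflips case (ECCS = 01b) reports 4 + ECCSE. A small host-side table of that decoding; the register values are invented for illustration:

#include <stdio.h>

#define STATUS_ECC_MASK	(3 << 4)

int main(void)
{
	int status = 1 << 4;	/* ECCS = 01b: the 1-7 bitflips case */
	int eccse;

	for (eccse = 0; eccse < 4; eccse++) {
		int status2 = eccse << 4;

		/* Same bit merge as gd5fxgq4uexxg_ecc_get_status() above. */
		printf("status2 ECCSE = %d -> %d bitflips reported\n",
		       eccse,
		       ((status & STATUS_ECC_MASK) >> 2) |
		       ((status2 & STATUS_ECC_MASK) >> 4));
	}

	return 0;
}

The loop prints 4, 5, 6 and 7, matching the range the comment above describes.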
+
+static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ u8 status2;
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+ &status2);
+ int ret;
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS:
+ /*
+ * Read the status2 register to determine a more fine-grained
+ * bit error status.
+ */
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ /*
+ * 1 ... 4 bits are flipped (and corrected).
+ * Bits are sorted this way (1...0): ECCSE1, ECCSE0.
+ */
+ return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) {
+ case GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS:
+ return 3;
+
+ case GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default: /* (2 << 4) through (6 << 4) are 4-8 corrected errors */
+ return ((status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) >> 4) + 2;
+ }
+}
+
+static const struct spinand_info gigadevice_spinand_table[] = {
+ SPINAND_INFO("GD5F1GQ4xA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf1),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
+ gd5fxgq4xa_ecc_get_status)),
+ SPINAND_INFO("GD5F2GQ4xA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf2),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
+ gd5fxgq4xa_ecc_get_status)),
+ SPINAND_INFO("GD5F4GQ4xA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf4),
+ NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
+ gd5fxgq4xa_ecc_get_status)),
+ SPINAND_INFO("GD5F4GQ4RC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xa4, 0x68),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgq4xc_oob_256_ops,
+ gd5fxgq4ufxxg_ecc_get_status)),
+ SPINAND_INFO("GD5F4GQ4UC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb4, 0x68),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgq4xc_oob_256_ops,
+ gd5fxgq4ufxxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ4UExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd1),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ4RExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xc1),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F2GQ4UExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd2),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F2GQ4RExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xc2),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ4UFxxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb1, 0x48),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4ufxxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ5UExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x51),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq5xexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ5RExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x41),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq5xexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F2GQ5UExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x52),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq5xexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F2GQ5RExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x42),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq5xexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F4GQ6UExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x55),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 2, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq5xexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F4GQ6RExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x45),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 2, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq5xexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GM7UExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x91),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GM7RExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x81),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F2GM7UExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F2GM7RExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x82),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F4GM8UExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x95),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F4GM8RExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x85),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F2GQ5xExxH",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ5RExxH",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x21),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ4RExxH",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xc9),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+};
+
+static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
+ .id = SPINAND_MFR_GIGADEVICE,
+ .name = "GigaDevice",
+ .chips = gigadevice_spinand_table,
+ .nchips = ARRAY_SIZE(gigadevice_spinand_table),
+ .ops = &gigadevice_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
new file mode 100644
index 0000000000..3dfc7e1e52
--- /dev/null
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Macronix
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_MACRONIX 0xC2
+#define MACRONIX_ECCSR_MASK 0x0F
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int mx35lfxge4ab_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 2;
+ region->length = mtd->oobsize - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mx35lfxge4ab_ooblayout = {
+ .ecc = mx35lfxge4ab_ooblayout_ecc,
+ .free = mx35lfxge4ab_ooblayout_free,
+};
+
+static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_DUMMY(1, 1),
+ SPI_MEM_OP_DATA_IN(1, eccsr, 1));
+ int ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ *eccsr &= MACRONIX_ECCSR_MASK;
+ return 0;
+}
+
+static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ u8 eccsr;
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ /*
+ * Let's try to retrieve the real maximum number of bitflips
+ * in order to avoid forcing the wear-leveling layer to move
+ * data around if it's not necessary.
+ */
+ if (mx35lf1ge4ab_get_eccsr(spinand, spinand->scratchbuf))
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ eccsr = *spinand->scratchbuf;
+ if (WARN_ON(eccsr > nanddev_get_ecc_conf(nand)->strength ||
+ !eccsr))
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ return eccsr;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct spinand_info macronix_spinand_table[] = {
+ SPINAND_INFO("MX35LF1GE4AB",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35LF2GE4AB",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_INFO("MX35LF2GE4AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x26),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35LF4GE4AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37),
+ NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35LF1G24AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_INFO("MX35LF2G24AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_INFO("MX35LF4G24AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_INFO("MX31LF1GE4BC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX31UF1GE4BC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9e),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+
+ SPINAND_INFO("MX35LF2G14AC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x20),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF4G24AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb5),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF4GE4AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF2G14AC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF2G24AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa4),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF2GE4AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF2GE4AC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF1G14AC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x90),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF1G24AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x94),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF1GE4AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF1GE4AC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+
+ SPINAND_INFO("MX31LF2GE4BC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x2e),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX3UF2GE4BC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+};
+
+static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer macronix_spinand_manufacturer = {
+ .id = SPINAND_MFR_MACRONIX,
+ .name = "Macronix",
+ .chips = macronix_spinand_table,
+ .nchips = ARRAY_SIZE(macronix_spinand_table),
+ .ops = &macronix_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
new file mode 100644
index 0000000000..12601bc422
--- /dev/null
+++ b/drivers/mtd/nand/spi/micron.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2017 Micron Technology, Inc.
+ *
+ * Authors:
+ * Peter Pan <peterpandong@micron.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_MICRON 0x2c
+
+#define MICRON_STATUS_ECC_MASK GENMASK(6, 4)
+#define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4)
+#define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4)
+#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
+#define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4)
+
+#define MICRON_CFG_CR BIT(0)
+
+/*
+ * As per the datasheet, die selection is done through bit 6 of the
+ * Die Select Register (address 0xD0).
+ */
+#define MICRON_DIE_SELECT_REG 0xD0
+
+#define MICRON_SELECT_DIE(x) ((x) << 6)
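
Selecting a die is therefore a single SET_FEATURE write to register 0xD0: 0x00 selects die 0 and 0x40 selects die 1, which is exactly what micron_select_target() below stages in the scratch buffer. A one-line check of the macro's arithmetic:

#include <stdio.h>

#define MICRON_SELECT_DIE(x)	((x) << 6)

int main(void)
{
	unsigned int die;

	/* Only targets 0 and 1 are valid, as enforced by the driver. */
	for (die = 0; die < 2; die++)
		printf("die %u -> register 0xD0 value 0x%02x\n",
		       die, MICRON_SELECT_DIE(die));

	return 0;
}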
+
+static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(x4_write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(x4_update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+/* Micron MT29F2G01AAAED Device */
+static SPINAND_OP_VARIANTS(x4_read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(x1_write_cache_variants,
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(x1_update_cache_variants,
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = mtd->oobsize / 2;
+ region->length = mtd->oobsize / 2;
+
+ return 0;
+}
+
+static int micron_8_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = (mtd->oobsize / 2) - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops micron_8_ooblayout = {
+ .ecc = micron_8_ooblayout_ecc,
+ .free = micron_8_ooblayout_free,
+};
+
+static int micron_4_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+
+ if (section >= spinand->base.memorg.pagesize /
+ mtd->ecc_step_size)
+ return -ERANGE;
+
+ region->offset = (section * 16) + 8;
+ region->length = 8;
+
+ return 0;
+}
+
+static int micron_4_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+
+ if (section >= spinand->base.memorg.pagesize /
+ mtd->ecc_step_size)
+ return -ERANGE;
+
+ if (section) {
+ region->offset = 16 * section;
+ region->length = 8;
+ } else {
+ /* section 0 has two bytes reserved for the BBM */
+ region->offset = 2;
+ region->length = 6;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops micron_4_ooblayout = {
+ .ecc = micron_4_ooblayout_ecc,
+ .free = micron_4_ooblayout_free,
+};
+
+static int micron_select_target(struct spinand_device *spinand,
+ unsigned int target)
+{
+ struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
+ spinand->scratchbuf);
+
+ if (target > 1)
+ return -EINVAL;
+
+ *spinand->scratchbuf = MICRON_SELECT_DIE(target);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int micron_8_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & MICRON_STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case MICRON_STATUS_ECC_1TO3_BITFLIPS:
+ return 3;
+
+ case MICRON_STATUS_ECC_4TO6_BITFLIPS:
+ return 6;
+
+ case MICRON_STATUS_ECC_7TO8_BITFLIPS:
+ return 8;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct spinand_info micron_spinand_table[] = {
+ /* M79A 2Gb 3.3V */
+ SPINAND_INFO("MT29F2G01ABAGD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M79A 2Gb 1.8V */
+ SPINAND_INFO("MT29F2G01ABBGD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M78A 1Gb 3.3V */
+ SPINAND_INFO("MT29F1G01ABAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M78A 1Gb 1.8V */
+ SPINAND_INFO("MT29F1G01ABAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M79A 4Gb 3.3V */
+ SPINAND_INFO("MT29F4G01ADAGD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x36),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 80, 2, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
+ /* M70A 4Gb 3.3V */
+ SPINAND_INFO("MT29F4G01ABAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x34),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M70A 4Gb 1.8V */
+ SPINAND_INFO("MT29F4G01ABBFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M70A 8Gb 3.3V */
+ SPINAND_INFO("MT29F8G01ADAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x46),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
+ /* M70A 8Gb 1.8V */
+ SPINAND_INFO("MT29F8G01ADBFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x47),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
+ &x4_write_cache_variants,
+ &x4_update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
+ /* M69A 2Gb 3.3V */
+ SPINAND_INFO("MT29F2G01AAAED",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9F),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 80, 2, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&x4_read_cache_variants,
+ &x1_write_cache_variants,
+ &x1_update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_4_ooblayout, NULL)),
+};
+
+static int micron_spinand_init(struct spinand_device *spinand)
+{
+ /*
+ * The M70A device series enables the Continuous Read feature at
+ * power-up, which is not supported. Disable this bit to avoid any
+ * possible failure.
+ */
+ if (spinand->flags & SPINAND_HAS_CR_FEAT_BIT)
+ return spinand_upd_cfg(spinand, MICRON_CFG_CR, 0);
+
+ return 0;
+}
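
spinand_upd_cfg() is a read-modify-write of the configuration register, so the call above clears just bit 0 (MICRON_CFG_CR) and leaves the other bits alone. A sketch of that masking follows; the mask/value shape is assumed from the call signature, and the register contents are invented:

#include <stdio.h>

#define MICRON_CFG_CR	(1 << 0)

/* Model of an upd_cfg(mask, val)-style read-modify-write. */
static unsigned char upd_cfg(unsigned char reg, unsigned char mask,
			     unsigned char val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	unsigned char cfg = 0x11;	/* invented: CR set plus another bit */

	printf("cfg 0x%02x -> 0x%02x after clearing CR\n",
	       cfg, upd_cfg(cfg, MICRON_CFG_CR, 0));

	return 0;
}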
+
+static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = {
+ .init = micron_spinand_init,
+};
+
+const struct spinand_manufacturer micron_spinand_manufacturer = {
+ .id = SPINAND_MFR_MICRON,
+ .name = "Micron",
+ .chips = micron_spinand_table,
+ .nchips = ARRAY_SIZE(micron_spinand_table),
+ .ops = &micron_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
new file mode 100644
index 0000000000..519ade513c
--- /dev/null
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Jeff Kletsky
+ *
+ * Author: Jeff Kletsky <git-commits@allycomm.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_PARAGON 0xa1
+
+#define PN26G0XA_STATUS_ECC_BITMASK (3 << 4)
+
+#define PN26G0XA_STATUS_ECC_NONE_DETECTED (0 << 4)
+#define PN26G0XA_STATUS_ECC_1_7_CORRECTED (1 << 4)
+#define PN26G0XA_STATUS_ECC_ERRORED (2 << 4)
+#define PN26G0XA_STATUS_ECC_8_CORRECTED (3 << 4)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int pn26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = 6 + (15 * section); /* 4 BBM + 2 user bytes */
+ region->length = 13;
+
+ return 0;
+}
+
+static int pn26g0xa_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 4)
+ return -ERANGE;
+
+ if (section == 4) {
+ region->offset = 64;
+ region->length = 64;
+ } else {
+ region->offset = 4 + (15 * section);
+ region->length = 2;
+ }
+
+ return 0;
+}
+
+static int pn26g0xa_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & PN26G0XA_STATUS_ECC_BITMASK) {
+ case PN26G0XA_STATUS_ECC_NONE_DETECTED:
+ return 0;
+
+ case PN26G0XA_STATUS_ECC_1_7_CORRECTED:
+ return 7; /* Return upper limit by convention */
+
+ case PN26G0XA_STATUS_ECC_8_CORRECTED:
+ return 8;
+
+ case PN26G0XA_STATUS_ECC_ERRORED:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct mtd_ooblayout_ops pn26g0xa_ooblayout = {
+ .ecc = pn26g0xa_ooblayout_ecc,
+ .free = pn26g0xa_ooblayout_free,
+};
+
+
+static const struct spinand_info paragon_spinand_table[] = {
+ SPINAND_INFO("PN26G01A",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe1),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 21, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&pn26g0xa_ooblayout,
+ pn26g0xa_ecc_get_status)),
+ SPINAND_INFO("PN26G02A",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe2),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 41, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&pn26g0xa_ooblayout,
+ pn26g0xa_ecc_get_status)),
+};
+
+static const struct spinand_manufacturer_ops paragon_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer paragon_spinand_manufacturer = {
+ .id = SPINAND_MFR_PARAGON,
+ .name = "Paragon",
+ .chips = paragon_spinand_table,
+ .nchips = ARRAY_SIZE(paragon_spinand_table),
+ .ops = &paragon_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
new file mode 100644
index 0000000000..bbbcaa87c0
--- /dev/null
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 exceet electronics GmbH
+ * Copyright (c) 2018 Kontron Electronics GmbH
+ *
+ * Author: Frieder Schrempf <frieder.schrempf@kontron.de>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+/* Kioxia is the new name of Toshiba Memory. */
+#define SPINAND_MFR_TOSHIBA 0x98
+#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_x4_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_x4_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+/*
+ * Backward compatibility for 1st generation Serial NAND devices
+ * which don't support Quad Program Load operation.
+ */
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
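+
+/*
+ * Note: because the two variant lists above contain only the x1 PROG LOAD
+ * opcodes, the core's op-variant selection can never pick a quad program
+ * load for the 1st-generation parts, whatever the controller supports.
+ */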
+
+static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 0)
+ return -ERANGE;
+
+ region->offset = mtd->oobsize / 2;
+ region->length = mtd->oobsize / 2;
+
+ return 0;
+}
+
+static int tx58cxgxsxraix_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 0)
+ return -ERANGE;
+
+ /* 2 bytes reserved for BBM */
+ region->offset = 2;
+ region->length = (mtd->oobsize / 2) - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops tx58cxgxsxraix_ooblayout = {
+ .ecc = tx58cxgxsxraix_ooblayout_ecc,
+ .free = tx58cxgxsxraix_ooblayout_free,
+};
+
+static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ u8 mbf = 0;
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ case TOSH_STATUS_ECC_HAS_BITFLIPS_T:
+ /*
+ * Let's try to retrieve the real maximum number of bitflips
+ * in order to avoid forcing the wear-leveling layer to move
+ * data around if it's not necessary.
+ */
+ if (spi_mem_exec_op(spinand->spimem, &op))
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ mbf = *(spinand->scratchbuf) >> 4;
+
+ if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf))
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ return mbf;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
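+
+/*
+ * Worked example for the readout above: if the vendor feature register at
+ * address 0x30 reads back 0x35, the maximum bitflip count is taken from the
+ * upper nibble, i.e. mbf = 0x35 >> 4 == 3, which is what gets reported as
+ * long as it does not exceed the declared ECC strength of 8.
+ */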
+
+static const struct spinand_info toshiba_spinand_table[] = {
+ /* 3.3V 1Gb (1st generation) */
+ SPINAND_INFO("TC58CVG0S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xC2),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 2Gb (1st generation) */
+ SPINAND_INFO("TC58CVG1S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCB),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 4Gb (1st generation) */
+ SPINAND_INFO("TC58CVG2S0HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCD),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 1Gb (1st generation) */
+ SPINAND_INFO("TC58CYG0S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xB2),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 2Gb (1st generation) */
+ SPINAND_INFO("TC58CYG1S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBB),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 4Gb (1st generation) */
+ SPINAND_INFO("TC58CYG2S0HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBD),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+
+	/*
+	 * 2nd-generation serial NAND parts have a HOLD_D bit that is
+	 * equivalent to QE_BIT.
+	 */
+ /* 3.3V 1Gb (2nd generation) */
+ SPINAND_INFO("TC58CVG0S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE2),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 2Gb (2nd generation) */
+ SPINAND_INFO("TC58CVG1S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xEB),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 4Gb (2nd generation) */
+ SPINAND_INFO("TC58CVG2S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xED),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 8Gb (2nd generation) */
+ SPINAND_INFO("TH58CVG3S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE4),
+ NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 1Gb (2nd generation) */
+ SPINAND_INFO("TC58CYG0S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD2),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 2Gb (2nd generation) */
+ SPINAND_INFO("TC58CYG1S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDB),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 4Gb (2nd generation) */
+ SPINAND_INFO("TC58CYG2S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDD),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 8Gb (2nd generation) */
+ SPINAND_INFO("TH58CYG3S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD4),
+ NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 1Gb (1st generation) */
+ SPINAND_INFO("TC58NYG0S3HBAI4",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA1),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 4Gb (1st generation) */
+ SPINAND_INFO("TH58NYG2S3HBAI4",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xAC),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 2, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 8Gb (1st generation) */
+ SPINAND_INFO("TH58NYG3S0HBAI6",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA3),
+ NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+};
+
+static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer toshiba_spinand_manufacturer = {
+ .id = SPINAND_MFR_TOSHIBA,
+ .name = "Toshiba",
+ .chips = toshiba_spinand_table,
+ .nchips = ARRAY_SIZE(toshiba_spinand_table),
+ .ops = &toshiba_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
new file mode 100644
index 0000000000..f507e37593
--- /dev/null
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 exceet electronics GmbH
+ *
+ * Authors:
+ * Frieder Schrempf <frieder.schrempf@exceet.de>
+ * Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_WINBOND 0xEF
+
+#define WINBOND_CFG_BUF_READ BIT(3)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 8;
+ region->length = 8;
+
+ return 0;
+}
+
+static int w25m02gv_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 2;
+ region->length = 6;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops w25m02gv_ooblayout = {
+ .ecc = w25m02gv_ooblayout_ecc,
+ .free = w25m02gv_ooblayout_free,
+};
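+
+/*
+ * With the 64-byte OOB of the parts below, the callbacks above resolve to
+ * ECC bytes 8-15, 24-31, 40-47 and 56-63, and free bytes 2-7, 18-23, 34-39
+ * and 50-55, leaving the first two bytes of each 16-byte chunk untouched
+ * (bytes 0-1 conventionally hold the bad block marker).
+ */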
+
+static int w25m02gv_select_target(struct spinand_device *spinand,
+ unsigned int target)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0xc2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1,
+ spinand->scratchbuf,
+ 1));
+
+ *spinand->scratchbuf = target;
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
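+
+/*
+ * Die selection sketch: the W25M02GV stacks two 1Gb dies behind one chip
+ * select (ntargets == 2 in the table below), so opcode 0xc2 with a single
+ * data byte (0 or 1) routes all following operations to that die.
+ */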
+
+static int w25n02kv_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = 64 + (16 * section);
+ region->length = 13;
+
+ return 0;
+}
+
+static int w25n02kv_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 2;
+ region->length = 14;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
+ .ecc = w25n02kv_ooblayout_ecc,
+ .free = w25n02kv_ooblayout_free,
+};
+
+static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ u8 mbf = 0;
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ /*
+ * Let's try to retrieve the real maximum number of bitflips
+ * in order to avoid forcing the wear-leveling layer to move
+ * data around if it's not necessary.
+ */
+ if (spi_mem_exec_op(spinand->spimem, &op))
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ mbf = *(spinand->scratchbuf) >> 4;
+
+ if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf))
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ return mbf;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct spinand_info winbond_spinand_table[] = {
+ SPINAND_INFO("W25M02GV",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
+ SPINAND_SELECT_TARGET(w25m02gv_select_target)),
+ SPINAND_INFO("W25N01GV",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x21),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+ SPINAND_INFO("W25N02KV",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
+};
+
+static int winbond_spinand_init(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int i;
+
+ /*
+ * Make sure all dies are in buffer read mode and not continuous read
+ * mode.
+ */
+ for (i = 0; i < nand->memorg.ntargets; i++) {
+ spinand_select_target(spinand, i);
+ spinand_upd_cfg(spinand, WINBOND_CFG_BUF_READ,
+ WINBOND_CFG_BUF_READ);
+ }
+
+ return 0;
+}
+
+static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = {
+ .init = winbond_spinand_init,
+};
+
+const struct spinand_manufacturer winbond_spinand_manufacturer = {
+ .id = SPINAND_MFR_WINBOND,
+ .name = "Winbond",
+ .chips = winbond_spinand_table,
+ .nchips = ARRAY_SIZE(winbond_spinand_table),
+ .ops = &winbond_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/xtx.c b/drivers/mtd/nand/spi/xtx.c
new file mode 100644
index 0000000000..3911520f71
--- /dev/null
+++ b/drivers/mtd/nand/spi/xtx.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author:
+ * Felix Matouschek <felix@matouschek.org>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_XTX 0x0B
+
+#define XT26G0XA_STATUS_ECC_MASK GENMASK(5, 2)
+#define XT26G0XA_STATUS_ECC_NO_DETECTED (0 << 2)
+#define XT26G0XA_STATUS_ECC_8_CORRECTED (3 << 4)
+#define XT26G0XA_STATUS_ECC_UNCOR_ERROR (2 << 4)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int xt26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 48;
+ region->length = 16;
+
+ return 0;
+}
+
+static int xt26g0xa_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 1;
+ region->length = 47;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops xt26g0xa_ooblayout = {
+ .ecc = xt26g0xa_ooblayout_ecc,
+ .free = xt26g0xa_ooblayout_free,
+};
+
+static int xt26g0xa_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ status = status & XT26G0XA_STATUS_ECC_MASK;
+
+ switch (status) {
+ case XT26G0XA_STATUS_ECC_NO_DETECTED:
+ return 0;
+ case XT26G0XA_STATUS_ECC_8_CORRECTED:
+ return 8;
+ case XT26G0XA_STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+ default:
+ break;
+ }
+
+ /* At this point values greater than (2 << 4) are invalid */
+ if (status > XT26G0XA_STATUS_ECC_UNCOR_ERROR)
+ return -EINVAL;
+
+ /* (1 << 2) through (7 << 2) are 1-7 corrected errors */
+ return status >> 2;
+}
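+
+/*
+ * Worked example: a masked status of 0x14 matches none of the switch cases
+ * above and is not above (2 << 4), so it falls through to the shift and
+ * reports 0x14 >> 2 == 5 corrected bitflips.
+ */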
+
+static const struct spinand_info xtx_spinand_table[] = {
+ SPINAND_INFO("XT26G01A",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xE1),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&xt26g0xa_ooblayout,
+ xt26g0xa_ecc_get_status)),
+ SPINAND_INFO("XT26G02A",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xE2),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&xt26g0xa_ooblayout,
+ xt26g0xa_ecc_get_status)),
+ SPINAND_INFO("XT26G04A",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xE3),
+ NAND_MEMORG(1, 2048, 64, 128, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&xt26g0xa_ooblayout,
+ xt26g0xa_ecc_get_status)),
+};
+
+static const struct spinand_manufacturer_ops xtx_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer xtx_spinand_manufacturer = {
+ .id = SPINAND_MFR_XTX,
+ .name = "XTX",
+ .chips = xtx_spinand_table,
+ .nchips = ARRAY_SIZE(xtx_spinand_table),
+ .ops = &xtx_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
new file mode 100644
index 0000000000..64d319e959
--- /dev/null
+++ b/drivers/mtd/nftlcore.c
@@ -0,0 +1,804 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Linux driver for NAND Flash Translation Layer
+ *
+ * Copyright © 1999 Machine Vision Holdings, Inc.
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ */
+
+#define PRERELEASE
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/hdreg.h>
+#include <linux/blkdev.h>
+
+#include <linux/kmod.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nftl.h>
+#include <linux/mtd/blktrans.h>
+
+/* Maximum number of loops while examining the next block, to have a
+   chance to detect consistency problems (they should never happen
+   because of the checks done during mounting). */
+
+#define MAX_LOOPS 10000
+
+
+static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct NFTLrecord *nftl;
+ unsigned long temp;
+
+ if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
+ return;
+ /* OK, this is moderately ugly. But probably safe. Alternatives? */
+ if (memcmp(mtd->name, "DiskOnChip", 10))
+ return;
+
+ pr_debug("NFTL: add_mtd for %s\n", mtd->name);
+
+ nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
+
+ if (!nftl)
+ return;
+
+ nftl->mbd.mtd = mtd;
+ nftl->mbd.devnum = -1;
+
+ nftl->mbd.tr = tr;
+
+ if (NFTL_mount(nftl) < 0) {
+ printk(KERN_WARNING "NFTL: could not mount device\n");
+ kfree(nftl);
+ return;
+ }
+
+ /* OK, it's a new one. Set up all the data structures. */
+
+ /* Calculate geometry */
+ nftl->cylinders = 1024;
+ nftl->heads = 16;
+
+ temp = nftl->cylinders * nftl->heads;
+ nftl->sectors = nftl->mbd.size / temp;
+ if (nftl->mbd.size % temp) {
+ nftl->sectors++;
+ temp = nftl->cylinders * nftl->sectors;
+ nftl->heads = nftl->mbd.size / temp;
+
+ if (nftl->mbd.size % temp) {
+ nftl->heads++;
+ temp = nftl->heads * nftl->sectors;
+ nftl->cylinders = nftl->mbd.size / temp;
+ }
+ }
+
+ if (nftl->mbd.size != nftl->heads * nftl->cylinders * nftl->sectors) {
+		/*
+		 * Oh no, we don't have
+		 * mbd.size == heads * cylinders * sectors
+		 */
+ printk(KERN_WARNING "NFTL: cannot calculate a geometry to "
+ "match size of 0x%lx.\n", nftl->mbd.size);
+ printk(KERN_WARNING "NFTL: using C:%d H:%d S:%d "
+ "(== 0x%lx sects)\n",
+ nftl->cylinders, nftl->heads , nftl->sectors,
+ (long)nftl->cylinders * (long)nftl->heads *
+ (long)nftl->sectors );
+ }
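+
+	/*
+	 * Worked example of the rounding above, e.g. for mbd.size == 40000
+	 * sectors: 40000 / (1024 * 16) leaves a remainder, so sectors
+	 * becomes 3; 40000 / (1024 * 3) leaves a remainder, so heads
+	 * becomes 14; cylinders then becomes 40000 / (14 * 3) == 952.
+	 * Since 952 * 14 * 3 == 39984 != 40000, the warning above fires.
+	 */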
+
+ if (add_mtd_blktrans_dev(&nftl->mbd)) {
+ kfree(nftl->ReplUnitTable);
+ kfree(nftl->EUNtable);
+ kfree(nftl);
+ return;
+ }
+#ifdef PSYCHO_DEBUG
+ printk(KERN_INFO "NFTL: Found new nftl%c\n", nftl->mbd.devnum + 'a');
+#endif
+}
+
+static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct NFTLrecord *nftl = (void *)dev;
+
+ pr_debug("NFTL: remove_dev (i=%d)\n", dev->devnum);
+
+ del_mtd_blktrans_dev(dev);
+ kfree(nftl->ReplUnitTable);
+ kfree(nftl->EUNtable);
+}
+
+/*
+ * Read oob data from flash
+ */
+int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf)
+{
+ loff_t mask = mtd->writesize - 1;
+ struct mtd_oob_ops ops = { };
+ int res;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = offs & mask;
+ ops.ooblen = len;
+ ops.oobbuf = buf;
+ ops.datbuf = NULL;
+
+ res = mtd_read_oob(mtd, offs & ~mask, &ops);
+ *retlen = ops.oobretlen;
+ return res;
+}
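+
+/*
+ * The split above means the low bits of offs select a position inside the
+ * page's OOB area while the page-aligned bits select the page itself:
+ * assuming the 512-byte writesize of the DiskOnChip devices this layer
+ * targets, an offs of block * EraseSize + SECTORSIZE + 8 addresses OOB
+ * bytes from offset 8 of the unit's second 512-byte page.
+ */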
+
+/*
+ * Write oob data to flash
+ */
+int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf)
+{
+ loff_t mask = mtd->writesize - 1;
+ struct mtd_oob_ops ops = { };
+ int res;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = offs & mask;
+ ops.ooblen = len;
+ ops.oobbuf = buf;
+ ops.datbuf = NULL;
+
+ res = mtd_write_oob(mtd, offs & ~mask, &ops);
+ *retlen = ops.oobretlen;
+ return res;
+}
+
+#ifdef CONFIG_NFTL_RW
+
+/*
+ * Write data and oob to flash
+ */
+static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf, uint8_t *oob)
+{
+ loff_t mask = mtd->writesize - 1;
+ struct mtd_oob_ops ops = { };
+ int res;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = offs & mask;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = oob;
+ ops.datbuf = buf;
+ ops.len = len;
+
+ res = mtd_write_oob(mtd, offs & ~mask, &ops);
+ *retlen = ops.retlen;
+ return res;
+}
+
+/* Actual NFTL access routines */
+/* NFTL_findfreeblock: Find a free Erase Unit on the NFTL partition. This function is used
+ * when the given Virtual Unit Chain needs a new Erase Unit added to it.
+ */
+static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate)
+{
+ /* For a given Virtual Unit Chain: find or create a free block and
+ add it to the chain */
+ /* We're passed the number of the last EUN in the chain, to save us from
+ having to look it up again */
+ u16 pot = nftl->LastFreeEUN;
+ int silly = nftl->nb_blocks;
+
+ /* Normally, we force a fold to happen before we run out of free blocks completely */
+ if (!desperate && nftl->numfreeEUNs < 2) {
+ pr_debug("NFTL_findfreeblock: there are too few free EUNs\n");
+ return BLOCK_NIL;
+ }
+
+ /* Scan for a free block */
+ do {
+ if (nftl->ReplUnitTable[pot] == BLOCK_FREE) {
+ nftl->LastFreeEUN = pot;
+ nftl->numfreeEUNs--;
+ return pot;
+ }
+
+ /* This will probably point to the MediaHdr unit itself,
+ right at the beginning of the partition. But that unit
+ (and the backup unit too) should have the UCI set
+ up so that it's not selected for overwriting */
+ if (++pot > nftl->lastEUN)
+ pot = le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN);
+
+ if (!silly--) {
+ printk("Argh! No free blocks found! LastFreeEUN = %d, "
+ "FirstEUN = %d\n", nftl->LastFreeEUN,
+ le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN));
+ return BLOCK_NIL;
+ }
+ } while (pot != nftl->LastFreeEUN);
+
+ return BLOCK_NIL;
+}
+
+static u16 NFTL_foldchain(struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock)
+{
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ u16 BlockMap[MAX_SECTORS_PER_UNIT];
+ unsigned char BlockLastState[MAX_SECTORS_PER_UNIT];
+ unsigned char BlockFreeFound[MAX_SECTORS_PER_UNIT];
+ unsigned int thisEUN;
+ int block;
+ int silly;
+ unsigned int targetEUN;
+ struct nftl_oob oob;
+ int inplace = 1;
+ size_t retlen;
+
+ memset(BlockMap, 0xff, sizeof(BlockMap));
+ memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
+
+ thisEUN = nftl->EUNtable[thisVUC];
+
+ if (thisEUN == BLOCK_NIL) {
+ printk(KERN_WARNING "Trying to fold non-existent "
+ "Virtual Unit Chain %d!\n", thisVUC);
+ return BLOCK_NIL;
+ }
+
+ /* Scan to find the Erase Unit which holds the actual data for each
+ 512-byte block within the Chain.
+ */
+ silly = MAX_LOOPS;
+ targetEUN = BLOCK_NIL;
+ while (thisEUN <= nftl->lastEUN ) {
+ unsigned int status, foldmark;
+
+ targetEUN = thisEUN;
+		for (block = 0; block < nftl->EraseSize / 512; block++) {
+			nftl_read_oob(mtd, (thisEUN * nftl->EraseSize) +
+				      (block * 512), 16, &retlen,
+				      (char *)&oob);
+ if (block == 2) {
+ foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1;
+ if (foldmark == FOLD_MARK_IN_PROGRESS) {
+ pr_debug("Write Inhibited on EUN %d\n", thisEUN);
+ inplace = 0;
+ } else {
+ /* There's no other reason not to do inplace,
+ except ones that come later. So we don't need
+ to preserve inplace */
+ inplace = 1;
+ }
+ }
+ status = oob.b.Status | oob.b.Status1;
+ BlockLastState[block] = status;
+
+			switch (status) {
+ case SECTOR_FREE:
+ BlockFreeFound[block] = 1;
+ break;
+
+ case SECTOR_USED:
+ if (!BlockFreeFound[block])
+ BlockMap[block] = thisEUN;
+ else
+ printk(KERN_WARNING
+ "SECTOR_USED found after SECTOR_FREE "
+ "in Virtual Unit Chain %d for block %d\n",
+ thisVUC, block);
+ break;
+ case SECTOR_DELETED:
+ if (!BlockFreeFound[block])
+ BlockMap[block] = BLOCK_NIL;
+ else
+ printk(KERN_WARNING
+ "SECTOR_DELETED found after SECTOR_FREE "
+ "in Virtual Unit Chain %d for block %d\n",
+ thisVUC, block);
+ break;
+
+ case SECTOR_IGNORE:
+ break;
+ default:
+ printk("Unknown status for block %d in EUN %d: %x\n",
+ block, thisEUN, status);
+ }
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "Infinite loop in Virtual Unit Chain 0x%x\n",
+ thisVUC);
+ return BLOCK_NIL;
+ }
+
+ thisEUN = nftl->ReplUnitTable[thisEUN];
+ }
+
+ if (inplace) {
+ /* We're being asked to be a fold-in-place. Check
+ that all blocks which actually have data associated
+ with them (i.e. BlockMap[block] != BLOCK_NIL) are
+ either already present or SECTOR_FREE in the target
+ block. If not, we're going to have to fold out-of-place
+ anyway.
+ */
+ for (block = 0; block < nftl->EraseSize / 512 ; block++) {
+ if (BlockLastState[block] != SECTOR_FREE &&
+ BlockMap[block] != BLOCK_NIL &&
+ BlockMap[block] != targetEUN) {
+ pr_debug("Setting inplace to 0. VUC %d, "
+ "block %d was %x lastEUN, "
+ "and is in EUN %d (%s) %d\n",
+ thisVUC, block, BlockLastState[block],
+ BlockMap[block],
+ BlockMap[block]== targetEUN ? "==" : "!=",
+ targetEUN);
+ inplace = 0;
+ break;
+ }
+ }
+
+ if (pendingblock >= (thisVUC * (nftl->EraseSize / 512)) &&
+ pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) &&
+ BlockLastState[pendingblock - (thisVUC * (nftl->EraseSize / 512))] !=
+ SECTOR_FREE) {
+ pr_debug("Pending write not free in EUN %d. "
+ "Folding out of place.\n", targetEUN);
+ inplace = 0;
+ }
+ }
+
+ if (!inplace) {
+ pr_debug("Cannot fold Virtual Unit Chain %d in place. "
+ "Trying out-of-place\n", thisVUC);
+ /* We need to find a targetEUN to fold into. */
+ targetEUN = NFTL_findfreeblock(nftl, 1);
+ if (targetEUN == BLOCK_NIL) {
+ /* Ouch. Now we're screwed. We need to do a
+ fold-in-place of another chain to make room
+ for this one. We need a better way of selecting
+ which chain to fold, because makefreeblock will
+ only ask us to fold the same one again.
+ */
+ printk(KERN_WARNING
+ "NFTL_findfreeblock(desperate) returns 0xffff.\n");
+ return BLOCK_NIL;
+ }
+ } else {
+ /* We put a fold mark in the chain we are folding only if we
+ fold in place to help the mount check code. If we do not fold in
+ place, it is possible to find the valid chain by selecting the
+ longer one */
+ oob.u.c.FoldMark = oob.u.c.FoldMark1 = cpu_to_le16(FOLD_MARK_IN_PROGRESS);
+ oob.u.c.unused = 0xffffffff;
+ nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 2 * 512 + 8,
+ 8, &retlen, (char *)&oob.u);
+ }
+
+ /* OK. We now know the location of every block in the Virtual Unit Chain,
+ and the Erase Unit into which we are supposed to be copying.
+ Go for it.
+ */
+ pr_debug("Folding chain %d into unit %d\n", thisVUC, targetEUN);
+ for (block = 0; block < nftl->EraseSize / 512 ; block++) {
+ unsigned char movebuf[512];
+ int ret;
+
+ /* If it's in the target EUN already, or if it's pending write, do nothing */
+ if (BlockMap[block] == targetEUN ||
+ (pendingblock == (thisVUC * (nftl->EraseSize / 512) + block))) {
+ continue;
+ }
+
+		/* only copy non-free blocks (free blocks can only
+		   happen in case of media errors or deleted blocks) */
+ if (BlockMap[block] == BLOCK_NIL)
+ continue;
+
+ ret = mtd_read(mtd,
+ (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ 512,
+ &retlen,
+ movebuf);
+ if (ret < 0 && !mtd_is_bitflip(ret)) {
+ ret = mtd_read(mtd,
+ (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ 512,
+ &retlen,
+ movebuf);
+ if (ret != -EIO)
+ printk("Error went away on retry.\n");
+ }
+ memset(&oob, 0xff, sizeof(struct nftl_oob));
+ oob.b.Status = oob.b.Status1 = SECTOR_USED;
+
+ nftl_write(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) +
+ (block * 512), 512, &retlen, movebuf, (char *)&oob);
+ }
+
+ /* add the header so that it is now a valid chain */
+ oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
+ oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = BLOCK_NIL;
+
+ nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8,
+ 8, &retlen, (char *)&oob.u);
+
+ /* OK. We've moved the whole lot into the new block. Now we have to free the original blocks. */
+
+ /* At this point, we have two different chains for this Virtual Unit, and no way to tell
+ them apart. If we crash now, we get confused. However, both contain the same data, so we
+ shouldn't actually lose data in this case. It's just that when we load up on a medium which
+ has duplicate chains, we need to free one of the chains because it's not necessary any more.
+ */
+ thisEUN = nftl->EUNtable[thisVUC];
+ pr_debug("Want to erase\n");
+
+ /* For each block in the old chain (except the targetEUN of course),
+ free it and make it available for future use */
+ while (thisEUN <= nftl->lastEUN && thisEUN != targetEUN) {
+ unsigned int EUNtmp;
+
+ EUNtmp = nftl->ReplUnitTable[thisEUN];
+
+ if (NFTL_formatblock(nftl, thisEUN) < 0) {
+ /* could not erase : mark block as reserved
+ */
+ nftl->ReplUnitTable[thisEUN] = BLOCK_RESERVED;
+ } else {
+ /* correctly erased : mark it as free */
+ nftl->ReplUnitTable[thisEUN] = BLOCK_FREE;
+ nftl->numfreeEUNs++;
+ }
+ thisEUN = EUNtmp;
+ }
+
+ /* Make this the new start of chain for thisVUC */
+ nftl->ReplUnitTable[targetEUN] = BLOCK_NIL;
+ nftl->EUNtable[thisVUC] = targetEUN;
+
+ return targetEUN;
+}
+
+static u16 NFTL_makefreeblock(struct NFTLrecord *nftl, unsigned pendingblock)
+{
+ /* This is the part that needs some cleverness applied.
+ For now, I'm doing the minimum applicable to actually
+ get the thing to work.
+ Wear-levelling and other clever stuff needs to be implemented
+ and we also need to do some assessment of the results when
+ the system loses power half-way through the routine.
+ */
+ u16 LongestChain = 0;
+ u16 ChainLength = 0, thislen;
+ u16 chain, EUN;
+
+ for (chain = 0; chain < le32_to_cpu(nftl->MediaHdr.FormattedSize) / nftl->EraseSize; chain++) {
+ EUN = nftl->EUNtable[chain];
+ thislen = 0;
+
+ while (EUN <= nftl->lastEUN) {
+ thislen++;
+ //printk("VUC %d reaches len %d with EUN %d\n", chain, thislen, EUN);
+ EUN = nftl->ReplUnitTable[EUN] & 0x7fff;
+ if (thislen > 0xff00) {
+ printk("Endless loop in Virtual Chain %d: Unit %x\n",
+ chain, EUN);
+ }
+ if (thislen > 0xff10) {
+ /* Actually, don't return failure. Just ignore this chain and
+ get on with it. */
+ thislen = 0;
+ break;
+ }
+ }
+
+ if (thislen > ChainLength) {
+ //printk("New longest chain is %d with length %d\n", chain, thislen);
+ ChainLength = thislen;
+ LongestChain = chain;
+ }
+ }
+
+ if (ChainLength < 2) {
+ printk(KERN_WARNING "No Virtual Unit Chains available for folding. "
+ "Failing request\n");
+ return BLOCK_NIL;
+ }
+
+	return NFTL_foldchain(nftl, LongestChain, pendingblock);
+}
+
+/* NFTL_findwriteunit: Return the unit number into which we can write
+ for this block. Make it available if it isn't already
+*/
+static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
+{
+ u16 lastEUN;
+ u16 thisVUC = block / (nftl->EraseSize / 512);
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ unsigned int writeEUN;
+ unsigned long blockofs = (block * 512) & (nftl->EraseSize -1);
+ size_t retlen;
+ int silly, silly2 = 3;
+ struct nftl_oob oob;
+
+ do {
+ /* Scan the media to find a unit in the VUC which has
+ a free space for the block in question.
+ */
+
+ /* This condition catches the 0x[7f]fff cases, as well as
+ being a sanity check for past-end-of-media access
+ */
+ lastEUN = BLOCK_NIL;
+ writeEUN = nftl->EUNtable[thisVUC];
+ silly = MAX_LOOPS;
+ while (writeEUN <= nftl->lastEUN) {
+ struct nftl_bci bci;
+ size_t retlen;
+ unsigned int status;
+
+ lastEUN = writeEUN;
+
+ nftl_read_oob(mtd,
+ (writeEUN * nftl->EraseSize) + blockofs,
+ 8, &retlen, (char *)&bci);
+
+ pr_debug("Status of block %d in EUN %d is %x\n",
+				 block, writeEUN, le16_to_cpu(bci.Status));
+
+ status = bci.Status | bci.Status1;
+			switch (status) {
+ case SECTOR_FREE:
+ return writeEUN;
+
+ case SECTOR_DELETED:
+ case SECTOR_USED:
+ case SECTOR_IGNORE:
+ break;
+ default:
+ // Invalid block. Don't use it any more. Must implement.
+ break;
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING
+ "Infinite loop in Virtual Unit Chain 0x%x\n",
+ thisVUC);
+ return BLOCK_NIL;
+ }
+
+ /* Skip to next block in chain */
+ writeEUN = nftl->ReplUnitTable[writeEUN];
+ }
+
+ /* OK. We didn't find one in the existing chain, or there
+ is no existing chain. */
+
+ /* Try to find an already-free block */
+ writeEUN = NFTL_findfreeblock(nftl, 0);
+
+ if (writeEUN == BLOCK_NIL) {
+ /* That didn't work - there were no free blocks just
+ waiting to be picked up. We're going to have to fold
+ a chain to make room.
+ */
+
+ /* First remember the start of this chain */
+ //u16 startEUN = nftl->EUNtable[thisVUC];
+
+ //printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC);
+ writeEUN = NFTL_makefreeblock(nftl, BLOCK_NIL);
+
+ if (writeEUN == BLOCK_NIL) {
+ /* OK, we accept that the above comment is
+ lying - there may have been free blocks
+ last time we called NFTL_findfreeblock(),
+ but they are reserved for when we're
+ desperate. Well, now we're desperate.
+ */
+ pr_debug("Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC);
+ writeEUN = NFTL_findfreeblock(nftl, 1);
+ }
+ if (writeEUN == BLOCK_NIL) {
+ /* Ouch. This should never happen - we should
+ always be able to make some room somehow.
+ If we get here, we've allocated more storage
+ space than actual media, or our makefreeblock
+ routine is missing something.
+ */
+ printk(KERN_WARNING "Cannot make free space.\n");
+ return BLOCK_NIL;
+ }
+ //printk("Restarting scan\n");
+ continue;
+ }
+
+ /* We've found a free block. Insert it into the chain. */
+
+ if (lastEUN != BLOCK_NIL) {
+ thisVUC |= 0x8000; /* It's a replacement block */
+ } else {
+ /* The first block in a new chain */
+ nftl->EUNtable[thisVUC] = writeEUN;
+ }
+
+ /* set up the actual EUN we're writing into */
+ /* Both in our cache... */
+ nftl->ReplUnitTable[writeEUN] = BLOCK_NIL;
+
+ /* ... and on the flash itself */
+ nftl_read_oob(mtd, writeEUN * nftl->EraseSize + 8, 8,
+ &retlen, (char *)&oob.u);
+
+ oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
+
+ nftl_write_oob(mtd, writeEUN * nftl->EraseSize + 8, 8,
+ &retlen, (char *)&oob.u);
+
+		/* we link the new block to the chain only after the
+		   block is ready. This avoids the case where the chain
+		   could point to a free block */
+ if (lastEUN != BLOCK_NIL) {
+ /* Both in our cache... */
+ nftl->ReplUnitTable[lastEUN] = writeEUN;
+ /* ... and on the flash itself */
+ nftl_read_oob(mtd, (lastEUN * nftl->EraseSize) + 8,
+ 8, &retlen, (char *)&oob.u);
+
+ oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum
+ = cpu_to_le16(writeEUN);
+
+ nftl_write_oob(mtd, (lastEUN * nftl->EraseSize) + 8,
+ 8, &retlen, (char *)&oob.u);
+ }
+
+ return writeEUN;
+
+ } while (silly2--);
+
+ printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n",
+ thisVUC);
+ return BLOCK_NIL;
+}
+
+static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
+ char *buffer)
+{
+ struct NFTLrecord *nftl = (void *)mbd;
+ u16 writeEUN;
+ unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
+ size_t retlen;
+ struct nftl_oob oob;
+
+ writeEUN = NFTL_findwriteunit(nftl, block);
+
+ if (writeEUN == BLOCK_NIL) {
+ printk(KERN_WARNING
+ "NFTL_writeblock(): Cannot find block to write to\n");
+ /* If we _still_ haven't got a block to use, we're screwed */
+ return 1;
+ }
+
+ memset(&oob, 0xff, sizeof(struct nftl_oob));
+ oob.b.Status = oob.b.Status1 = SECTOR_USED;
+
+ nftl_write(nftl->mbd.mtd, (writeEUN * nftl->EraseSize) + blockofs,
+ 512, &retlen, (char *)buffer, (char *)&oob);
+ return 0;
+}
+#endif /* CONFIG_NFTL_RW */
+
+static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
+ char *buffer)
+{
+ struct NFTLrecord *nftl = (void *)mbd;
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ u16 lastgoodEUN;
+ u16 thisEUN = nftl->EUNtable[block / (nftl->EraseSize / 512)];
+ unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
+ unsigned int status;
+ int silly = MAX_LOOPS;
+ size_t retlen;
+ struct nftl_bci bci;
+
+ lastgoodEUN = BLOCK_NIL;
+
+ if (thisEUN != BLOCK_NIL) {
+ while (thisEUN < nftl->nb_blocks) {
+ if (nftl_read_oob(mtd, (thisEUN * nftl->EraseSize) +
+ blockofs, 8, &retlen,
+ (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
+
+ switch (status) {
+ case SECTOR_FREE:
+ /* no modification of a sector should follow a free sector */
+ goto the_end;
+ case SECTOR_DELETED:
+ lastgoodEUN = BLOCK_NIL;
+ break;
+ case SECTOR_USED:
+ lastgoodEUN = thisEUN;
+ break;
+ case SECTOR_IGNORE:
+ break;
+ default:
+ printk("Unknown status for block %ld in EUN %d: %x\n",
+ block, thisEUN, status);
+ break;
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "Infinite loop in Virtual Unit Chain 0x%lx\n",
+ block / (nftl->EraseSize / 512));
+ return 1;
+ }
+ thisEUN = nftl->ReplUnitTable[thisEUN];
+ }
+ }
+
+ the_end:
+ if (lastgoodEUN == BLOCK_NIL) {
+ /* the requested block is not on the media, return all 0x00 */
+ memset(buffer, 0, 512);
+ } else {
+ loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs;
+ size_t retlen;
+ int res = mtd_read(mtd, ptr, 512, &retlen, buffer);
+
+ if (res < 0 && !mtd_is_bitflip(res))
+ return -EIO;
+ }
+ return 0;
+}
+
+static int nftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct NFTLrecord *nftl = (void *)dev;
+
+ geo->heads = nftl->heads;
+ geo->sectors = nftl->sectors;
+ geo->cylinders = nftl->cylinders;
+
+ return 0;
+}
+
+/****************************************************************************
+ *
+ * Module stuff
+ *
+ ****************************************************************************/
+
+
+static struct mtd_blktrans_ops nftl_tr = {
+ .name = "nftl",
+ .major = NFTL_MAJOR,
+ .part_bits = NFTL_PARTN_BITS,
+ .blksize = 512,
+ .getgeo = nftl_getgeo,
+ .readsect = nftl_readblock,
+#ifdef CONFIG_NFTL_RW
+ .writesect = nftl_writeblock,
+#endif
+ .add_mtd = nftl_add_mtd,
+ .remove_dev = nftl_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+module_mtd_blktrans(nftl_tr);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>, Fabrice Bellard <fabrice.bellard@netgem.com> et al.");
+MODULE_DESCRIPTION("Support code for NAND Flash Translation Layer, used on M-Systems DiskOnChip 2000 and Millennium");
+MODULE_ALIAS_BLOCKDEV_MAJOR(NFTL_MAJOR);
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
new file mode 100644
index 0000000000..75e86ed3e6
--- /dev/null
+++ b/drivers/mtd/nftlmount.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NFTL mount code with extensive checks
+ *
+ * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
+ * Copyright © 2000 Netgem S.A.
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ */
+
+#include <linux/kernel.h>
+#include <asm/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nftl.h>
+
+#define SECTORSIZE 512
+
+/* find_boot_record: Find the NFTL Media Header and its Spare copy, which contain the
+ * device information for the NFTL partition and the Bad Unit Table. Update
+ * the ReplUnitTable[] according to the Bad Unit Table. ReplUnitTable[] is used
+ * for Erase Unit management by the other routines in nftlcore.c and nftlmount.c.
+ */
+static int find_boot_record(struct NFTLrecord *nftl)
+{
+ struct nftl_uci1 h1;
+ unsigned int block, boot_record_count = 0;
+ size_t retlen;
+ u8 buf[SECTORSIZE];
+ struct NFTLMediaHeader *mh = &nftl->MediaHdr;
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ unsigned int i;
+
+ /* Assume logical EraseSize == physical erasesize for starting the scan.
+ We'll sort it out later if we find a MediaHeader which says otherwise */
+ /* Actually, we won't. The new DiskOnChip driver has already scanned
+ the MediaHeader and adjusted the virtual erasesize it presents in
+ the mtd device accordingly. We could even get rid of
+ nftl->EraseSize if there were any point in doing so. */
+ nftl->EraseSize = nftl->mbd.mtd->erasesize;
+ nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize;
+
+ nftl->MediaUnit = BLOCK_NIL;
+ nftl->SpareMediaUnit = BLOCK_NIL;
+
+ /* search for a valid boot record */
+ for (block = 0; block < nftl->nb_blocks; block++) {
+ int ret;
+
+		/* Check for the ANAND header first. Then we can whinge if it's
+		   found but later checks fail */
+ ret = mtd_read(mtd, block * nftl->EraseSize, SECTORSIZE,
+ &retlen, buf);
+ /* We ignore ret in case the ECC of the MediaHeader is invalid
+ (which is apparently acceptable) */
+ if (retlen != SECTORSIZE) {
+ static int warncount = 5;
+
+ if (warncount) {
+ printk(KERN_WARNING "Block read at 0x%x of mtd%d failed: %d\n",
+ block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
+ if (!--warncount)
+ printk(KERN_WARNING "Further failures for this block will not be printed\n");
+ }
+ continue;
+ }
+
+ if (retlen < 6 || memcmp(buf, "ANAND", 6)) {
+ /* ANAND\0 not found. Continue */
+#if 0
+ printk(KERN_DEBUG "ANAND header not found at 0x%x in mtd%d\n",
+ block * nftl->EraseSize, nftl->mbd.mtd->index);
+#endif
+ continue;
+ }
+
+ /* To be safer with BIOS, also use erase mark as discriminant */
+ ret = nftl_read_oob(mtd, block * nftl->EraseSize +
+ SECTORSIZE + 8, 8, &retlen,
+ (char *)&h1);
+ if (ret < 0) {
+ printk(KERN_WARNING "ANAND header found at 0x%x in mtd%d, but OOB data read failed (err %d)\n",
+ block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
+ continue;
+ }
+
+#if 0 /* Some people seem to have devices without ECC or erase marks
+ on the Media Header blocks. There are enough other sanity
+ checks in here that we can probably do without it.
+ */
+ if (le16_to_cpu(h1.EraseMark | h1.EraseMark1) != ERASE_MARK) {
+ printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but erase mark not present (0x%04x,0x%04x instead)\n",
+ block * nftl->EraseSize, nftl->mbd.mtd->index,
+ le16_to_cpu(h1.EraseMark), le16_to_cpu(h1.EraseMark1));
+ continue;
+ }
+
+ /* Finally reread to check ECC */
+ ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
+ &retlen, buf);
+ if (ret < 0) {
+ printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but ECC read failed (err %d)\n",
+ block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
+ continue;
+ }
+
+ /* Paranoia. Check the ANAND header is still there after the ECC read */
+ if (memcmp(buf, "ANAND", 6)) {
+ printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but went away on reread!\n",
+ block * nftl->EraseSize, nftl->mbd.mtd->index);
+ printk(KERN_NOTICE "New data are: %6ph\n", buf);
+ continue;
+ }
+#endif
+ /* OK, we like it. */
+
+ if (boot_record_count) {
+ /* We've already processed one. So we just check if
+ this one is the same as the first one we found */
+ if (memcmp(mh, buf, sizeof(struct NFTLMediaHeader))) {
+ printk(KERN_NOTICE "NFTL Media Headers at 0x%x and 0x%x disagree.\n",
+ nftl->MediaUnit * nftl->EraseSize, block * nftl->EraseSize);
+ /* if (debug) Print both side by side */
+ if (boot_record_count < 2) {
+ /* We haven't yet seen two real ones */
+ return -1;
+ }
+ continue;
+ }
+ if (boot_record_count == 1)
+ nftl->SpareMediaUnit = block;
+
+ /* Mark this boot record (NFTL MediaHeader) block as reserved */
+ nftl->ReplUnitTable[block] = BLOCK_RESERVED;
+
+ boot_record_count++;
+ continue;
+ }
+
+ /* This is the first we've seen. Copy the media header structure into place */
+ memcpy(mh, buf, sizeof(struct NFTLMediaHeader));
+
+ /* Do some sanity checks on it */
+#if 0
+The new DiskOnChip driver scans the MediaHeader itself, and presents a virtual
+erasesize based on UnitSizeFactor. So the erasesize we read from the mtd
+device is already correct.
+ if (mh->UnitSizeFactor == 0) {
+ printk(KERN_NOTICE "NFTL: UnitSizeFactor 0x00 detected. This violates the spec but we think we know what it means...\n");
+ } else if (mh->UnitSizeFactor < 0xfc) {
+ printk(KERN_NOTICE "Sorry, we don't support UnitSizeFactor 0x%02x\n",
+ mh->UnitSizeFactor);
+ return -1;
+ } else if (mh->UnitSizeFactor != 0xff) {
+ printk(KERN_NOTICE "WARNING: Support for NFTL with UnitSizeFactor 0x%02x is experimental\n",
+ mh->UnitSizeFactor);
+ nftl->EraseSize = nftl->mbd.mtd->erasesize << (0xff - mh->UnitSizeFactor);
+ nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize;
+ }
+#endif
+ nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN);
+ if ((nftl->nb_boot_blocks + 2) >= nftl->nb_blocks) {
+ printk(KERN_NOTICE "NFTL Media Header sanity check failed:\n");
+ printk(KERN_NOTICE "nb_boot_blocks (%d) + 2 > nb_blocks (%d)\n",
+ nftl->nb_boot_blocks, nftl->nb_blocks);
+ return -1;
+ }
+
+ nftl->numvunits = le32_to_cpu(mh->FormattedSize) / nftl->EraseSize;
+ if (nftl->numvunits > (nftl->nb_blocks - nftl->nb_boot_blocks - 2)) {
+ printk(KERN_NOTICE "NFTL Media Header sanity check failed:\n");
+ printk(KERN_NOTICE "numvunits (%d) > nb_blocks (%d) - nb_boot_blocks(%d) - 2\n",
+ nftl->numvunits, nftl->nb_blocks, nftl->nb_boot_blocks);
+ return -1;
+ }
+
+ nftl->mbd.size = nftl->numvunits * (nftl->EraseSize / SECTORSIZE);
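+		/*
+		 * e.g. an 8KiB EraseSize and a 16MiB FormattedSize give
+		 * numvunits == 2048 and an mbd.size of 2048 * 16 == 32768
+		 * 512-byte sectors.
+		 */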
+
+ /* If we're not using the last sectors in the device for some reason,
+ reduce nb_blocks accordingly so we forget they're there */
+ nftl->nb_blocks = le16_to_cpu(mh->NumEraseUnits) + le16_to_cpu(mh->FirstPhysicalEUN);
+
+		/* XXX: will be removed later */
+ nftl->lastEUN = nftl->nb_blocks - 1;
+
+ /* memory alloc */
+ nftl->EUNtable = kmalloc_array(nftl->nb_blocks, sizeof(u16),
+ GFP_KERNEL);
+ if (!nftl->EUNtable)
+ return -ENOMEM;
+
+ nftl->ReplUnitTable = kmalloc_array(nftl->nb_blocks,
+ sizeof(u16),
+ GFP_KERNEL);
+ if (!nftl->ReplUnitTable) {
+ kfree(nftl->EUNtable);
+ return -ENOMEM;
+ }
+
+ /* mark the bios blocks (blocks before NFTL MediaHeader) as reserved */
+ for (i = 0; i < nftl->nb_boot_blocks; i++)
+ nftl->ReplUnitTable[i] = BLOCK_RESERVED;
+ /* mark all remaining blocks as potentially containing data */
+ for (; i < nftl->nb_blocks; i++) {
+ nftl->ReplUnitTable[i] = BLOCK_NOTEXPLORED;
+ }
+
+ /* Mark this boot record (NFTL MediaHeader) block as reserved */
+ nftl->ReplUnitTable[block] = BLOCK_RESERVED;
+
+ /* read the Bad Erase Unit Table and modify ReplUnitTable[] accordingly */
+ for (i = 0; i < nftl->nb_blocks; i++) {
+#if 0
+The new DiskOnChip driver already scanned the bad block table. Just query it.
+ if ((i & (SECTORSIZE - 1)) == 0) {
+ /* read one sector for every SECTORSIZE of blocks */
+ ret = mtd->read(nftl->mbd.mtd,
+ block * nftl->EraseSize + i +
+ SECTORSIZE, SECTORSIZE,
+ &retlen, buf);
+ if (ret < 0) {
+ printk(KERN_NOTICE "Read of bad sector table failed (err %d)\n",
+ ret);
+ kfree(nftl->ReplUnitTable);
+ kfree(nftl->EUNtable);
+ return -1;
+ }
+ }
+ /* mark the Bad Erase Unit as RESERVED in ReplUnitTable */
+ if (buf[i & (SECTORSIZE - 1)] != 0xff)
+ nftl->ReplUnitTable[i] = BLOCK_RESERVED;
+#endif
+ if (mtd_block_isbad(nftl->mbd.mtd,
+ i * nftl->EraseSize))
+ nftl->ReplUnitTable[i] = BLOCK_RESERVED;
+ }
+
+ nftl->MediaUnit = block;
+ boot_record_count++;
+
+ } /* foreach (block) */
+
+	return boot_record_count ? 0 : -1;
+}
+
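+/* memcmpb: return 0 if the first n bytes of a all equal the byte c, else 1 */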
+static int memcmpb(void *a, int c, int n)
+{
+ int i;
+ for (i = 0; i < n; i++) {
+ if (c != ((unsigned char *)a)[i])
+ return 1;
+ }
+ return 0;
+}
+
+/* check_free_sectors: check that a free sector is actually FREE, i.e. all 0xff in both the data and OOB areas */
+static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int len,
+ int check_oob)
+{
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ size_t retlen;
+ int i, ret;
+ u8 *buf;
+
+ buf = kmalloc(SECTORSIZE + mtd->oobsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = -1;
+ for (i = 0; i < len; i += SECTORSIZE) {
+ if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
+ goto out;
+ if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
+ goto out;
+
+ if (check_oob) {
+			if (nftl_read_oob(mtd, address, mtd->oobsize,
+					  &retlen, &buf[SECTORSIZE]) < 0)
+ goto out;
+ if (memcmpb(buf + SECTORSIZE, 0xff, mtd->oobsize) != 0)
+ goto out;
+ }
+ address += SECTORSIZE;
+ }
+
+ ret = 0;
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+/* NFTL_formatblock: format an Erase Unit by erasing ALL Erase Zones in the Erase Unit and
+ * updating the NFTL metadata. Each erase operation is checked with check_free_sectors().
+ *
+ * Return: 0 on success, -1 on error.
+ *
+ * ToDo: 1. Is it really necessary to call check_free_sectors() after erasing?
+ */
+int NFTL_formatblock(struct NFTLrecord *nftl, int block)
+{
+ size_t retlen;
+ unsigned int nb_erases, erase_mark;
+ struct nftl_uci1 uci;
+ struct erase_info *instr = &nftl->instr;
+ struct mtd_info *mtd = nftl->mbd.mtd;
+
+ /* Read the Unit Control Information #1 for Wear-Leveling */
+ if (nftl_read_oob(mtd, block * nftl->EraseSize + SECTORSIZE + 8,
+ 8, &retlen, (char *)&uci) < 0)
+ goto default_uci1;
+
+	erase_mark = le16_to_cpu(uci.EraseMark | uci.EraseMark1);
+ if (erase_mark != ERASE_MARK) {
+ default_uci1:
+ uci.EraseMark = cpu_to_le16(ERASE_MARK);
+ uci.EraseMark1 = cpu_to_le16(ERASE_MARK);
+ uci.WearInfo = cpu_to_le32(0);
+ }
+
+ memset(instr, 0, sizeof(struct erase_info));
+
+ /* XXX: use async erase interface, XXX: test return code */
+ instr->addr = block * nftl->EraseSize;
+ instr->len = nftl->EraseSize;
+ if (mtd_erase(mtd, instr)) {
+ printk("Error while formatting block %d\n", block);
+ goto fail;
+ }
+
+ /* increase and write Wear-Leveling info */
+ nb_erases = le32_to_cpu(uci.WearInfo);
+ nb_erases++;
+
+ /* wrap (almost impossible with current flash) or free block */
+ if (nb_erases == 0)
+ nb_erases = 1;
+
+	/* check the "freeness" of the Erase Unit before updating metadata.
+	 * FixMe: is this check really necessary, since we already check the
+	 * return code of the erase operation?
+	 */
+ if (check_free_sectors(nftl, instr->addr, nftl->EraseSize, 1) != 0)
+ goto fail;
+
+	uci.WearInfo = cpu_to_le32(nb_erases);
+ if (nftl_write_oob(mtd, block * nftl->EraseSize + SECTORSIZE +
+ 8, 8, &retlen, (char *)&uci) < 0)
+ goto fail;
+ return 0;
+fail:
+ /* could not format, update the bad block table (caller is responsible
+ for setting the ReplUnitTable to BLOCK_RESERVED on failure) */
+ mtd_block_markbad(nftl->mbd.mtd, instr->addr);
+ return -1;
+}
+
+/* check_sectors_in_chain: Check that each sector of a Virtual Unit Chain is correct.
+ * Mark each incorrect sector as 'IGNORE'. This check is only done if the chain
+ * was being folded when NFTL was interrupted.
+ *
+ * The check_free_sectors() in this function is necessary: it is possible that,
+ * after the Data area has been written, the Block Control Information is not
+ * updated accordingly (due to a power failure or similar), which leaves the block
+ * in an inconsistent state. So we have to check whether a block is really FREE in
+ * this case. */
+static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block)
+{
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ unsigned int block, i, status;
+ struct nftl_bci bci;
+ int sectors_per_block;
+ size_t retlen;
+
+ sectors_per_block = nftl->EraseSize / SECTORSIZE;
+ block = first_block;
+ for (;;) {
+ for (i = 0; i < sectors_per_block; i++) {
+ if (nftl_read_oob(mtd,
+ block * nftl->EraseSize + i * SECTORSIZE,
+ 8, &retlen, (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
+
+			switch (status) {
+ case SECTOR_FREE:
+ /* verify that the sector is really free. If not, mark
+ as ignore */
+ if (memcmpb(&bci, 0xff, 8) != 0 ||
+ check_free_sectors(nftl, block * nftl->EraseSize + i * SECTORSIZE,
+ SECTORSIZE, 0) != 0) {
+ printk("Incorrect free sector %d in block %d: "
+ "marking it as ignored\n",
+ i, block);
+
+ /* sector not free actually : mark it as SECTOR_IGNORE */
+ bci.Status = SECTOR_IGNORE;
+ bci.Status1 = SECTOR_IGNORE;
+ nftl_write_oob(mtd, block *
+ nftl->EraseSize +
+ i * SECTORSIZE, 8,
+ &retlen, (char *)&bci);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* proceed to next Erase Unit on the chain */
+ block = nftl->ReplUnitTable[block];
+ if (!(block == BLOCK_NIL || block < nftl->nb_blocks))
+ printk("incorrect ReplUnitTable[] : %d\n", block);
+ if (block == BLOCK_NIL || block >= nftl->nb_blocks)
+ break;
+ }
+}
+
+/* calc_chain_length: Walk through a Virtual Unit Chain and estimate chain length */
+static int calc_chain_length(struct NFTLrecord *nftl, unsigned int first_block)
+{
+ unsigned int length = 0, block = first_block;
+
+ for (;;) {
+ length++;
+ /* avoid infinite loops, although this is guaranteed not to
+ happen because of the previous checks */
+ if (length >= nftl->nb_blocks) {
+ printk("nftl: length too long %d !\n", length);
+ break;
+ }
+
+ block = nftl->ReplUnitTable[block];
+ if (!(block == BLOCK_NIL || block < nftl->nb_blocks))
+ printk("incorrect ReplUnitTable[] : %d\n", block);
+ if (block == BLOCK_NIL || block >= nftl->nb_blocks)
+ break;
+ }
+ return length;
+}
+
+/* format_chain: Format an invalid Virtual Unit chain. It frees all the Erase Units in a
+ * Virtual Unit Chain, i.e. all the units are disconnected.
+ *
+ * It is not strictly correct to begin from the first block of the chain: if we
+ * are interrupted, we may later see a valid chain again because a block inside
+ * it still carries the first_block flag. But is that really a problem?
+ *
+ * FixMe: Figure out what the last statement means. What if a power failure
+ * occurs while we are in the for (;;) loop formatting blocks?
+ */
+static void format_chain(struct NFTLrecord *nftl, unsigned int first_block)
+{
+ unsigned int block = first_block, block1;
+
+ printk("Formatting chain at block %d\n", first_block);
+
+ for (;;) {
+ block1 = nftl->ReplUnitTable[block];
+
+ printk("Formatting block %d\n", block);
+ if (NFTL_formatblock(nftl, block) < 0) {
+ /* cannot format !!!! Mark it as Bad Unit */
+ nftl->ReplUnitTable[block] = BLOCK_RESERVED;
+ } else {
+ nftl->ReplUnitTable[block] = BLOCK_FREE;
+ }
+
+ /* goto next block on the chain */
+ block = block1;
+
+ if (!(block == BLOCK_NIL || block < nftl->nb_blocks))
+ printk("incorrect ReplUnitTable[] : %d\n", block);
+ if (block == BLOCK_NIL || block >= nftl->nb_blocks)
+ break;
+ }
+}
+
+/* check_and_mark_free_block: Verify that a block is free in the NFTL sense (it has a
+ * valid erase mark) or totally free (only 0xff).
+ *
+ * Definition: Free Erase Unit -- a properly erased/formatted Free Erase Unit should meet
+ * one of the following criteria:
+ * 1. it carries a valid erase mark in Unit Control Information #1, or
+ * 2. it is entirely erased, i.e. every byte reads 0xff. */
+static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
+{
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ struct nftl_uci1 h1;
+ unsigned int erase_mark;
+ size_t retlen;
+
+ /* check erase mark. */
+ if (nftl_read_oob(mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8,
+ &retlen, (char *)&h1) < 0)
+ return -1;
+
+ erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
+ if (erase_mark != ERASE_MARK) {
+ /* if no erase mark, the block must be totally free. This is
+ possible in two cases : empty filesystem or interrupted erase (very unlikely) */
+ if (check_free_sectors (nftl, block * nftl->EraseSize, nftl->EraseSize, 1) != 0)
+ return -1;
+
+ /* free block : write erase mark */
+ h1.EraseMark = cpu_to_le16(ERASE_MARK);
+ h1.EraseMark1 = cpu_to_le16(ERASE_MARK);
+ h1.WearInfo = cpu_to_le32(0);
+ if (nftl_write_oob(mtd,
+ block * nftl->EraseSize + SECTORSIZE + 8, 8,
+ &retlen, (char *)&h1) < 0)
+ return -1;
+ } else {
+#if 0
+ /* if erase mark present, need to skip it when doing check */
+ for (i = 0; i < nftl->EraseSize; i += SECTORSIZE) {
+ /* check free sector */
+ if (check_free_sectors (nftl, block * nftl->EraseSize + i,
+ SECTORSIZE, 0) != 0)
+ return -1;
+
+ if (nftl_read_oob(mtd, block * nftl->EraseSize + i,
+ 16, &retlen, buf) < 0)
+ return -1;
+ if (i == SECTORSIZE) {
+ /* skip erase mark */
+ if (memcmpb(buf, 0xff, 8))
+ return -1;
+ } else {
+ if (memcmpb(buf, 0xff, 16))
+ return -1;
+ }
+ }
+#endif
+ }
+
+ return 0;
+}
+
+/* get_fold_mark: Read the fold mark from Unit Control Information #2. We use
+ * FOLD_MARK_IN_PROGRESS to indicate that the folding of a Virtual Unit Chain is in
+ * progress. If the UCI #2 reads FOLD_MARK_IN_PROGRESS when mounting the NFTL, the
+ * previous folding process was interrupted for some reason and a clean up/check of
+ * the VUC is necessary.
+ *
+ * WARNING: returns 0 on read error
+ */
+static int get_fold_mark(struct NFTLrecord *nftl, unsigned int block)
+{
+ struct mtd_info *mtd = nftl->mbd.mtd;
+ struct nftl_uci2 uci;
+ size_t retlen;
+
+ if (nftl_read_oob(mtd, block * nftl->EraseSize + 2 * SECTORSIZE + 8,
+ 8, &retlen, (char *)&uci) < 0)
+ return 0;
+
+ return le16_to_cpu((uci.FoldMark | uci.FoldMark1));
+}
+
+int NFTL_mount(struct NFTLrecord *s)
+{
+ int i;
+ unsigned int first_logical_block, logical_block, rep_block, erase_mark;
+ unsigned int block, first_block, is_first_block;
+ int chain_length, do_format_chain;
+ struct nftl_uci0 h0;
+ struct nftl_uci1 h1;
+ struct mtd_info *mtd = s->mbd.mtd;
+ size_t retlen;
+
+ /* search for NFTL MediaHeader and Spare NFTL Media Header */
+ if (find_boot_record(s) < 0) {
+ printk("Could not find valid boot record\n");
+ return -1;
+ }
+
+ /* init the logical to physical table */
+ for (i = 0; i < s->nb_blocks; i++) {
+ s->EUNtable[i] = BLOCK_NIL;
+ }
+
+ /* first pass : explore each block chain */
+ first_logical_block = 0;
+ for (first_block = 0; first_block < s->nb_blocks; first_block++) {
+ /* if the block was not already explored, we can look at it */
+ if (s->ReplUnitTable[first_block] == BLOCK_NOTEXPLORED) {
+ block = first_block;
+ chain_length = 0;
+ do_format_chain = 0;
+
+ for (;;) {
+ /* read the block header. If error, we format the chain */
+ if (nftl_read_oob(mtd,
+ block * s->EraseSize + 8, 8,
+ &retlen, (char *)&h0) < 0 ||
+ nftl_read_oob(mtd,
+ block * s->EraseSize +
+ SECTORSIZE + 8, 8,
+ &retlen, (char *)&h1) < 0) {
+ s->ReplUnitTable[block] = BLOCK_NIL;
+ do_format_chain = 1;
+ break;
+ }
+
+ logical_block = le16_to_cpu ((h0.VirtUnitNum | h0.SpareVirtUnitNum));
+ rep_block = le16_to_cpu ((h0.ReplUnitNum | h0.SpareReplUnitNum));
+ erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
+
+ is_first_block = !(logical_block >> 15);
+ logical_block = logical_block & 0x7fff;
+
+ /* invalid/free block test */
+ if (erase_mark != ERASE_MARK || logical_block >= s->nb_blocks) {
+ if (chain_length == 0) {
+ /* if not currently in a chain, we can handle it safely */
+ if (check_and_mark_free_block(s, block) < 0) {
+ /* not really free: format it */
+ printk("Formatting block %d\n", block);
+ if (NFTL_formatblock(s, block) < 0) {
+ /* could not format: reserve the block */
+ s->ReplUnitTable[block] = BLOCK_RESERVED;
+ } else {
+ s->ReplUnitTable[block] = BLOCK_FREE;
+ }
+ } else {
+ /* free block: mark it */
+ s->ReplUnitTable[block] = BLOCK_FREE;
+ }
+ /* directly examine the next block. */
+ goto examine_ReplUnitTable;
+ } else {
+ /* the block was in a chain: this is bad. We
+ must format the whole chain */
+ printk("Block %d: free but referenced in chain %d\n",
+ block, first_block);
+ s->ReplUnitTable[block] = BLOCK_NIL;
+ do_format_chain = 1;
+ break;
+ }
+ }
+
+ /* we accept only first blocks here */
+ if (chain_length == 0) {
+ /* this block is not the first block in chain :
+ ignore it, it will be included in a chain
+ later, or marked as not explored */
+ if (!is_first_block)
+ goto examine_ReplUnitTable;
+ first_logical_block = logical_block;
+ } else {
+ if (logical_block != first_logical_block) {
+ printk("Block %d: incorrect logical block: %d expected: %d\n",
+ block, logical_block, first_logical_block);
+ /* the chain is incorrect : we must format it,
+ but we need to read it completely */
+ do_format_chain = 1;
+ }
+ if (is_first_block) {
+ /* we accept that a block is marked as first
+ block while being last block in a chain
+ only if the chain is being folded */
+ if (get_fold_mark(s, block) != FOLD_MARK_IN_PROGRESS ||
+ rep_block != 0xffff) {
+ printk("Block %d: incorrectly marked as first block in chain\n",
+ block);
+ /* the chain is incorrect : we must format it,
+ but we need to read it completely */
+ do_format_chain = 1;
+ } else {
+ printk("Block %d: folding in progress - ignoring first block flag\n",
+ block);
+ }
+ }
+ }
+ chain_length++;
+ if (rep_block == 0xffff) {
+ /* no more blocks after */
+ s->ReplUnitTable[block] = BLOCK_NIL;
+ break;
+ } else if (rep_block >= s->nb_blocks) {
+ printk("Block %d: referencing invalid block %d\n",
+ block, rep_block);
+ do_format_chain = 1;
+ s->ReplUnitTable[block] = BLOCK_NIL;
+ break;
+ } else if (s->ReplUnitTable[rep_block] != BLOCK_NOTEXPLORED) {
+ /* same problem as previous 'is_first_block' test:
+ we accept that the last block of a chain has
+ the first_block flag set if folding is in
+ progress. We handle here the case where the
+ last block appeared first */
+ if (s->ReplUnitTable[rep_block] == BLOCK_NIL &&
+ s->EUNtable[first_logical_block] == rep_block &&
+ get_fold_mark(s, first_block) == FOLD_MARK_IN_PROGRESS) {
+ /* EUNtable[] will be set after */
+ printk("Block %d: folding in progress - ignoring first block flag\n",
+ rep_block);
+ s->ReplUnitTable[block] = rep_block;
+ s->EUNtable[first_logical_block] = BLOCK_NIL;
+ } else {
+ printk("Block %d: referencing block %d already in another chain\n",
+ block, rep_block);
+ /* XXX: should handle correctly fold in progress chains */
+ do_format_chain = 1;
+ s->ReplUnitTable[block] = BLOCK_NIL;
+ }
+ break;
+ } else {
+ /* this is OK */
+ s->ReplUnitTable[block] = rep_block;
+ block = rep_block;
+ }
+ }
+
+ /* the chain was completely explored. Now we can decide
+ what to do with it */
+ if (do_format_chain) {
+ /* invalid chain : format it */
+ format_chain(s, first_block);
+ } else {
+ unsigned int first_block1, chain_to_format, chain_length1;
+ int fold_mark;
+
+ /* valid chain : get foldmark */
+ fold_mark = get_fold_mark(s, first_block);
+ if (fold_mark == 0) {
+ /* cannot get foldmark : format the chain */
+ printk("Could read foldmark at block %d\n", first_block);
+ format_chain(s, first_block);
+ } else {
+ if (fold_mark == FOLD_MARK_IN_PROGRESS)
+ check_sectors_in_chain(s, first_block);
+
+ /* now handle the case where we find two chains at the
+ same virtual address : we select the longer one,
+ because the shorter one is the one which was being
+ folded if the folding was not done in place */
+ first_block1 = s->EUNtable[first_logical_block];
+ if (first_block1 != BLOCK_NIL) {
+ /* XXX: what to do if same length ? */
+ chain_length1 = calc_chain_length(s, first_block1);
+ printk("Two chains at blocks %d (len=%d) and %d (len=%d)\n",
+ first_block1, chain_length1, first_block, chain_length);
+
+ if (chain_length >= chain_length1) {
+ chain_to_format = first_block1;
+ s->EUNtable[first_logical_block] = first_block;
+ } else {
+ chain_to_format = first_block;
+ }
+ format_chain(s, chain_to_format);
+ } else {
+ s->EUNtable[first_logical_block] = first_block;
+ }
+ }
+ }
+ }
+ examine_ReplUnitTable:;
+ }
+
+ /* second pass to format unreferenced blocks and init free block count */
+ s->numfreeEUNs = 0;
+ s->LastFreeEUN = le16_to_cpu(s->MediaHdr.FirstPhysicalEUN);
+
+ for (block = 0; block < s->nb_blocks; block++) {
+ if (s->ReplUnitTable[block] == BLOCK_NOTEXPLORED) {
+ printk("Unreferenced block %d, formatting it\n", block);
+ if (NFTL_formatblock(s, block) < 0)
+ s->ReplUnitTable[block] = BLOCK_RESERVED;
+ else
+ s->ReplUnitTable[block] = BLOCK_FREE;
+ }
+ if (s->ReplUnitTable[block] == BLOCK_FREE) {
+ s->numfreeEUNs++;
+ s->LastFreeEUN = block;
+ }
+ }
+
+ return 0;
+}
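
For orientation, a minimal, self-contained userspace sketch of the chain-walk
invariant that calc_chain_length() and format_chain() rely on: follow the
ReplUnitTable[] successor entries until BLOCK_NIL or an out-of-range value, and
cap the walk at nb_blocks so a corrupted table cannot loop forever. This is a
hypothetical illustration, not part of the patch:

    #include <stdio.h>

    #define BLOCK_NIL 0xffff            /* sentinel, mirrors the driver */

    /* Walk a ReplUnitTable-style successor array; returns the chain length. */
    static unsigned int chain_length(const unsigned int *repl,
                                     unsigned int nb_blocks,
                                     unsigned int first_block)
    {
        unsigned int length = 0, block = first_block;

        for (;;) {
            length++;
            if (length >= nb_blocks)    /* safety cap against cycles */
                break;
            block = repl[block];
            if (block == BLOCK_NIL || block >= nb_blocks)
                break;
        }
        return length;
    }

    int main(void)
    {
        /* toy table: 0 -> 2 -> 5 -> NIL, i.e. a chain of length 3 */
        unsigned int repl[6] = { 2, BLOCK_NIL, 5, BLOCK_NIL, BLOCK_NIL, BLOCK_NIL };

        printf("chain length from block 0: %u\n", chain_length(repl, 6, 0));
        return 0;
    }
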
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
new file mode 100644
index 0000000000..60738edcd5
--- /dev/null
+++ b/drivers/mtd/parsers/Kconfig
@@ -0,0 +1,222 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MTD_AR7_PARTS
+ tristate "TI AR7 partitioning parser"
+ help
+ This provides a partition parser for TI AR7 based devices.
+
+config MTD_BCM47XX_PARTS
+ tristate "BCM47XX partitioning parser"
+ depends on BCM47XX || ARCH_BCM_5301X
+ help
+ This provides a partition parser for devices based on BCM47xx
+ boards.
+
+config MTD_BCM63XX_PARTS
+ bool "BCM63XX CFE partitioning parser"
+ depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
+ select CRC32
+ select MTD_PARSER_IMAGETAG
+ help
+ This provides partition parsing for BCM63xx devices with CFE
+ bootloaders.
+
+config MTD_BRCM_U_BOOT
+ tristate "Broadcom's U-Boot partition parser"
+ depends on ARCH_BCMBCA || COMPILE_TEST
+ help
+ Broadcom uses a custom way of storing U-Boot environment variables.
+ They are placed inside the U-Boot partition itself at an unspecified
+ offset. It's possible to locate them by looking for a custom header
+ with a magic value. This driver does that and creates a subpartition
+ for each environment variables block it finds.
+
+config MTD_CMDLINE_PARTS
+ tristate "Command line partition table parsing"
+ depends on MTD
+ help
+ Allow generic configuration of the MTD partition tables via the kernel
+ command line. Multiple flash resources are supported for hardware where
+ different kinds of flash memory are available.
+
+ You will still need the parsing functions to be called by the driver
+ for your particular device. It won't happen automatically. The
+ SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
+ example.
+
+ The format for the command line is as follows:
+
+ mtdparts=<mtddef>[;<mtddef]
+ <mtddef> := <mtd-id>:<partdef>[,<partdef>]
+ <partdef> := <size>[@offset][<name>][ro]
+ <mtd-id> := unique id used in mapping driver/device
+ <size> := standard linux memsize OR "-" to denote all
+ remaining space
+ <name> := (NAME)
+
+ Due to the way Linux handles the command line, no spaces are
+ allowed in the partition definition, including mtd id's and partition
+ names.
+
+ Examples:
+
+ 1 flash resource (mtd-id "sa1100"), with 1 single writable partition:
+ mtdparts=sa1100:-
+
+ Same flash, but 2 named partitions, the first one being read-only:
+ mtdparts=sa1100:256k(ARMboot)ro,-(root)
+
+ If unsure, say 'N'.
+
+config MTD_OF_PARTS
+ tristate "OpenFirmware (device tree) partitioning parser"
+ default y
+ depends on OF
+ help
+ This provides an Open Firmware device tree partition parser
+ which derives the partition map from the children of the
+ flash memory node, as described in
+ Documentation/devicetree/bindings/mtd/mtd.yaml.
+
+config MTD_OF_PARTS_BCM4908
+ bool "BCM4908 partitioning support"
+ depends on MTD_OF_PARTS && (ARCH_BCMBCA || COMPILE_TEST)
+ default ARCH_BCMBCA
+ help
+ This provides a partition parser for BCM4908 family devices
+ that can have multiple "firmware" partitions. It takes care of
+ finding the currently used one and the backup ones.
+
+config MTD_OF_PARTS_LINKSYS_NS
+ bool "Linksys Northstar partitioning support"
+ depends on MTD_OF_PARTS && (ARCH_BCM_5301X || ARCH_BCMBCA || COMPILE_TEST)
+ default ARCH_BCM_5301X
+ help
+ This provides a partition parser for Linksys devices based on the
+ Broadcom Northstar architecture. Linksys commonly uses a fixed flash
+ layout with two "firmware" partitions. The currently used firmware
+ has to be detected using a CFE environment variable.
+
+config MTD_PARSER_IMAGETAG
+ tristate "Parser for BCM963XX Image Tag format partitions"
+ depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
+ select CRC32
+ help
+ Image Tag is the firmware header used by Broadcom on their xDSL line
+ of devices. It is used to describe the offsets and lengths of kernel
+ and rootfs partitions.
+ This driver adds support for parsing a partition with an Image Tag
+ header and creates up to two partitions, kernel and rootfs.
+
+config MTD_AFS_PARTS
+ tristate "ARM Firmware Suite partition parsing"
+ depends on (ARM || ARM64)
+ help
+ The ARM Firmware Suite allows the user to divide flash devices into
+ multiple 'images'. Each such image has a header containing its name
+ and offset/size etc.
+
+ If you need code which can detect and parse these tables, and
+ register MTD 'partitions' corresponding to each image detected,
+ enable this option.
+
+ You will still need the parsing functions to be called by the driver
+ for your particular device. It won't happen automatically. The
+ 'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example.
+
+config MTD_PARSER_TPLINK_SAFELOADER
+ tristate "TP-Link Safeloader partitions parser"
+ depends on MTD && (ARCH_BCM_5301X || ATH79 || SOC_MT7620 || SOC_MT7621 || COMPILE_TEST)
+ help
+ TP-Link home routers use flash partitions to store various data. Info
+ about the flash space layout is stored in a partition table using a
+ custom ASCII-based format.
+
+ That format was first found in devices with SafeLoader bootloader and
+ was named after it. Later it was adapted to CFE and U-Boot
+ bootloaders.
+
+ This driver reads the partition table, parses it and creates MTD
+ partitions.
+
+config MTD_PARSER_TRX
+ tristate "Parser for TRX format partitions"
+ depends on MTD && (BCM47XX || ARCH_BCM_5301X || ARCH_MEDIATEK || RALINK || COMPILE_TEST)
+ help
+ TRX is a firmware format used by Broadcom on their devices. It
+ may contain up to 3 or 4 partitions (depending on the version).
+ This driver will parse TRX header and report at least two partitions:
+ kernel and rootfs.
+
+config MTD_SHARPSL_PARTS
+ tristate "Sharp SL Series NAND flash partition parser"
+ depends on MTD_NAND_SHARPSL || COMPILE_TEST
+ help
+ This provides the read-only FTL logic necessary to read the partition
+ table from the NAND flash of Sharp SL Series (Zaurus) and the MTD
+ partition parser using this code.
+
+config MTD_REDBOOT_PARTS
+ tristate "RedBoot partition table parsing"
+ help
+ RedBoot is a ROM monitor and bootloader which deals with multiple
+ 'images' in flash devices by putting a table in one of the erase
+ blocks on the device, similar to a partition table, which gives
+ the offsets, lengths and names of all the images stored in the
+ flash.
+
+ If you need code which can detect and parse this table, and register
+ MTD 'partitions' corresponding to each image in the table, enable
+ this option.
+
+ You will still need the parsing functions to be called by the driver
+ for your particular device. It won't happen automatically. The
+ SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
+ example.
+
+if MTD_REDBOOT_PARTS
+
+config MTD_REDBOOT_DIRECTORY_BLOCK
+ int "Location of RedBoot partition table"
+ default "-1"
+ help
+ This option is the Linux counterpart to the
+ CYGNUM_REDBOOT_FIS_DIRECTORY_BLOCK RedBoot compile time
+ option.
+
+ The option specifies which flash sector holds the RedBoot
+ partition table. A zero or positive value gives an absolute
+ erase block number. A negative value specifies a number of
+ sectors before the end of the device.
+
+ For example "2" means block number 2, "-1" means the last
+ block and "-2" means the penultimate block.
+
+config MTD_REDBOOT_PARTS_UNALLOCATED
+ bool "Include unallocated flash regions"
+ help
+ If you need to register each unallocated flash region as an MTD
+ 'partition', enable this option.
+
+config MTD_REDBOOT_PARTS_READONLY
+ bool "Force read-only for RedBoot system images"
+ help
+ If you need to force read-only for 'RedBoot', 'RedBoot Config' and
+ 'FIS directory' images, enable this option.
+
+endif # MTD_REDBOOT_PARTS
+
+config MTD_QCOMSMEM_PARTS
+ tristate "Qualcomm SMEM flash partition parser"
+ depends on QCOM_SMEM
+ help
+ This provides support for parsing partitions from Shared Memory (SMEM)
+ for NAND and SPI flash on Qualcomm platforms.
+
+config MTD_SERCOMM_PARTS
+ tristate "Sercomm partition table parser"
+ depends on MTD && RALINK
+ help
+ This provides a partition table parser for devices with a Sercomm
+ partition map. This partition table contains real partition
+ offsets, which may differ from device to device depending on the
+ number and location of bad blocks on NAND.
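
Putting the mtdparts= grammar documented above together, a complete kernel
command line for a board with a single flash device might look like this (the
device name physmap-flash.0 and all sizes are invented for the example):

    mtdparts=physmap-flash.0:512k(bootloader)ro,128k@0x80000(env),-(rootfs)

That is: a read-only 512 KiB bootloader partition, a 128 KiB environment
partition at a fixed offset, and all remaining space assigned to rootfs.
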
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
new file mode 100644
index 0000000000..0e70b621a1
--- /dev/null
+++ b/drivers/mtd/parsers/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
+obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
+obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
+obj-$(CONFIG_MTD_BRCM_U_BOOT) += brcm_u-boot.o
+obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
+obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
+ofpart-y += ofpart_core.o
+ofpart-$(CONFIG_MTD_OF_PARTS_BCM4908) += ofpart_bcm4908.o
+ofpart-$(CONFIG_MTD_OF_PARTS_LINKSYS_NS)+= ofpart_linksys_ns.o
+obj-$(CONFIG_MTD_PARSER_IMAGETAG) += parser_imagetag.o
+obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
+obj-$(CONFIG_MTD_PARSER_TPLINK_SAFELOADER) += tplink_safeloader.o
+obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
+obj-$(CONFIG_MTD_SERCOMM_PARTS) += scpart.o
+obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o
+obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
+obj-$(CONFIG_MTD_QCOMSMEM_PARTS) += qcomsmempart.o
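
For completeness, a hypothetical .config fragment selecting a few of these
parsers; the symbols come from the Kconfig above and pick which objects this
Makefile builds:

    CONFIG_MTD_CMDLINE_PARTS=y
    CONFIG_MTD_OF_PARTS=y
    CONFIG_MTD_REDBOOT_PARTS=m
    CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
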
diff --git a/drivers/mtd/parsers/afs.c b/drivers/mtd/parsers/afs.c
new file mode 100644
index 0000000000..26116694c8
--- /dev/null
+++ b/drivers/mtd/parsers/afs.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*======================================================================
+
+ drivers/mtd/afs.c: ARM Flash Layout/Partitioning
+
+ Copyright © 2000 ARM Limited
+ Copyright (C) 2019 Linus Walleij
+
+
+ This is access code for flashes using ARM's flash partitioning
+ standards.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#define AFSV1_FOOTER_MAGIC 0xA0FFFF9F
+#define AFSV2_FOOTER_MAGIC1 0x464C5348 /* "FLSH" */
+#define AFSV2_FOOTER_MAGIC2 0x464F4F54 /* "FOOT" */
+
+struct footer_v1 {
+ u32 image_info_base; /* Address of first word of ImageFooter */
+ u32 image_start; /* Start of area reserved by this footer */
+ u32 signature; /* 'Magic' number proves it's a footer */
+ u32 type; /* Area type: ARM Image, SIB, customer */
+ u32 checksum; /* Just this structure */
+};
+
+struct image_info_v1 {
+ u32 bootFlags; /* Boot flags, compression etc. */
+ u32 imageNumber; /* Unique number, selects for boot etc. */
+ u32 loadAddress; /* Address program should be loaded to */
+ u32 length; /* Actual size of image */
+ u32 address; /* Image is executed from here */
+ char name[16]; /* Null terminated */
+ u32 headerBase; /* Flash Address of any stripped header */
+ u32 header_length; /* Length of header in memory */
+ u32 headerType; /* AIF, RLF, s-record etc. */
+ u32 checksum; /* Image checksum (inc. this struct) */
+};
+
+static u32 word_sum(void *words, int num)
+{
+ u32 *p = words;
+ u32 sum = 0;
+
+ while (num--)
+ sum += *p++;
+
+ return sum;
+}
+
+static u32 word_sum_v2(u32 *p, u32 num)
+{
+ u32 sum = 0;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ u32 val;
+
+ val = p[i];
+ if (val > ~sum)
+ sum++;
+ sum += val;
+ }
+ return ~sum;
+}
+
+static bool afs_is_v1(struct mtd_info *mtd, u_int off)
+{
+ /* The magic is 12 bytes from the end of the erase block */
+ u_int ptr = off + mtd->erasesize - 12;
+ u32 magic;
+ size_t sz;
+ int ret;
+
+ ret = mtd_read(mtd, ptr, 4, &sz, (u_char *)&magic);
+ if (ret < 0) {
+ printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return false;
+ }
+ if (ret >= 0 && sz != 4)
+ return false;
+
+ return (magic == AFSV1_FOOTER_MAGIC);
+}
+
+static bool afs_is_v2(struct mtd_info *mtd, u_int off)
+{
+ /* The magic is the 8 last bytes of the erase block */
+ u_int ptr = off + mtd->erasesize - 8;
+ u32 foot[2];
+ size_t sz;
+ int ret;
+
+ ret = mtd_read(mtd, ptr, 8, &sz, (u_char *)foot);
+ if (ret < 0) {
+ printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return false;
+ }
+ if (ret >= 0 && sz != 8)
+ return false;
+
+ return (foot[0] == AFSV2_FOOTER_MAGIC1 &&
+ foot[1] == AFSV2_FOOTER_MAGIC2);
+}
+
+static int afs_parse_v1_partition(struct mtd_info *mtd,
+ u_int off, struct mtd_partition *part)
+{
+ struct footer_v1 fs;
+ struct image_info_v1 iis;
+ u_int mask;
+ /*
+ * Static checks cannot see that we bail out if we have an error
+ * reading the footer.
+ */
+ u_int iis_ptr;
+ u_int img_ptr;
+ u_int ptr;
+ size_t sz;
+ int ret;
+ int i;
+
+ /*
+ * This is the address mask; we use this to mask off out of
+ * range address bits.
+ */
+ mask = mtd->size - 1;
+
+ ptr = off + mtd->erasesize - sizeof(fs);
+ ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs);
+ if (ret >= 0 && sz != sizeof(fs))
+ ret = -EINVAL;
+ if (ret < 0) {
+ printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return ret;
+ }
+ /*
+ * Check the checksum.
+ */
+ if (word_sum(&fs, sizeof(fs) / sizeof(u32)) != 0xffffffff)
+ return -EINVAL;
+
+ /*
+ * Hide the SIB (System Information Block)
+ */
+ if (fs.type == 2)
+ return 0;
+
+ iis_ptr = fs.image_info_base & mask;
+ img_ptr = fs.image_start & mask;
+
+ /*
+ * Check the image info base. This can not
+ * be located after the footer structure.
+ */
+ if (iis_ptr >= ptr)
+ return 0;
+
+ /*
+ * Check the start of this image. The image
+ * data can not be located after this block.
+ */
+ if (img_ptr > off)
+ return 0;
+
+ /* Read the image info block */
+ memset(&iis, 0, sizeof(iis));
+ ret = mtd_read(mtd, iis_ptr, sizeof(iis), &sz, (u_char *)&iis);
+ if (ret < 0) {
+ printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+ iis_ptr, ret);
+ return -EINVAL;
+ }
+
+ if (sz != sizeof(iis))
+ return -EINVAL;
+
+ /*
+ * Validate the name - it must be NUL terminated.
+ */
+ for (i = 0; i < sizeof(iis.name); i++)
+ if (iis.name[i] == '\0')
+ break;
+ if (i > sizeof(iis.name))
+ return -EINVAL;
+
+ part->name = kstrdup(iis.name, GFP_KERNEL);
+ if (!part->name)
+ return -ENOMEM;
+
+ part->size = (iis.length + mtd->erasesize - 1) & ~(mtd->erasesize - 1);
+ part->offset = img_ptr;
+ part->mask_flags = 0;
+
+ printk(" mtd: at 0x%08x, %5lluKiB, %8u, %s\n",
+ img_ptr, part->size / 1024,
+ iis.imageNumber, part->name);
+
+ return 0;
+}
+
+static int afs_parse_v2_partition(struct mtd_info *mtd,
+ u_int off, struct mtd_partition *part)
+{
+ u_int ptr;
+ u32 footer[12];
+ u32 imginfo[36];
+ char *name;
+ u32 version;
+ u32 entrypoint;
+ u32 attributes;
+ u32 region_count;
+ u32 block_start;
+ u32 block_end;
+ u32 crc;
+ size_t sz;
+ int ret;
+ int i;
+ int pad = 0;
+
+ pr_debug("Parsing v2 partition @%08x-%08x\n",
+ off, off + mtd->erasesize);
+
+ /* First read the footer */
+ ptr = off + mtd->erasesize - sizeof(footer);
+ ret = mtd_read(mtd, ptr, sizeof(footer), &sz, (u_char *)footer);
+ if ((ret < 0) || (ret >= 0 && sz != sizeof(footer))) {
+ pr_err("AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return -EIO;
+ }
+ name = (char *) &footer[0];
+ version = footer[9];
+ ptr = off + mtd->erasesize - sizeof(footer) - footer[8];
+
+ pr_debug("found image \"%s\", version %08x, info @%08x\n",
+ name, version, ptr);
+
+ /* Then read the image information */
+ ret = mtd_read(mtd, ptr, sizeof(imginfo), &sz, (u_char *)imginfo);
+ if ((ret < 0) || (ret >= 0 && sz != sizeof(imginfo))) {
+ pr_err("AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return -EIO;
+ }
+
+ /* 32bit platforms have 4 bytes padding */
+ crc = word_sum_v2(&imginfo[1], 34);
+ if (!crc) {
+ pr_debug("Padding 1 word (4 bytes)\n");
+ pad = 1;
+ } else {
+ /* 64bit platforms have 8 bytes padding */
+ crc = word_sum_v2(&imginfo[2], 34);
+ if (!crc) {
+ pr_debug("Padding 2 words (8 bytes)\n");
+ pad = 2;
+ }
+ }
+ if (crc) {
+ pr_err("AFS: bad checksum on v2 image info: %08x\n", crc);
+ return -EINVAL;
+ }
+ entrypoint = imginfo[pad];
+ attributes = imginfo[pad+1];
+ region_count = imginfo[pad+2];
+ block_start = imginfo[20];
+ block_end = imginfo[21];
+
+ pr_debug("image entry=%08x, attr=%08x, regions=%08x, "
+ "bs=%08x, be=%08x\n",
+ entrypoint, attributes, region_count,
+ block_start, block_end);
+
+ for (i = 0; i < region_count; i++) {
+ u32 region_load_addr = imginfo[pad + 3 + i*4];
+ u32 region_size = imginfo[pad + 4 + i*4];
+ u32 region_offset = imginfo[pad + 5 + i*4];
+ u32 region_start;
+ u32 region_end;
+
+ pr_debug(" region %d: address: %08x, size: %08x, "
+ "offset: %08x\n",
+ i,
+ region_load_addr,
+ region_size,
+ region_offset);
+
+ region_start = off + region_offset;
+ region_end = region_start + region_size;
+ /* Align partition to end of erase block */
+ region_end += (mtd->erasesize - 1);
+ region_end &= ~(mtd->erasesize -1);
+ pr_debug(" partition start = %08x, partition end = %08x\n",
+ region_start, region_end);
+
+ /* Create one partition per region */
+ part->name = kstrdup(name, GFP_KERNEL);
+ if (!part->name)
+ return -ENOMEM;
+ part->offset = region_start;
+ part->size = region_end - region_start;
+ part->mask_flags = 0;
+ }
+
+ return 0;
+}
+
+static int parse_afs_partitions(struct mtd_info *mtd,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_partition *parts;
+ u_int off, sz;
+ int ret = 0;
+ int i;
+
+ /* Count the partitions by looping over all erase blocks */
+ for (i = off = sz = 0; off < mtd->size; off += mtd->erasesize) {
+ if (afs_is_v1(mtd, off)) {
+ sz += sizeof(struct mtd_partition);
+ i += 1;
+ }
+ if (afs_is_v2(mtd, off)) {
+ sz += sizeof(struct mtd_partition);
+ i += 1;
+ }
+ }
+
+ if (!i)
+ return 0;
+
+ parts = kzalloc(sz, GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ /*
+ * Identify the partitions
+ */
+ for (i = off = 0; off < mtd->size; off += mtd->erasesize) {
+ if (afs_is_v1(mtd, off)) {
+ ret = afs_parse_v1_partition(mtd, off, &parts[i]);
+ if (ret)
+ goto out_free_parts;
+ i++;
+ }
+ if (afs_is_v2(mtd, off)) {
+ ret = afs_parse_v2_partition(mtd, off, &parts[i]);
+ if (ret)
+ goto out_free_parts;
+ i++;
+ }
+ }
+
+ *pparts = parts;
+ return i;
+
+out_free_parts:
+ while (--i >= 0)
+ kfree(parts[i].name);
+ kfree(parts);
+ *pparts = NULL;
+ return ret;
+}
+
+static const struct of_device_id mtd_parser_afs_of_match_table[] = {
+ { .compatible = "arm,arm-firmware-suite" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtd_parser_afs_of_match_table);
+
+static struct mtd_part_parser afs_parser = {
+ .parse_fn = parse_afs_partitions,
+ .name = "afs",
+ .of_match_table = mtd_parser_afs_of_match_table,
+};
+module_mtd_part_parser(afs_parser);
+
+MODULE_AUTHOR("ARM Ltd");
+MODULE_DESCRIPTION("ARM Firmware Suite partition parser");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/parsers/ar7part.c b/drivers/mtd/parsers/ar7part.c
new file mode 100644
index 0000000000..8cd683711a
--- /dev/null
+++ b/drivers/mtd/parsers/ar7part.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright © 2007 Eugene Konev <ejka@openwrt.org>
+ *
+ * TI AR7 flash partition table.
+ * Based on ar7 map by Felix Fietkau <nbd@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+
+#include <uapi/linux/magic.h>
+
+#define AR7_PARTS 4
+#define ROOT_OFFSET 0xe0000
+
+#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42)
+#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281)
+
+struct ar7_bin_rec {
+ unsigned int checksum;
+ unsigned int length;
+ unsigned int address;
+};
+
+static int create_mtd_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct ar7_bin_rec header;
+ unsigned int offset;
+ size_t len;
+ unsigned int pre_size = master->erasesize, post_size = 0;
+ unsigned int root_offset = ROOT_OFFSET;
+
+ int retries = 10;
+ struct mtd_partition *ar7_parts;
+
+ ar7_parts = kcalloc(AR7_PARTS, sizeof(*ar7_parts), GFP_KERNEL);
+ if (!ar7_parts)
+ return -ENOMEM;
+ ar7_parts[0].name = "loader";
+ ar7_parts[0].offset = 0;
+ ar7_parts[0].size = master->erasesize;
+ ar7_parts[0].mask_flags = MTD_WRITEABLE;
+
+ ar7_parts[1].name = "config";
+ ar7_parts[1].offset = 0;
+ ar7_parts[1].size = master->erasesize;
+ ar7_parts[1].mask_flags = 0;
+
+ do { /* Try 10 blocks starting from master->erasesize */
+ offset = pre_size;
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
+ if (!strncmp((char *)&header, "TIENV0.8", 8))
+ ar7_parts[1].offset = pre_size;
+ if (header.checksum == LOADER_MAGIC1)
+ break;
+ if (header.checksum == LOADER_MAGIC2)
+ break;
+ pre_size += master->erasesize;
+ } while (retries--);
+
+ pre_size = offset;
+
+ if (!ar7_parts[1].offset) {
+ ar7_parts[1].offset = master->size - master->erasesize;
+ post_size = master->erasesize;
+ }
+
+ switch (header.checksum) {
+ case LOADER_MAGIC1:
+ while (header.length) {
+ offset += sizeof(header) + header.length;
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
+ }
+ root_offset = offset + sizeof(header) + 4;
+ break;
+ case LOADER_MAGIC2:
+ while (header.length) {
+ offset += sizeof(header) + header.length;
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
+ }
+ root_offset = offset + sizeof(header) + 4 + 0xff;
+ root_offset &= ~(uint32_t)0xff;
+ break;
+ default:
+ printk(KERN_WARNING "Unknown magic: %08x\n", header.checksum);
+ break;
+ }
+
+ mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header);
+ if (header.checksum != SQUASHFS_MAGIC) {
+ root_offset += master->erasesize - 1;
+ root_offset &= ~(master->erasesize - 1);
+ }
+
+ ar7_parts[2].name = "linux";
+ ar7_parts[2].offset = pre_size;
+ ar7_parts[2].size = master->size - pre_size - post_size;
+ ar7_parts[2].mask_flags = 0;
+
+ ar7_parts[3].name = "rootfs";
+ ar7_parts[3].offset = root_offset;
+ ar7_parts[3].size = master->size - root_offset - post_size;
+ ar7_parts[3].mask_flags = 0;
+
+ *pparts = ar7_parts;
+ return AR7_PARTS;
+}
+
+static struct mtd_part_parser ar7_parser = {
+ .parse_fn = create_mtd_partitions,
+ .name = "ar7part",
+};
+module_mtd_part_parser(ar7_parser);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, "
+ "Eugene Konev <ejka@openwrt.org>");
+MODULE_DESCRIPTION("MTD partitioning for TI AR7");
diff --git a/drivers/mtd/parsers/bcm47xxpart.c b/drivers/mtd/parsers/bcm47xxpart.c
new file mode 100644
index 0000000000..13daf9bffd
--- /dev/null
+++ b/drivers/mtd/parsers/bcm47xxpart.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * BCM47XX MTD partitioning
+ *
+ * Copyright © 2012 Rafał Miłecki <zajec5@gmail.com>
+ */
+
+#include <linux/bcm47xx_nvram.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <uapi/linux/magic.h>
+
+/*
+ * NAND flash on Netgear R6250 was verified to contain 15 partitions.
+ * This will result in allocating too big an array for some old devices, but the
+ * memory will be freed soon anyway (see mtd_device_parse_register).
+ */
+#define BCM47XXPART_MAX_PARTS 20
+
+/*
+ * Amount of bytes we read when analyzing each block of flash memory.
+ * Set it big enough to allow detecting partition and reading important data.
+ */
+#define BCM47XXPART_BYTES_TO_READ 0x4e8
+
+/* Magics */
+#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
+#define BOARD_DATA_MAGIC2 0xBD0D0BBD
+#define CFE_MAGIC 0x43464531 /* 1EFC */
+#define FACTORY_MAGIC 0x59544346 /* FCTY */
+#define NVRAM_HEADER 0x48534C46 /* FLSH */
+#define POT_MAGIC1 0x54544f50 /* POTT */
+#define POT_MAGIC2 0x504f /* OP */
+#define ML_MAGIC1 0x39685a42
+#define ML_MAGIC2 0x26594131
+#define TRX_MAGIC 0x30524448
+#define SHSQ_MAGIC 0x71736873 /* shsq (weird ZTE H218N endianness) */
+
+static const char * const trx_types[] = { "trx", NULL };
+
+struct trx_header {
+ uint32_t magic;
+ uint32_t length;
+ uint32_t crc32;
+ uint16_t flags;
+ uint16_t version;
+ uint32_t offset[3];
+} __packed;
+
+static void bcm47xxpart_add_part(struct mtd_partition *part, const char *name,
+ u64 offset, uint32_t mask_flags)
+{
+ part->name = name;
+ part->offset = offset;
+ part->mask_flags = mask_flags;
+}
+
+/**
+ * bcm47xxpart_bootpartition - gets index of TRX partition used by bootloader
+ *
+ * Some devices may have more than one TRX partition. In such a case one of
+ * them is the main one and another is a failsafe one. The bootloader may fall
+ * back to the failsafe firmware if it detects corruption of the main image.
+ *
+ * This function provides info about currently used TRX partition. It's the one
+ * containing kernel started by the bootloader.
+ */
+static int bcm47xxpart_bootpartition(void)
+{
+ char buf[4];
+ int bootpartition;
+
+ /* Check CFE environment variable */
+ if (bcm47xx_nvram_getenv("bootpartition", buf, sizeof(buf)) > 0) {
+ if (!kstrtoint(buf, 0, &bootpartition))
+ return bootpartition;
+ }
+
+ return 0;
+}
+
+static int bcm47xxpart_parse(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_partition *parts;
+ uint8_t i, curr_part = 0;
+ uint32_t *buf;
+ size_t bytes_read;
+ uint32_t offset;
+ uint32_t blocksize = master->erasesize;
+ int trx_parts[2]; /* Array with indexes of TRX partitions */
+ int trx_num = 0; /* Number of found TRX partitions */
+ int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
+ int err;
+
+ /*
+ * Some really old flashes (like AT45DB*) had smaller erasesize-s, but
+ * partitions were aligned to at least 0x1000 anyway.
+ */
+ if (blocksize < 0x1000)
+ blocksize = 0x1000;
+
+ /* Alloc */
+ parts = kcalloc(BCM47XXPART_MAX_PARTS, sizeof(struct mtd_partition),
+ GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
+ if (!buf) {
+ kfree(parts);
+ return -ENOMEM;
+ }
+
+ /* Parse block by block looking for magics */
+ for (offset = 0; offset <= master->size - blocksize;
+ offset += blocksize) {
+ /* Nothing more in higher memory on BCM47XX (MIPS) */
+ if (IS_ENABLED(CONFIG_BCM47XX) && offset >= 0x2000000)
+ break;
+
+ if (curr_part >= BCM47XXPART_MAX_PARTS) {
+ pr_warn("Reached maximum number of partitions, scanning stopped!\n");
+ break;
+ }
+
+ /* Read beginning of the block */
+ err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+ &bytes_read, (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
+ continue;
+ }
+
+ /* Magic or small NVRAM at 0x400 */
+ if ((buf[0x4e0 / 4] == CFE_MAGIC && buf[0x4e4 / 4] == CFE_MAGIC) ||
+ (buf[0x400 / 4] == NVRAM_HEADER)) {
+ bcm47xxpart_add_part(&parts[curr_part++], "boot",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+
+ /*
+ * board_data starts with board_id which differs across boards,
+ * but we can use 'MPFR' (hopefully) magic at 0x100
+ */
+ if (buf[0x100 / 4] == BOARD_DATA_MAGIC) {
+ bcm47xxpart_add_part(&parts[curr_part++], "board_data",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+
+ /* Found on Huawei E970 */
+ if (buf[0x000 / 4] == FACTORY_MAGIC) {
+ bcm47xxpart_add_part(&parts[curr_part++], "factory",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+
+ /* POT(TOP) */
+ if (buf[0x000 / 4] == POT_MAGIC1 &&
+ (buf[0x004 / 4] & 0xFFFF) == POT_MAGIC2) {
+ bcm47xxpart_add_part(&parts[curr_part++], "POT", offset,
+ MTD_WRITEABLE);
+ continue;
+ }
+
+ /* ML */
+ if (buf[0x010 / 4] == ML_MAGIC1 &&
+ buf[0x014 / 4] == ML_MAGIC2) {
+ bcm47xxpart_add_part(&parts[curr_part++], "ML", offset,
+ MTD_WRITEABLE);
+ continue;
+ }
+
+ /* TRX */
+ if (buf[0x000 / 4] == TRX_MAGIC) {
+ struct trx_header *trx;
+ uint32_t last_subpart;
+ uint32_t trx_size;
+
+ if (trx_num >= ARRAY_SIZE(trx_parts))
+ pr_warn("No enough space to store another TRX found at 0x%X\n",
+ offset);
+ else
+ trx_parts[trx_num++] = curr_part;
+ bcm47xxpart_add_part(&parts[curr_part++], "firmware",
+ offset, 0);
+
+ /*
+ * Try to find TRX size. The "length" field isn't fully
+ * reliable as it could be decreased to make CRC32 cover
+ * only part of TRX data. It's commonly used as checksum
+ * can't cover e.g. ever-changing rootfs partition.
+ * Use offsets as helpers for assuming min TRX size.
+ */
+ trx = (struct trx_header *)buf;
+ last_subpart = max3(trx->offset[0], trx->offset[1],
+ trx->offset[2]);
+ trx_size = max(trx->length, last_subpart + blocksize);
+
+ /*
+ * Skip the TRX data. Decrease offset by block size as
+ * the next loop iteration will increase it.
+ */
+ offset += roundup(trx_size, blocksize) - blocksize;
+ continue;
+ }
+
+ /* Squashfs on devices not using TRX */
+ if (le32_to_cpu(buf[0x000 / 4]) == SQUASHFS_MAGIC ||
+ buf[0x000 / 4] == SHSQ_MAGIC) {
+ bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
+ offset, 0);
+ continue;
+ }
+
+ /*
+ * New (ARM?) devices may have NVRAM in some middle block. Last
+ * block will be checked later, so skip it.
+ */
+ if (offset != master->size - blocksize &&
+ buf[0x000 / 4] == NVRAM_HEADER) {
+ bcm47xxpart_add_part(&parts[curr_part++], "nvram",
+ offset, 0);
+ continue;
+ }
+
+ /* Read middle of the block */
+ err = mtd_read(master, offset + (blocksize / 2), 0x4, &bytes_read,
+ (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset + (blocksize / 2), err);
+ continue;
+ }
+
+ /* Some devices (ex. WNDR3700v3) don't have a standard 'MPFR' */
+ if (buf[0x000 / 4] == BOARD_DATA_MAGIC2) {
+ bcm47xxpart_add_part(&parts[curr_part++], "board_data",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+ }
+
+ /* Look for NVRAM at the end of the last block. */
+ for (i = 0; i < ARRAY_SIZE(possible_nvram_sizes); i++) {
+ if (curr_part >= BCM47XXPART_MAX_PARTS) {
+ pr_warn("Reached maximum number of partitions, scanning stopped!\n");
+ break;
+ }
+
+ offset = master->size - possible_nvram_sizes[i];
+ err = mtd_read(master, offset, 0x4, &bytes_read,
+ (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while reading (offset 0x%X): %d\n",
+ offset, err);
+ continue;
+ }
+
+ /* Standard NVRAM */
+ if (buf[0] == NVRAM_HEADER) {
+ bcm47xxpart_add_part(&parts[curr_part++], "nvram",
+ master->size - blocksize, 0);
+ break;
+ }
+ }
+
+ kfree(buf);
+
+ /*
+ * Assume that partitions end at the beginning of the one they are
+ * followed by.
+ */
+ for (i = 0; i < curr_part; i++) {
+ u64 next_part_offset = (i < curr_part - 1) ?
+ parts[i + 1].offset : master->size;
+
+ parts[i].size = next_part_offset - parts[i].offset;
+ }
+
+ /* If there was TRX parse it now */
+ for (i = 0; i < trx_num; i++) {
+ struct mtd_partition *trx = &parts[trx_parts[i]];
+
+ if (i == bcm47xxpart_bootpartition())
+ trx->types = trx_types;
+ else
+ trx->name = "failsafe";
+ }
+
+ *pparts = parts;
+ return curr_part;
+};
+
+static const struct of_device_id bcm47xxpart_of_match_table[] = {
+ { .compatible = "brcm,bcm947xx-cfe-partitions" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm47xxpart_of_match_table);
+
+static struct mtd_part_parser bcm47xxpart_mtd_parser = {
+ .parse_fn = bcm47xxpart_parse,
+ .name = "bcm47xxpart",
+ .of_match_table = bcm47xxpart_of_match_table,
+};
+module_mtd_part_parser(bcm47xxpart_mtd_parser);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MTD partitioning for BCM47XX flash memories");
diff --git a/drivers/mtd/parsers/bcm63xxpart.c b/drivers/mtd/parsers/bcm63xxpart.c
new file mode 100644
index 0000000000..6e7b1ae8f5
--- /dev/null
+++ b/drivers/mtd/parsers/bcm63xxpart.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * BCM63XX CFE image tag parser
+ *
+ * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
+ * Mike Albon <malbon@openwrt.org>
+ * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
+ * Copyright © 2011-2013 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bcm963xx_nvram.h>
+#include <linux/bcm963xx_tag.h>
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+
+#ifdef CONFIG_MIPS
+#include <asm/bootinfo.h>
+#include <asm/fw/cfe/cfe_api.h>
+#endif /* CONFIG_MIPS */
+
+#define BCM963XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */
+
+#define BCM963XX_CFE_MAGIC_OFFSET 0x4e0
+#define BCM963XX_CFE_VERSION_OFFSET 0x570
+#define BCM963XX_NVRAM_OFFSET 0x580
+
+/* Ensure strings read from flash structs are null terminated */
+#define STR_NULL_TERMINATE(x) \
+ do { char *_str = (x); _str[sizeof(x) - 1] = 0; } while (0)
+
+static inline int bcm63xx_detect_cfe(void)
+{
+ int ret = 0;
+
+#ifdef CONFIG_MIPS
+ ret = (fw_arg3 == CFE_EPTSEAL);
+#endif /* CONFIG_MIPS */
+
+ return ret;
+}
+
+static int bcm63xx_read_nvram(struct mtd_info *master,
+ struct bcm963xx_nvram *nvram)
+{
+ u32 actual_crc, expected_crc;
+ size_t retlen;
+ int ret;
+
+ /* extract nvram data */
+ ret = mtd_read(master, BCM963XX_NVRAM_OFFSET, BCM963XX_NVRAM_V5_SIZE,
+ &retlen, (void *)nvram);
+ if (ret)
+ return ret;
+
+ ret = bcm963xx_nvram_checksum(nvram, &expected_crc, &actual_crc);
+ if (ret)
+ pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
+ expected_crc, actual_crc);
+
+ if (!nvram->psi_size)
+ nvram->psi_size = BCM963XX_DEFAULT_PSI_SIZE;
+
+ return 0;
+}
+
+static const char * const bcm63xx_cfe_part_types[] = {
+ "bcm963xx-imagetag",
+ NULL,
+};
+
+static int bcm63xx_parse_cfe_nor_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts, struct bcm963xx_nvram *nvram)
+{
+ struct mtd_partition *parts;
+ int nrparts = 3, curpart = 0;
+ unsigned int cfelen, nvramlen;
+ unsigned int cfe_erasesize;
+ int i;
+
+ cfe_erasesize = max_t(uint32_t, master->erasesize,
+ BCM963XX_CFE_BLOCK_SIZE);
+
+ cfelen = cfe_erasesize;
+ nvramlen = nvram->psi_size * SZ_1K;
+ nvramlen = roundup(nvramlen, cfe_erasesize);
+
+ parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ /* Start building partition list */
+ parts[curpart].name = "CFE";
+ parts[curpart].offset = 0;
+ parts[curpart].size = cfelen;
+ curpart++;
+
+ parts[curpart].name = "nvram";
+ parts[curpart].offset = master->size - nvramlen;
+ parts[curpart].size = nvramlen;
+ curpart++;
+
+ /* Global partition "linux" to make easy firmware upgrade */
+ parts[curpart].name = "linux";
+ parts[curpart].offset = cfelen;
+ parts[curpart].size = master->size - cfelen - nvramlen;
+ parts[curpart].types = bcm63xx_cfe_part_types;
+
+ for (i = 0; i < nrparts; i++)
+ pr_info("Partition %d is %s offset %llx and length %llx\n", i,
+ parts[i].name, parts[i].offset, parts[i].size);
+
+ *pparts = parts;
+
+ return nrparts;
+}
+
+static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct bcm963xx_nvram *nvram = NULL;
+ int ret;
+
+ if (!bcm63xx_detect_cfe())
+ return -EINVAL;
+
+ nvram = vzalloc(sizeof(*nvram));
+ if (!nvram)
+ return -ENOMEM;
+
+ ret = bcm63xx_read_nvram(master, nvram);
+ if (ret)
+ goto out;
+
+ if (!mtd_type_is_nand(master))
+ ret = bcm63xx_parse_cfe_nor_partitions(master, pparts, nvram);
+ else
+ ret = -EINVAL;
+
+out:
+ vfree(nvram);
+ return ret;
+};
+
+static const struct of_device_id parse_bcm63xx_cfe_match_table[] = {
+ { .compatible = "brcm,bcm963xx-cfe-nor-partitions" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, parse_bcm63xx_cfe_match_table);
+
+static struct mtd_part_parser bcm63xx_cfe_parser = {
+ .parse_fn = bcm63xx_parse_cfe_partitions,
+ .name = "bcm63xxpart",
+ .of_match_table = parse_bcm63xx_cfe_match_table,
+};
+module_mtd_part_parser(bcm63xx_cfe_parser);
+
+MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
+MODULE_AUTHOR("Jonas Gorski <jonas.gorski@gmail.com");
+MODULE_DESCRIPTION("MTD partitioning for BCM63XX CFE bootloaders");
diff --git a/drivers/mtd/parsers/brcm_u-boot.c b/drivers/mtd/parsers/brcm_u-boot.c
new file mode 100644
index 0000000000..7c338dc7b8
--- /dev/null
+++ b/drivers/mtd/parsers/brcm_u-boot.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2022 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#define BRCM_U_BOOT_MAX_OFFSET 0x200000
+#define BRCM_U_BOOT_STEP 0x1000
+
+#define BRCM_U_BOOT_MAX_PARTS 2
+
+#define BRCM_U_BOOT_MAGIC 0x75456e76 /* uEnv */
+
+struct brcm_u_boot_header {
+ __le32 magic;
+ __le32 length;
+} __packed;
+
+static const char *names[BRCM_U_BOOT_MAX_PARTS] = {
+ "u-boot-env",
+ "u-boot-env-backup",
+};
+
+static int brcm_u_boot_parse(struct mtd_info *mtd,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct brcm_u_boot_header header;
+ struct mtd_partition *parts;
+ size_t bytes_read;
+ size_t offset;
+ int err;
+ int i = 0;
+
+ parts = kcalloc(BRCM_U_BOOT_MAX_PARTS, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ for (offset = 0;
+ offset < min_t(size_t, mtd->size, BRCM_U_BOOT_MAX_OFFSET);
+ offset += BRCM_U_BOOT_STEP) {
+ err = mtd_read(mtd, offset, sizeof(header), &bytes_read, (uint8_t *)&header);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("Failed to read from %s at 0x%zx: %d\n", mtd->name, offset, err);
+ continue;
+ }
+
+ if (le32_to_cpu(header.magic) != BRCM_U_BOOT_MAGIC)
+ continue;
+
+ parts[i].name = names[i];
+ parts[i].offset = offset;
+ parts[i].size = sizeof(header) + le32_to_cpu(header.length);
+ i++;
+ pr_info("offset:0x%zx magic:0x%08x BINGO\n", offset, header.magic);
+
+ if (i == BRCM_U_BOOT_MAX_PARTS)
+ break;
+ }
+
+ *pparts = parts;
+
+ return i;
+};
+
+static const struct of_device_id brcm_u_boot_of_match_table[] = {
+ { .compatible = "brcm,u-boot" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcm_u_boot_of_match_table);
+
+static struct mtd_part_parser brcm_u_boot_mtd_parser = {
+ .parse_fn = brcm_u_boot_parse,
+ .name = "brcm_u-boot",
+ .of_match_table = brcm_u_boot_of_match_table,
+};
+module_mtd_part_parser(brcm_u_boot_mtd_parser);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c
new file mode 100644
index 0000000000..b34856def8
--- /dev/null
+++ b/drivers/mtd/parsers/cmdlinepart.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Read flash partition table from command line
+ *
+ * Copyright © 2002 SYSGO Real-Time Solutions GmbH
+ * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * The format for the command line is as follows:
+ *
+ * mtdparts=<mtddef>[;<mtddef]
+ * <mtddef> := <mtd-id>:<partdef>[,<partdef>]
+ * <partdef> := <size>[@<offset>][<name>][ro][lk][slc]
+ * <mtd-id> := unique name used in mapping driver/device (mtd->name)
+ * <size> := standard linux memsize OR "-" to denote all remaining space
+ * size is automatically truncated at end of device
+ * if specified or truncated size is 0 the part is skipped
+ * <offset> := standard linux memsize
+ * if omitted the part will immediately follow the previous part
+ * or 0 if the first part
+ * <name> := '(' NAME ')'
+ * NAME will appear in /proc/mtd
+ *
+ * <size> and <offset> can be specified such that the parts are out of order
+ * in physical memory and may even overlap.
+ *
+ * The parts are assigned MTD numbers in the order they are specified in the
+ * command line regardless of their order in physical memory.
+ *
+ * Examples:
+ *
+ * 1 NOR Flash, with 1 single writable partition:
+ * edb7312-nor:-
+ *
+ * 1 NOR Flash with 2 partitions, 1 NAND with one
+ * edb7312-nor:256k(ARMboot)ro,-(root);edb7312-nand:-(home)
+ */
+
+#define pr_fmt(fmt) "mtd: " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/module.h>
+#include <linux/err.h>
+
+/* debug macro */
+#if 0
+#define dbg(x) do { printk("DEBUG-CMDLINE-PART: "); printk x; } while(0)
+#else
+#define dbg(x)
+#endif
+
+
+/* special size referring to all the remaining space in a partition */
+#define SIZE_REMAINING ULLONG_MAX
+#define OFFSET_CONTINUOUS ULLONG_MAX
+
+struct cmdline_mtd_partition {
+ struct cmdline_mtd_partition *next;
+ char *mtd_id;
+ int num_parts;
+ struct mtd_partition *parts;
+};
+
+/* mtdpart_setup() parses into here */
+static struct cmdline_mtd_partition *partitions;
+
+/* the command line passed to mtdpart_setup() */
+static char *mtdparts;
+static char *cmdline;
+static int cmdline_parsed;
+
+/*
+ * Parse one partition definition for an MTD. Since there can be many
+ * comma separated partition definitions, this function calls itself
+ * recursively until no more partition definitions are found. Nice side
+ * effect: the memory to keep the mtd_partition structs and the names
+ * is allocated upon the last definition being found. At that point the
+ * syntax has been verified ok.
+ */
+static struct mtd_partition * newpart(char *s,
+ char **retptr,
+ int *num_parts,
+ int this_part,
+ unsigned char **extra_mem_ptr,
+ int extra_mem_size)
+{
+ struct mtd_partition *parts;
+ unsigned long long size, offset = OFFSET_CONTINUOUS;
+ char *name;
+ int name_len;
+ unsigned char *extra_mem;
+ char delim;
+ unsigned int mask_flags, add_flags;
+
+ /* fetch the partition size */
+ if (*s == '-') {
+ /* assign all remaining space to this partition */
+ size = SIZE_REMAINING;
+ s++;
+ } else {
+ size = memparse(s, &s);
+ if (!size) {
+ pr_err("partition has size 0\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* fetch partition name and flags */
+ mask_flags = 0; /* this is going to be a regular partition */
+ add_flags = 0;
+ delim = 0;
+
+ /* check for offset */
+ if (*s == '@') {
+ s++;
+ offset = memparse(s, &s);
+ }
+
+ /* now look for name */
+ if (*s == '(')
+ delim = ')';
+
+ if (delim) {
+ char *p;
+
+ name = ++s;
+ p = strchr(name, delim);
+ if (!p) {
+ pr_err("no closing %c found in partition name\n", delim);
+ return ERR_PTR(-EINVAL);
+ }
+ name_len = p - name;
+ s = p + 1;
+ } else {
+ name = NULL;
+ name_len = 13; /* Partition_000 */
+ }
+
+ /* record name length for memory allocation later */
+ extra_mem_size += name_len + 1;
+
+ /* test for options */
+ if (strncmp(s, "ro", 2) == 0) {
+ mask_flags |= MTD_WRITEABLE;
+ s += 2;
+ }
+
+ /* if lk is found do NOT unlock the MTD partition*/
+ if (strncmp(s, "lk", 2) == 0) {
+ mask_flags |= MTD_POWERUP_LOCK;
+ s += 2;
+ }
+
+ /* if slc is found use emulated SLC mode on this partition*/
+ if (!strncmp(s, "slc", 3)) {
+ add_flags |= MTD_SLC_ON_MLC_EMULATION;
+ s += 3;
+ }
+
+ /* test if more partitions are following */
+ if (*s == ',') {
+ if (size == SIZE_REMAINING) {
+ pr_err("no partitions allowed after a fill-up partition\n");
+ return ERR_PTR(-EINVAL);
+ }
+ /* more partitions follow, parse them */
+ parts = newpart(s + 1, &s, num_parts, this_part + 1,
+ &extra_mem, extra_mem_size);
+ if (IS_ERR(parts))
+ return parts;
+ } else {
+ /* this is the last partition: allocate space for all */
+ int alloc_size;
+
+ *num_parts = this_part + 1;
+ alloc_size = *num_parts * sizeof(struct mtd_partition) +
+ extra_mem_size;
+
+ parts = kzalloc(alloc_size, GFP_KERNEL);
+ if (!parts)
+ return ERR_PTR(-ENOMEM);
+ extra_mem = (unsigned char *)(parts + *num_parts);
+ }
+
+ /*
+ * enter this partition (offset will be calculated later if it is
+ * OFFSET_CONTINUOUS at this point)
+ */
+ parts[this_part].size = size;
+ parts[this_part].offset = offset;
+ parts[this_part].mask_flags = mask_flags;
+ parts[this_part].add_flags = add_flags;
+ if (name)
+ strscpy(extra_mem, name, name_len + 1);
+ else
+ sprintf(extra_mem, "Partition_%03d", this_part);
+ parts[this_part].name = extra_mem;
+ extra_mem += name_len + 1;
+
+ dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
+ this_part, parts[this_part].name, parts[this_part].offset,
+ parts[this_part].size, parts[this_part].mask_flags));
+
+ /* return (updated) pointer to extra_mem memory */
+ if (extra_mem_ptr)
+ *extra_mem_ptr = extra_mem;
+
+ /* return (updated) pointer command line string */
+ *retptr = s;
+
+ /* return partition table */
+ return parts;
+}
+
+/*
+ * Parse the command line.
+ */
+static int mtdpart_setup_real(char *s)
+{
+ cmdline_parsed = 1;
+
+ for( ; s != NULL; )
+ {
+ struct cmdline_mtd_partition *this_mtd;
+ struct mtd_partition *parts;
+ int mtd_id_len, num_parts;
+ char *p, *mtd_id, *semicol, *open_parenth;
+
+ /*
+ * Replace the first ';' by a NULL char so strrchr can work
+ * properly.
+ */
+ semicol = strchr(s, ';');
+ if (semicol)
+ *semicol = '\0';
+
+ /*
+ * make sure that part-names with ":" will not be handled as
+ * part of the mtd-id with an ":"
+ */
+ open_parenth = strchr(s, '(');
+ if (open_parenth)
+ *open_parenth = '\0';
+
+ mtd_id = s;
+
+ /*
+ * fetch <mtd-id>. We use strrchr to ignore all ':' that could
+ * be present in the MTD name, only the last one is interpreted
+ * as an <mtd-id>/<part-definition> separator.
+ */
+ p = strrchr(s, ':');
+
+ /* Restore the '(' now. */
+ if (open_parenth)
+ *open_parenth = '(';
+
+ /* Restore the ';' now. */
+ if (semicol)
+ *semicol = ';';
+
+ if (!p) {
+ pr_err("no mtd-id\n");
+ return -EINVAL;
+ }
+ mtd_id_len = p - mtd_id;
+
+ dbg(("parsing <%s>\n", p+1));
+
+ /*
+ * parse one mtd. have it reserve memory for the
+ * struct cmdline_mtd_partition and the mtd-id string.
+ */
+ parts = newpart(p + 1, /* cmdline */
+ &s, /* out: updated cmdline ptr */
+ &num_parts, /* out: number of parts */
+ 0, /* first partition */
+ (unsigned char**)&this_mtd, /* out: extra mem */
+ mtd_id_len + 1 + sizeof(*this_mtd) +
+ sizeof(void*)-1 /*alignment*/);
+ if (IS_ERR(parts)) {
+ /*
+ * An error occurred. We're either:
+ * a) out of memory, or
+ * b) in the middle of the partition spec
+ * Either way, this mtd is hosed and we're
+ * unlikely to succeed in parsing any more
+ */
+ return PTR_ERR(parts);
+ }
+
+ /* align this_mtd */
+ this_mtd = (struct cmdline_mtd_partition *)
+ ALIGN((unsigned long)this_mtd, sizeof(void *));
+ /* enter results */
+ this_mtd->parts = parts;
+ this_mtd->num_parts = num_parts;
+ this_mtd->mtd_id = (char*)(this_mtd + 1);
+ strscpy(this_mtd->mtd_id, mtd_id, mtd_id_len + 1);
+
+ /* link into chain */
+ this_mtd->next = partitions;
+ partitions = this_mtd;
+
+ dbg(("mtdid=<%s> num_parts=<%d>\n",
+ this_mtd->mtd_id, this_mtd->num_parts));
+
+ /* EOS - we're done */
+ if (*s == 0)
+ break;
+
+ /* does another spec follow? */
+ if (*s != ';') {
+ pr_err("bad character after partition (%c)\n", *s);
+ return -EINVAL;
+ }
+ s++;
+ }
+
+ return 0;
+}
+
+/*
+ * Main function to be called from the MTD mapping driver/device to
+ * obtain the partitioning information. At this point the command line
+ * arguments will actually be parsed and turned into struct mtd_partition
+ * information. It returns partitions for the requested mtd device, or
+ * the first one in the chain if a NULL mtd_id is passed in.
+ */
+static int parse_cmdline_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ unsigned long long offset;
+ int i, err;
+ struct cmdline_mtd_partition *part;
+ const char *mtd_id = master->name;
+
+ /* parse command line */
+ if (!cmdline_parsed) {
+ err = mtdpart_setup_real(cmdline);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Search for the partition definition matching master->name.
+ * If master->name is not set, stop at first partition definition.
+ */
+ for (part = partitions; part; part = part->next) {
+ if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id)))
+ break;
+ }
+
+ if (!part)
+ return 0;
+
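+	/*
+	 * Resolve "continue after the previous partition" offsets and
+	 * "fill up the rest" sizes now that the flash size is known,
+	 * truncating anything that would run past the end of the device.
+	 */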
+ for (i = 0, offset = 0; i < part->num_parts; i++) {
+ if (part->parts[i].offset == OFFSET_CONTINUOUS)
+ part->parts[i].offset = offset;
+ else
+ offset = part->parts[i].offset;
+
+ if (part->parts[i].size == SIZE_REMAINING)
+ part->parts[i].size = master->size - offset;
+
+ if (offset + part->parts[i].size > master->size) {
+ pr_warn("%s: partitioning exceeds flash size, truncating\n",
+ part->mtd_id);
+ part->parts[i].size = master->size - offset;
+ }
+ offset += part->parts[i].size;
+
+ if (part->parts[i].size == 0) {
+ pr_warn("%s: skipping zero sized partition\n",
+ part->mtd_id);
+ part->num_parts--;
+ memmove(&part->parts[i], &part->parts[i + 1],
+ sizeof(*part->parts) * (part->num_parts - i));
+ i--;
+ }
+ }
+
+ *pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts,
+ GFP_KERNEL);
+ if (!*pparts)
+ return -ENOMEM;
+
+ return part->num_parts;
+}
+
+
+/*
+ * This is the handler for our kernel parameter, called from
+ * main.c::checksetup(). Note that we can not yet kmalloc() anything,
+ * so we only save the commandline for later processing.
+ *
+ * This function needs to be visible for bootloaders.
+ */
+static int __init mtdpart_setup(char *s)
+{
+ cmdline = s;
+ return 1;
+}
+
+__setup("mtdparts=", mtdpart_setup);
+
+static struct mtd_part_parser cmdline_parser = {
+ .parse_fn = parse_cmdline_partitions,
+ .name = "cmdlinepart",
+};
+
+static int __init cmdline_parser_init(void)
+{
+ if (mtdparts)
+ mtdpart_setup(mtdparts);
+ register_mtd_parser(&cmdline_parser);
+ return 0;
+}
+
+static void __exit cmdline_parser_exit(void)
+{
+ deregister_mtd_parser(&cmdline_parser);
+}
+
+module_init(cmdline_parser_init);
+module_exit(cmdline_parser_exit);
+
+MODULE_PARM_DESC(mtdparts, "Partitioning specification");
+module_param(mtdparts, charp, 0);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
+MODULE_DESCRIPTION("Command line configuration of MTD partitions");
diff --git a/drivers/mtd/parsers/ofpart_bcm4908.c b/drivers/mtd/parsers/ofpart_bcm4908.c
new file mode 100644
index 0000000000..bb072a0940
--- /dev/null
+++ b/drivers/mtd/parsers/ofpart_bcm4908.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/mtd/partitions.h>
+
+#include "ofpart_bcm4908.h"
+
+#define BLPARAMS_FW_OFFSET "NAND_RFS_OFS"
+
+static long long bcm4908_partitions_fw_offset(void)
+{
+ struct device_node *root;
+ struct property *prop;
+ const char *s;
+
+ root = of_find_node_by_path("/");
+ if (!root)
+ return -ENOENT;
+
+ of_property_for_each_string(root, "brcm_blparms", prop, s) {
+ size_t len = strlen(BLPARAMS_FW_OFFSET);
+ unsigned long offset;
+ int err;
+
+ if (strncmp(s, BLPARAMS_FW_OFFSET, len) || s[len] != '=')
+ continue;
+
+ err = kstrtoul(s + len + 1, 0, &offset);
+ if (err) {
+ pr_err("failed to parse %s\n", s + len + 1);
+ of_node_put(root);
+ return err;
+ }
+
+ of_node_put(root);
+ return offset << 10;
+ }
+
+ of_node_put(root);
+ return -ENOENT;
+}
+
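+/*
+ * Rename firmware partitions according to the bootloader-provided offset.
+ * An illustrative "brcm_blparms" entry (assumed format; the value is in
+ * KiB, hence the "<< 10" above):
+ *
+ *   NAND_RFS_OFS=1024   ->   firmware image at flash offset 0x100000
+ */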
+int bcm4908_partitions_post_parse(struct mtd_info *mtd, struct mtd_partition *parts, int nr_parts)
+{
+ long long fw_offset;
+ int i;
+
+ fw_offset = bcm4908_partitions_fw_offset();
+
+ for (i = 0; i < nr_parts; i++) {
+ if (of_device_is_compatible(parts[i].of_node, "brcm,bcm4908-firmware")) {
+ if (fw_offset < 0 || parts[i].offset == fw_offset)
+ parts[i].name = "firmware";
+ else
+ parts[i].name = "backup";
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/mtd/parsers/ofpart_bcm4908.h b/drivers/mtd/parsers/ofpart_bcm4908.h
new file mode 100644
index 0000000000..80f8c08664
--- /dev/null
+++ b/drivers/mtd/parsers/ofpart_bcm4908.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BCM4908_PARTITIONS_H
+#define __BCM4908_PARTITIONS_H
+
+#ifdef CONFIG_MTD_OF_PARTS_BCM4908
+int bcm4908_partitions_post_parse(struct mtd_info *mtd, struct mtd_partition *parts, int nr_parts);
+#else
+static inline int bcm4908_partitions_post_parse(struct mtd_info *mtd, struct mtd_partition *parts,
+ int nr_parts)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif
diff --git a/drivers/mtd/parsers/ofpart_core.c b/drivers/mtd/parsers/ofpart_core.c
new file mode 100644
index 0000000000..e7b8e9d0a9
--- /dev/null
+++ b/drivers/mtd/parsers/ofpart_core.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Flash partitions described by the OF (or flattened) device tree
+ *
+ * Copyright © 2006 MontaVista Software Inc.
+ * Author: Vitaly Wool <vwool@ru.mvista.com>
+ *
+ * Revised to handle newer style flash binding by:
+ * Copyright © 2007 David Gibson, IBM Corporation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/mtd/partitions.h>
+
+#include "ofpart_bcm4908.h"
+#include "ofpart_linksys_ns.h"
+
+struct fixed_partitions_quirks {
+ int (*post_parse)(struct mtd_info *mtd, struct mtd_partition *parts, int nr_parts);
+};
+
+static struct fixed_partitions_quirks bcm4908_partitions_quirks = {
+ .post_parse = bcm4908_partitions_post_parse,
+};
+
+static struct fixed_partitions_quirks linksys_ns_partitions_quirks = {
+ .post_parse = linksys_ns_partitions_post_parse,
+};
+
+static const struct of_device_id parse_ofpart_match_table[];
+
+static bool node_has_compatible(struct device_node *pp)
+{
+ return of_get_property(pp, "compatible", NULL);
+}
+
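+/*
+ * An illustrative "fixed-partitions" layout as parsed below (names and
+ * sizes are made up):
+ *
+ *   partitions {
+ *           compatible = "fixed-partitions";
+ *           #address-cells = <1>;
+ *           #size-cells = <1>;
+ *
+ *           partition@0 {
+ *                   label = "u-boot";
+ *                   reg = <0x0 0x40000>;
+ *                   read-only;
+ *           };
+ *   };
+ */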
+static int parse_fixed_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ const struct fixed_partitions_quirks *quirks;
+ const struct of_device_id *of_id;
+ struct mtd_partition *parts;
+ struct device_node *mtd_node;
+ struct device_node *ofpart_node;
+ const char *partname;
+ struct device_node *pp;
+ int nr_parts, i, ret = 0;
+ bool dedicated = true;
+
+ /* Pull of_node from the master device node */
+ mtd_node = mtd_get_of_node(master);
+ if (!mtd_node)
+ return 0;
+
+ if (!master->parent) { /* Master */
+ ofpart_node = of_get_child_by_name(mtd_node, "partitions");
+ if (!ofpart_node) {
+ /*
+ * We might get here even when ofpart isn't used at all (e.g.,
+ * when using another parser), so don't be louder than
+ * KERN_DEBUG
+ */
+ pr_debug("%s: 'partitions' subnode not found on %pOF. Trying to parse direct subnodes as partitions.\n",
+ master->name, mtd_node);
+ ofpart_node = mtd_node;
+ dedicated = false;
+ }
+ } else { /* Partition */
+ ofpart_node = mtd_node;
+ }
+
+ of_id = of_match_node(parse_ofpart_match_table, ofpart_node);
+ if (dedicated && !of_id) {
+ /* The 'partitions' subnode might be used by another parser */
+ return 0;
+ }
+
+ quirks = of_id ? of_id->data : NULL;
+
+ /* First count the subnodes */
+ nr_parts = 0;
+ for_each_child_of_node(ofpart_node, pp) {
+ if (!dedicated && node_has_compatible(pp))
+ continue;
+
+ nr_parts++;
+ }
+
+ if (nr_parts == 0)
+ return 0;
+
+ parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_child_of_node(ofpart_node, pp) {
+ const __be32 *reg;
+ int len;
+ int a_cells, s_cells;
+
+ if (!dedicated && node_has_compatible(pp))
+ continue;
+
+ reg = of_get_property(pp, "reg", &len);
+ if (!reg) {
+ if (dedicated) {
+ pr_debug("%s: ofpart partition %pOF (%pOF) missing reg property.\n",
+ master->name, pp,
+ mtd_node);
+ goto ofpart_fail;
+ } else {
+ nr_parts--;
+ continue;
+ }
+ }
+
+ a_cells = of_n_addr_cells(pp);
+ s_cells = of_n_size_cells(pp);
+ if (!dedicated && s_cells == 0) {
+			/*
+			 * This is an ugly workaround to avoid regressions on
+			 * devices that still create partitions as direct
+			 * children of the NAND controller node. This can
+			 * happen when the controller node has #size-cells
+			 * equal to 0 and the firmware (e.g. U-Boot) just
+			 * adds the partitions there assuming 32-bit
+			 * addressing.
+			 *
+			 * If you get this warning, your firmware and/or DTS
+			 * should really be fixed.
+			 *
+			 * This only works for devices smaller than 4 GiB.
+			 */
+ pr_warn("%s: ofpart partition %pOF (%pOF) #size-cells is wrongly set to <0>, assuming <1> for parsing partitions.\n",
+ master->name, pp, mtd_node);
+ s_cells = 1;
+ }
+ if (len / 4 != a_cells + s_cells) {
+ pr_debug("%s: ofpart partition %pOF (%pOF) error parsing reg property.\n",
+ master->name, pp,
+ mtd_node);
+ goto ofpart_fail;
+ }
+
+ parts[i].offset = of_read_number(reg, a_cells);
+ parts[i].size = of_read_number(reg + a_cells, s_cells);
+ parts[i].of_node = pp;
+
+ partname = of_get_property(pp, "label", &len);
+ if (!partname)
+ partname = of_get_property(pp, "name", &len);
+ parts[i].name = partname;
+
+ if (of_get_property(pp, "read-only", &len))
+ parts[i].mask_flags |= MTD_WRITEABLE;
+
+ if (of_get_property(pp, "lock", &len))
+ parts[i].mask_flags |= MTD_POWERUP_LOCK;
+
+ if (of_property_read_bool(pp, "slc-mode"))
+ parts[i].add_flags |= MTD_SLC_ON_MLC_EMULATION;
+
+ i++;
+ }
+
+ if (!nr_parts)
+ goto ofpart_none;
+
+ if (quirks && quirks->post_parse)
+ quirks->post_parse(master, parts, nr_parts);
+
+ *pparts = parts;
+ return nr_parts;
+
+ofpart_fail:
+ pr_err("%s: error parsing ofpart partition %pOF (%pOF)\n",
+ master->name, pp, mtd_node);
+ ret = -EINVAL;
+ofpart_none:
+ of_node_put(pp);
+ kfree(parts);
+ return ret;
+}
+
+static const struct of_device_id parse_ofpart_match_table[] = {
+ /* Generic */
+ { .compatible = "fixed-partitions" },
+ /* Customized */
+ { .compatible = "brcm,bcm4908-partitions", .data = &bcm4908_partitions_quirks, },
+ { .compatible = "linksys,ns-partitions", .data = &linksys_ns_partitions_quirks, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, parse_ofpart_match_table);
+
+static struct mtd_part_parser ofpart_parser = {
+ .parse_fn = parse_fixed_partitions,
+ .name = "fixed-partitions",
+ .of_match_table = parse_ofpart_match_table,
+};
+
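+/*
+ * The obsolete binding parsed below stores (offset, length) pairs in a
+ * single "partitions" property, with bit 0 of the length marking the
+ * partition read-only. An illustrative (made-up) example:
+ *
+ *   partitions = <0x00000000 0x00040001   // u-boot, read-only
+ *                 0x00040000 0x003c0000>; // rootfs
+ *   partition-names = "u-boot", "rootfs";
+ */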
+static int parse_ofoldpart_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_partition *parts;
+ struct device_node *dp;
+ int i, plen, nr_parts;
+ const struct {
+ __be32 offset, len;
+ } *part;
+ const char *names;
+
+ /* Pull of_node from the master device node */
+ dp = mtd_get_of_node(master);
+ if (!dp)
+ return 0;
+
+ part = of_get_property(dp, "partitions", &plen);
+ if (!part)
+ return 0; /* No partitions found */
+
+ pr_warn("Device tree uses obsolete partition map binding: %pOF\n", dp);
+
+ nr_parts = plen / sizeof(part[0]);
+
+ parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ names = of_get_property(dp, "partition-names", &plen);
+
+ for (i = 0; i < nr_parts; i++) {
+ parts[i].offset = be32_to_cpu(part->offset);
+ parts[i].size = be32_to_cpu(part->len) & ~1;
+ /* bit 0 set signifies read only partition */
+ if (be32_to_cpu(part->len) & 1)
+ parts[i].mask_flags = MTD_WRITEABLE;
+
+ if (names && (plen > 0)) {
+ int len = strlen(names) + 1;
+
+ parts[i].name = names;
+ plen -= len;
+ names += len;
+ } else {
+ parts[i].name = "unnamed";
+ }
+
+ part++;
+ }
+
+ *pparts = parts;
+ return nr_parts;
+}
+
+static struct mtd_part_parser ofoldpart_parser = {
+ .parse_fn = parse_ofoldpart_partitions,
+ .name = "ofoldpart",
+};
+
+static int __init ofpart_parser_init(void)
+{
+ register_mtd_parser(&ofpart_parser);
+ register_mtd_parser(&ofoldpart_parser);
+ return 0;
+}
+
+static void __exit ofpart_parser_exit(void)
+{
+ deregister_mtd_parser(&ofpart_parser);
+ deregister_mtd_parser(&ofoldpart_parser);
+}
+
+module_init(ofpart_parser_init);
+module_exit(ofpart_parser_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree");
+MODULE_AUTHOR("Vitaly Wool, David Gibson");
+/*
+ * When MTD core cannot find the requested parser, it tries to load the module
+ * with the same name. Since we provide the ofoldpart parser, we should have
+ * the corresponding alias.
+ */
+MODULE_ALIAS("fixed-partitions");
+MODULE_ALIAS("ofoldpart");
diff --git a/drivers/mtd/parsers/ofpart_linksys_ns.c b/drivers/mtd/parsers/ofpart_linksys_ns.c
new file mode 100644
index 0000000000..318c42d025
--- /dev/null
+++ b/drivers/mtd/parsers/ofpart_linksys_ns.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/bcm47xx_nvram.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include "ofpart_linksys_ns.h"
+
+#define NVRAM_BOOT_PART "bootpartition"
+
+static int ofpart_linksys_ns_bootpartition(void)
+{
+ char buf[4];
+ int bootpartition;
+
+ /* Check CFE environment variable */
+ if (bcm47xx_nvram_getenv(NVRAM_BOOT_PART, buf, sizeof(buf)) > 0) {
+ if (!kstrtoint(buf, 0, &bootpartition))
+ return bootpartition;
+ pr_warn("Failed to parse %s value \"%s\"\n", NVRAM_BOOT_PART,
+ buf);
+ } else {
+ pr_warn("Failed to get NVRAM \"%s\"\n", NVRAM_BOOT_PART);
+ }
+
+ return 0;
+}
+
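+/*
+ * Dual-firmware naming: among the partitions compatible with
+ * "linksys,ns-firmware", the one whose index matches CFE's
+ * "bootpartition" value is named "firmware", the other "backup".
+ */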
+int linksys_ns_partitions_post_parse(struct mtd_info *mtd,
+ struct mtd_partition *parts,
+ int nr_parts)
+{
+ int bootpartition = ofpart_linksys_ns_bootpartition();
+ int trx_idx = 0;
+ int i;
+
+ for (i = 0; i < nr_parts; i++) {
+ if (of_device_is_compatible(parts[i].of_node, "linksys,ns-firmware")) {
+ if (trx_idx++ == bootpartition)
+ parts[i].name = "firmware";
+ else
+ parts[i].name = "backup";
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/mtd/parsers/ofpart_linksys_ns.h b/drivers/mtd/parsers/ofpart_linksys_ns.h
new file mode 100644
index 0000000000..730c46812e
--- /dev/null
+++ b/drivers/mtd/parsers/ofpart_linksys_ns.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __OFPART_LINKSYS_NS_H
+#define __OFPART_LINKSYS_NS_H
+
+#ifdef CONFIG_MTD_OF_PARTS_LINKSYS_NS
+int linksys_ns_partitions_post_parse(struct mtd_info *mtd,
+ struct mtd_partition *parts,
+ int nr_parts);
+#else
+static inline int linksys_ns_partitions_post_parse(struct mtd_info *mtd,
+ struct mtd_partition *parts,
+ int nr_parts)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif
diff --git a/drivers/mtd/parsers/parser_imagetag.c b/drivers/mtd/parsers/parser_imagetag.c
new file mode 100644
index 0000000000..fab0949aab
--- /dev/null
+++ b/drivers/mtd/parsers/parser_imagetag.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * BCM63XX CFE image tag parser
+ *
+ * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
+ * Mike Albon <malbon@openwrt.org>
+ * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
+ * Copyright © 2011-2013 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bcm963xx_tag.h>
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+
+/* Ensure strings read from flash structs are null terminated */
+#define STR_NULL_TERMINATE(x) \
+ do { char *_str = (x); _str[sizeof(x) - 1] = 0; } while (0)
+
+static int bcm963xx_read_imagetag(struct mtd_info *master, const char *name,
+ loff_t tag_offset, struct bcm_tag *buf)
+{
+ int ret;
+ size_t retlen;
+ u32 computed_crc;
+
+ ret = mtd_read(master, tag_offset, sizeof(*buf), &retlen, (void *)buf);
+ if (ret)
+ return ret;
+
+ if (retlen != sizeof(*buf))
+ return -EIO;
+
+ computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
+ offsetof(struct bcm_tag, header_crc));
+ if (computed_crc == buf->header_crc) {
+ STR_NULL_TERMINATE(buf->board_id);
+ STR_NULL_TERMINATE(buf->tag_version);
+
+ pr_info("%s: CFE image tag found at 0x%llx with version %s, board type %s\n",
+ name, tag_offset, buf->tag_version, buf->board_id);
+
+ return 0;
+ }
+
+ pr_warn("%s: CFE image tag at 0x%llx CRC invalid (expected %08x, actual %08x)\n",
+ name, tag_offset, buf->header_crc, computed_crc);
+ return -EINVAL;
+}
+
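+/*
+ * Note: the image tag stores addresses and lengths as ASCII decimal
+ * strings, hence the STR_NULL_TERMINATE() + kstrtouint() sequences
+ * below. Addresses are absolute flash addresses and are converted to
+ * partition-relative offsets before the partition table is built.
+ */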
+static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ /* CFE, NVRAM and global Linux are always present */
+ int nrparts = 0, curpart = 0;
+ struct bcm_tag *buf = NULL;
+ struct mtd_partition *parts;
+ int ret;
+ unsigned int rootfsaddr, kerneladdr, spareaddr, offset;
+ unsigned int rootfslen, kernellen, sparelen, totallen;
+ int i;
+ bool rootfs_first = false;
+
+ buf = vmalloc(sizeof(struct bcm_tag));
+ if (!buf)
+ return -ENOMEM;
+
+ /* Get the tag */
+ ret = bcm963xx_read_imagetag(master, "rootfs", 0, buf);
+ if (!ret) {
+ STR_NULL_TERMINATE(buf->flash_image_start);
+ if (kstrtouint(buf->flash_image_start, 10, &rootfsaddr) ||
+ rootfsaddr < BCM963XX_EXTENDED_SIZE) {
+ pr_err("invalid rootfs address: %*ph\n",
+ (int)sizeof(buf->flash_image_start),
+ buf->flash_image_start);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ STR_NULL_TERMINATE(buf->kernel_address);
+ if (kstrtouint(buf->kernel_address, 10, &kerneladdr) ||
+ kerneladdr < BCM963XX_EXTENDED_SIZE) {
+ pr_err("invalid kernel address: %*ph\n",
+ (int)sizeof(buf->kernel_address),
+ buf->kernel_address);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ STR_NULL_TERMINATE(buf->kernel_length);
+ if (kstrtouint(buf->kernel_length, 10, &kernellen)) {
+ pr_err("invalid kernel length: %*ph\n",
+ (int)sizeof(buf->kernel_length),
+ buf->kernel_length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ STR_NULL_TERMINATE(buf->total_length);
+ if (kstrtouint(buf->total_length, 10, &totallen)) {
+ pr_err("invalid total length: %*ph\n",
+ (int)sizeof(buf->total_length),
+ buf->total_length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Addresses are flash absolute, so convert to partition
+ * relative addresses. Assume either kernel or rootfs will
+ * directly follow the image tag.
+ */
+ if (rootfsaddr < kerneladdr)
+ offset = rootfsaddr - sizeof(struct bcm_tag);
+ else
+ offset = kerneladdr - sizeof(struct bcm_tag);
+
+ kerneladdr = kerneladdr - offset;
+ rootfsaddr = rootfsaddr - offset;
+ spareaddr = roundup(totallen, master->erasesize);
+
+ if (rootfsaddr < kerneladdr) {
+ /* default Broadcom layout */
+ rootfslen = kerneladdr - rootfsaddr;
+ rootfs_first = true;
+ } else {
+ /* OpenWrt layout */
+ rootfsaddr = kerneladdr + kernellen;
+ rootfslen = spareaddr - rootfsaddr;
+ }
+ } else {
+ goto out;
+ }
+ sparelen = master->size - spareaddr;
+
+ /* Determine number of partitions */
+ if (rootfslen > 0)
+ nrparts++;
+
+ if (kernellen > 0)
+ nrparts++;
+
+ parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
+ if (!parts) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Start building partition list */
+ if (kernellen > 0) {
+ int kernelpart = curpart;
+
+ if (rootfslen > 0 && rootfs_first)
+ kernelpart++;
+ parts[kernelpart].name = "kernel";
+ parts[kernelpart].offset = kerneladdr;
+ parts[kernelpart].size = kernellen;
+ curpart++;
+ }
+
+ if (rootfslen > 0) {
+ int rootfspart = curpart;
+
+ if (kernellen > 0 && rootfs_first)
+ rootfspart--;
+ parts[rootfspart].name = "rootfs";
+ parts[rootfspart].offset = rootfsaddr;
+ parts[rootfspart].size = rootfslen;
+ if (sparelen > 0 && !rootfs_first)
+ parts[rootfspart].size += sparelen;
+ curpart++;
+ }
+
+ for (i = 0; i < nrparts; i++)
+ pr_info("Partition %d is %s offset %llx and length %llx\n", i,
+ parts[i].name, parts[i].offset, parts[i].size);
+
+ pr_info("Spare partition is offset %x and length %x\n", spareaddr,
+ sparelen);
+
+ *pparts = parts;
+ ret = 0;
+
+out:
+ vfree(buf);
+
+ if (ret)
+ return ret;
+
+ return nrparts;
+}
+
+static const struct of_device_id parse_bcm963xx_imagetag_match_table[] = {
+ { .compatible = "brcm,bcm963xx-imagetag" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, parse_bcm963xx_imagetag_match_table);
+
+static struct mtd_part_parser bcm963xx_imagetag_parser = {
+ .parse_fn = bcm963xx_parse_imagetag_partitions,
+ .name = "bcm963xx-imagetag",
+ .of_match_table = parse_bcm963xx_imagetag_match_table,
+};
+module_mtd_part_parser(bcm963xx_imagetag_parser);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
+MODULE_AUTHOR("Jonas Gorski <jonas.gorski@gmail.com>");
+MODULE_DESCRIPTION("MTD parser for BCM963XX CFE Image Tag partitions");
diff --git a/drivers/mtd/parsers/parser_trx.c b/drivers/mtd/parsers/parser_trx.c
new file mode 100644
index 0000000000..4814cf218e
--- /dev/null
+++ b/drivers/mtd/parsers/parser_trx.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Parser for TRX format partitions
+ *
+ * Copyright (C) 2012 - 2017 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#define TRX_PARSER_MAX_PARTS 4
+
+/* Magics */
+#define TRX_MAGIC 0x30524448
+#define UBI_EC_MAGIC 0x23494255 /* UBI# */
+
+struct trx_header {
+ uint32_t magic;
+ uint32_t length;
+ uint32_t crc32;
+ uint16_t flags;
+ uint16_t version;
+ uint32_t offset[3];
+} __packed;
+
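+/*
+ * offset[] points at the images inside the TRX. With all three slots in
+ * use the layout is [loader][linux][rootfs]; without an LZMA loader,
+ * offset[2] is 0 and the layout is [linux][rootfs].
+ */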
+static const char *parser_trx_data_part_name(struct mtd_info *master,
+ size_t offset)
+{
+ uint32_t buf;
+ size_t bytes_read;
+ int err;
+
+ err = mtd_read(master, offset, sizeof(buf), &bytes_read,
+ (uint8_t *)&buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%zX): %d\n",
+ offset, err);
+ goto out_default;
+ }
+
+ if (buf == UBI_EC_MAGIC)
+ return "ubi";
+
+out_default:
+ return "rootfs";
+}
+
+static int parser_trx_parse(struct mtd_info *mtd,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct device_node *np = mtd_get_of_node(mtd);
+ struct mtd_partition *parts;
+ struct mtd_partition *part;
+ struct trx_header trx;
+ size_t bytes_read;
+ uint8_t curr_part = 0, i = 0;
+ uint32_t trx_magic = TRX_MAGIC;
+ int err;
+
+ /* Get different magic from device tree if specified */
+ err = of_property_read_u32(np, "brcm,trx-magic", &trx_magic);
+ if (err != 0 && err != -EINVAL)
+ pr_err("failed to parse \"brcm,trx-magic\" DT attribute, using default: %d\n", err);
+
+ parts = kcalloc(TRX_PARSER_MAX_PARTS, sizeof(struct mtd_partition),
+ GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ err = mtd_read(mtd, 0, sizeof(trx), &bytes_read, (uint8_t *)&trx);
+ if (err) {
+ pr_err("MTD reading error: %d\n", err);
+ kfree(parts);
+ return err;
+ }
+
+ if (trx.magic != trx_magic) {
+ kfree(parts);
+ return -ENOENT;
+ }
+
+	/* We have an LZMA loader if there is an address in offset[2] */
+ if (trx.offset[2]) {
+ part = &parts[curr_part++];
+ part->name = "loader";
+ part->offset = trx.offset[i];
+ i++;
+ }
+
+ if (trx.offset[i]) {
+ part = &parts[curr_part++];
+ part->name = "linux";
+ part->offset = trx.offset[i];
+ i++;
+ }
+
+ if (trx.offset[i]) {
+ part = &parts[curr_part++];
+ part->name = parser_trx_data_part_name(mtd, trx.offset[i]);
+ part->offset = trx.offset[i];
+ i++;
+ }
+
+	/*
+	 * Assume that every partition ends where the next one begins, with
+	 * the last one extending to the end of the device.
+	 */
+ for (i = 0; i < curr_part; i++) {
+ u64 next_part_offset = (i < curr_part - 1) ?
+ parts[i + 1].offset : mtd->size;
+
+ parts[i].size = next_part_offset - parts[i].offset;
+ }
+
+ *pparts = parts;
+ return i;
+}
+
+static const struct of_device_id mtd_parser_trx_of_match_table[] = {
+ { .compatible = "brcm,trx" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtd_parser_trx_of_match_table);
+
+static struct mtd_part_parser mtd_parser_trx = {
+ .parse_fn = parser_trx_parse,
+ .name = "trx",
+ .of_match_table = mtd_parser_trx_of_match_table,
+};
+module_mtd_part_parser(mtd_parser_trx);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Parser for TRX format partitions");
diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
new file mode 100644
index 0000000000..4311b89d8d
--- /dev/null
+++ b/drivers/mtd/parsers/qcomsmempart.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm SMEM NAND flash partition parser
+ *
+ * Copyright (C) 2020, Linaro Ltd.
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+
+#define SMEM_AARM_PARTITION_TABLE 9
+#define SMEM_APPS 0
+
+#define SMEM_FLASH_PART_MAGIC1 0x55ee73aa
+#define SMEM_FLASH_PART_MAGIC2 0xe35ebddb
+#define SMEM_FLASH_PTABLE_V3 3
+#define SMEM_FLASH_PTABLE_V4 4
+#define SMEM_FLASH_PTABLE_MAX_PARTS_V3 16
+#define SMEM_FLASH_PTABLE_MAX_PARTS_V4 48
+#define SMEM_FLASH_PTABLE_HDR_LEN (4 * sizeof(u32))
+#define SMEM_FLASH_PTABLE_NAME_SIZE 16
+
+/**
+ * struct smem_flash_pentry - SMEM Flash partition entry
+ * @name: Name of the partition
+ * @offset: Offset in blocks
+ * @length: Length of the partition in blocks
+ * @attr: Flags for this partition
+ */
+struct smem_flash_pentry {
+ char name[SMEM_FLASH_PTABLE_NAME_SIZE];
+ __le32 offset;
+ __le32 length;
+ u8 attr;
+} __packed __aligned(4);
+
+/**
+ * struct smem_flash_ptable - SMEM Flash partition table
+ * @magic1: Partition table Magic 1
+ * @magic2: Partition table Magic 2
+ * @version: Partition table version
+ * @numparts: Number of partitions in this ptable
+ * @pentry: Flash partition entries belonging to this ptable
+ */
+struct smem_flash_ptable {
+ __le32 magic1;
+ __le32 magic2;
+ __le32 version;
+ __le32 numparts;
+ struct smem_flash_pentry pentry[SMEM_FLASH_PTABLE_MAX_PARTS_V4];
+} __packed __aligned(4);
+
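+/*
+ * Note: offsets and lengths in the SMEM table are stored in units of
+ * erase blocks; parse_qcomsmem_part() converts them to bytes by
+ * multiplying with mtd->erasesize.
+ */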
+static int parse_qcomsmem_part(struct mtd_info *mtd,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
+ int ret, i, j, tmpparts, numparts = 0;
+ struct smem_flash_pentry *pentry;
+ struct smem_flash_ptable *ptable;
+ struct mtd_partition *parts;
+ char *name, *c;
+
+	if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) &&
+	    mtd->type == MTD_NORFLASH) {
+ pr_err("%s: SMEM partition parser is incompatible with 4K sectors\n",
+ mtd->name);
+ return -EINVAL;
+ }
+
+ pr_debug("Parsing partition table info from SMEM\n");
+ ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
+ if (IS_ERR(ptable)) {
+ if (PTR_ERR(ptable) != -EPROBE_DEFER)
+ pr_err("Error reading partition table header\n");
+ return PTR_ERR(ptable);
+ }
+
+ /* Verify ptable magic */
+ if (le32_to_cpu(ptable->magic1) != SMEM_FLASH_PART_MAGIC1 ||
+ le32_to_cpu(ptable->magic2) != SMEM_FLASH_PART_MAGIC2) {
+ pr_err("Partition table magic verification failed\n");
+ return -EINVAL;
+ }
+
+	/* Ensure the number of partitions does not exceed the max we allocated */
+ tmpparts = le32_to_cpu(ptable->numparts);
+ if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
+ pr_err("Partition numbers exceed the max limit\n");
+ return -EINVAL;
+ }
+
+ /* Find out length of partition data based on table version */
+ if (le32_to_cpu(ptable->version) <= SMEM_FLASH_PTABLE_V3) {
+ len = SMEM_FLASH_PTABLE_HDR_LEN + SMEM_FLASH_PTABLE_MAX_PARTS_V3 *
+ sizeof(struct smem_flash_pentry);
+ } else if (le32_to_cpu(ptable->version) == SMEM_FLASH_PTABLE_V4) {
+ len = SMEM_FLASH_PTABLE_HDR_LEN + SMEM_FLASH_PTABLE_MAX_PARTS_V4 *
+ sizeof(struct smem_flash_pentry);
+ } else {
+		pr_err("Unknown ptable version (%d)\n", le32_to_cpu(ptable->version));
+ return -EINVAL;
+ }
+
+ /*
+ * Now that the partition table header has been parsed, verified
+ * and the length of the partition table calculated, read the
+ * complete partition table
+ */
+ ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
+ if (IS_ERR(ptable)) {
+ pr_err("Error reading partition table\n");
+ return PTR_ERR(ptable);
+ }
+
+ for (i = 0; i < tmpparts; i++) {
+ pentry = &ptable->pentry[i];
+ if (pentry->name[0] != '\0')
+ numparts++;
+ }
+
+ parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; i < tmpparts; i++) {
+ pentry = &ptable->pentry[i];
+ if (pentry->name[0] == '\0')
+ continue;
+
+ name = kstrdup(pentry->name, GFP_KERNEL);
+ if (!name) {
+ ret = -ENOMEM;
+ goto out_free_parts;
+ }
+
+ /* Convert name to lower case */
+ for (c = name; *c != '\0'; c++)
+ *c = tolower(*c);
+
+ parts[j].name = name;
+ parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
+ parts[j].mask_flags = pentry->attr;
+ parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize;
+ pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
+ i, pentry->name, le32_to_cpu(pentry->offset),
+ le32_to_cpu(pentry->length), pentry->attr);
+ j++;
+ }
+
+ pr_debug("SMEM partition table found: ver: %d len: %d\n",
+ le32_to_cpu(ptable->version), tmpparts);
+ *pparts = parts;
+
+ return numparts;
+
+out_free_parts:
+ while (--j >= 0)
+ kfree(parts[j].name);
+ kfree(parts);
+ *pparts = NULL;
+
+ return ret;
+}
+
+static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,
+ int nr_parts)
+{
+ int i;
+
+ for (i = 0; i < nr_parts; i++)
+ kfree(pparts[i].name);
+
+ kfree(pparts);
+}
+
+static const struct of_device_id qcomsmem_of_match_table[] = {
+ { .compatible = "qcom,smem-part" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, qcomsmem_of_match_table);
+
+static struct mtd_part_parser mtd_parser_qcomsmem = {
+ .parse_fn = parse_qcomsmem_part,
+ .cleanup = parse_qcomsmem_cleanup,
+ .name = "qcomsmem",
+ .of_match_table = qcomsmem_of_match_table,
+};
+module_mtd_part_parser(mtd_parser_qcomsmem);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm SMEM NAND flash partition parser");
diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
new file mode 100644
index 0000000000..a16b42a885
--- /dev/null
+++ b/drivers/mtd/parsers/redboot.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Parse RedBoot-style Flash Image System (FIS) tables and
+ * produce a Linux partition array to match.
+ *
+ * Copyright © 2001 Red Hat UK Limited
+ * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/module.h>
+
+struct fis_image_desc {
+ unsigned char name[16]; // Null terminated name
+ u32 flash_base; // Address within FLASH of image
+ u32 mem_base; // Address in memory where it executes
+ u32 size; // Length of image
+ u32 entry_point; // Execution entry point
+ u32 data_length; // Length of actual data
+ unsigned char _pad[256 - (16 + 7 * sizeof(u32))];
+ u32 desc_cksum; // Checksum over image descriptor
+ u32 file_cksum; // Checksum over image data
+};
+
+struct fis_list {
+ struct fis_image_desc *img;
+ struct fis_list *next;
+};
+
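+/*
+ * Erase block holding the FIS directory. A negative value is interpreted
+ * as counting back from the end of the device (e.g. -1 means the last
+ * erase block), as handled in parse_redboot_partitions().
+ */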
+static int directory = CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK;
+module_param(directory, int, 0);
+
+static inline int redboot_checksum(struct fis_image_desc *img)
+{
+ /* RedBoot doesn't actually write the desc_cksum field yet AFAICT */
+ return 1;
+}
+
+static void parse_redboot_of(struct mtd_info *master)
+{
+ struct device_node *np;
+ struct device_node *npart;
+ u32 dirblock;
+ int ret;
+
+ np = mtd_get_of_node(master);
+ if (!np)
+ return;
+
+ npart = of_get_child_by_name(np, "partitions");
+ if (!npart)
+ return;
+
+ ret = of_property_read_u32(npart, "fis-index-block", &dirblock);
+ of_node_put(npart);
+ if (ret)
+ return;
+
+	/*
+	 * Assign the block found in the device tree to the module-wide
+	 * FIS directory block index.
+	 */
+ directory = dirblock;
+}
+
+static int parse_redboot_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ int nrparts = 0;
+ struct fis_image_desc *buf;
+ struct mtd_partition *parts;
+ struct fis_list *fl = NULL, *tmp_fl;
+ int ret, i;
+ size_t retlen;
+ char *names;
+ char *nullname;
+ int namelen = 0;
+ int nulllen = 0;
+ int numslots;
+ unsigned long offset;
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ static char nullstring[] = "unallocated";
+#endif
+
+ parse_redboot_of(master);
+
+ if (directory < 0) {
+ offset = master->size + directory * master->erasesize;
+ while (mtd_block_isbad(master, offset)) {
+ if (!offset) {
+nogood:
+ pr_notice("Failed to find a non-bad block to check for RedBoot partition table\n");
+ return -EIO;
+ }
+ offset -= master->erasesize;
+ }
+ } else {
+ offset = directory * master->erasesize;
+ while (mtd_block_isbad(master, offset)) {
+ offset += master->erasesize;
+ if (offset == master->size)
+ goto nogood;
+ }
+ }
+ buf = vmalloc(master->erasesize);
+
+ if (!buf)
+ return -ENOMEM;
+
+ pr_notice("Searching for RedBoot partition table in %s at offset 0x%lx\n",
+ master->name, offset);
+
+ ret = mtd_read(master, offset, master->erasesize, &retlen,
+ (void *)buf);
+
+ if (ret)
+ goto out;
+
+ if (retlen != master->erasesize) {
+ ret = -EIO;
+ goto out;
+ }
+
+ numslots = (master->erasesize / sizeof(struct fis_image_desc));
+ for (i = 0; i < numslots; i++) {
+ if (!memcmp(buf[i].name, "FIS directory", 14)) {
+ /* This is apparently the FIS directory entry for the
+ * FIS directory itself. The FIS directory size is
+ * one erase block; if the buf[i].size field is
+ * swab32(erasesize) then we know we are looking at
+ * a byte swapped FIS directory - swap all the entries!
+ * (NOTE: this is 'size' not 'data_length'; size is
+ * the full size of the entry.)
+ */
+
+ /* RedBoot can combine the FIS directory and
+ config partitions into a single eraseblock;
+ we assume wrong-endian if either the swapped
+ 'size' matches the eraseblock size precisely,
+ or if the swapped size actually fits in an
+ eraseblock while the unswapped size doesn't. */
+ if (swab32(buf[i].size) == master->erasesize ||
+ (buf[i].size > master->erasesize
+ && swab32(buf[i].size) < master->erasesize)) {
+ int j;
+ /* Update numslots based on actual FIS directory size */
+ numslots = swab32(buf[i].size) / sizeof(struct fis_image_desc);
+ for (j = 0; j < numslots; ++j) {
+ /* A single 0xff denotes a deleted entry.
+ * Two of them in a row is the end of the table.
+ */
+ if (buf[j].name[0] == 0xff) {
+ if (buf[j].name[1] == 0xff) {
+ break;
+ } else {
+ continue;
+ }
+ }
+
+				/* The 32-bit fields were written with the
+				 * wrong byte order; name and pad have no
+				 * byte order.
+				 */
+ swab32s(&buf[j].flash_base);
+ swab32s(&buf[j].mem_base);
+ swab32s(&buf[j].size);
+ swab32s(&buf[j].entry_point);
+ swab32s(&buf[j].data_length);
+ swab32s(&buf[j].desc_cksum);
+ swab32s(&buf[j].file_cksum);
+ }
+ } else if (buf[i].size < master->erasesize) {
+ /* Update numslots based on actual FIS directory size */
+ numslots = buf[i].size / sizeof(struct fis_image_desc);
+ }
+ break;
+ }
+ }
+ if (i == numslots) {
+ /* Didn't find it */
+ pr_notice("No RedBoot partition table detected in %s\n",
+ master->name);
+ ret = 0;
+ goto out;
+ }
+
+ for (i = 0; i < numslots; i++) {
+ struct fis_list *new_fl, **prev;
+
+ if (buf[i].name[0] == 0xff) {
+ if (buf[i].name[1] == 0xff) {
+ break;
+ } else {
+ continue;
+ }
+ }
+ if (!redboot_checksum(&buf[i]))
+ break;
+
+ new_fl = kmalloc(sizeof(struct fis_list), GFP_KERNEL);
+ namelen += strlen(buf[i].name) + 1;
+ if (!new_fl) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ new_fl->img = &buf[i];
+ if (data && data->origin)
+ buf[i].flash_base -= data->origin;
+ else
+ buf[i].flash_base &= master->size - 1;
+
+ /* I'm sure the JFFS2 code has done me permanent damage.
+ * I now think the following is _normal_
+ */
+ prev = &fl;
+ while (*prev && (*prev)->img->flash_base < new_fl->img->flash_base)
+ prev = &(*prev)->next;
+ new_fl->next = *prev;
+ *prev = new_fl;
+
+ nrparts++;
+ }
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if (fl->img->flash_base) {
+ nrparts++;
+ nulllen = sizeof(nullstring);
+ }
+
+ for (tmp_fl = fl; tmp_fl->next; tmp_fl = tmp_fl->next) {
+ if (tmp_fl->img->flash_base + tmp_fl->img->size + master->erasesize <= tmp_fl->next->img->flash_base) {
+ nrparts++;
+ nulllen = sizeof(nullstring);
+ }
+ }
+#endif
+ parts = kzalloc(sizeof(*parts) * nrparts + nulllen + namelen, GFP_KERNEL);
+
+ if (!parts) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ nullname = (char *)&parts[nrparts];
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if (nulllen > 0)
+ strcpy(nullname, nullstring);
+#endif
+ names = nullname + nulllen;
+
+ i = 0;
+
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if (fl->img->flash_base) {
+ parts[0].name = nullname;
+ parts[0].size = fl->img->flash_base;
+ parts[0].offset = 0;
+ i++;
+ }
+#endif
+ for ( ; i < nrparts; i++) {
+ parts[i].size = fl->img->size;
+ parts[i].offset = fl->img->flash_base;
+ parts[i].name = names;
+
+ strcpy(names, fl->img->name);
+#ifdef CONFIG_MTD_REDBOOT_PARTS_READONLY
+ if (!memcmp(names, "RedBoot", 8) ||
+ !memcmp(names, "RedBoot config", 15) ||
+ !memcmp(names, "FIS directory", 14)) {
+ parts[i].mask_flags = MTD_WRITEABLE;
+ }
+#endif
+ names += strlen(names) + 1;
+
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if (fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
+ i++;
+ parts[i].offset = parts[i - 1].size + parts[i - 1].offset;
+ parts[i].size = fl->next->img->flash_base - parts[i].offset;
+ parts[i].name = nullname;
+ }
+#endif
+ tmp_fl = fl;
+ fl = fl->next;
+ kfree(tmp_fl);
+ }
+ ret = nrparts;
+ *pparts = parts;
+ out:
+ while (fl) {
+ struct fis_list *old = fl;
+
+ fl = fl->next;
+ kfree(old);
+ }
+ vfree(buf);
+ return ret;
+}
+
+static const struct of_device_id mtd_parser_redboot_of_match_table[] = {
+ { .compatible = "redboot-fis" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtd_parser_redboot_of_match_table);
+
+static struct mtd_part_parser redboot_parser = {
+ .parse_fn = parse_redboot_partitions,
+ .name = "RedBoot",
+ .of_match_table = mtd_parser_redboot_of_match_table,
+};
+module_mtd_part_parser(redboot_parser);
+
+/* mtd parsers will request the module by parser name */
+MODULE_ALIAS("RedBoot");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Parsing code for RedBoot Flash Image System (FIS) tables");
diff --git a/drivers/mtd/parsers/scpart.c b/drivers/mtd/parsers/scpart.c
new file mode 100644
index 0000000000..6e5e11c370
--- /dev/null
+++ b/drivers/mtd/parsers/scpart.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * drivers/mtd/scpart.c: Sercomm Partition Parser
+ *
+ * Copyright (C) 2018 NOGUCHI Hiroshi
+ * Copyright (C) 2022 Mikhail Zhilkin
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/module.h>
+
+#define MOD_NAME "scpart"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) MOD_NAME ": " fmt
+
+#define ID_ALREADY_FOUND 0xffffffffUL
+
+#define MAP_OFFS_IN_BLK 0x800
+#define MAP_MIRROR_NUM 2
+
+static const char sc_part_magic[] = {
+ 'S', 'C', 'F', 'L', 'M', 'A', 'P', 'O', 'K', '\0',
+};
+#define PART_MAGIC_LEN sizeof(sc_part_magic)
+
+/* assumes that all fields are set by CPU native endian */
+struct sc_part_desc {
+ uint32_t part_id;
+ uint32_t part_offs;
+ uint32_t part_bytes;
+};
+
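+/*
+ * Layout of a 'SC PART MAP' erase block, as scanned below: the magic
+ * string sits at offset 0 and an array of sc_part_desc entries starts at
+ * MAP_OFFS_IN_BLK (0x800), terminated by an all-0xffffffff entry. The map
+ * is kept in up to MAP_MIRROR_NUM mirrored blocks; the first one holding
+ * valid descriptors wins.
+ */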
+static uint32_t scpart_desc_is_valid(struct sc_part_desc *pdesc)
+{
+ return ((pdesc->part_id != 0xffffffffUL) &&
+ (pdesc->part_offs != 0xffffffffUL) &&
+ (pdesc->part_bytes != 0xffffffffUL));
+}
+
+static int scpart_scan_partmap(struct mtd_info *master, loff_t partmap_offs,
+ struct sc_part_desc **ppdesc)
+{
+ int cnt = 0;
+ int res = 0;
+ int res2;
+ uint32_t offs;
+ size_t retlen;
+ struct sc_part_desc *pdesc = NULL;
+ struct sc_part_desc *tmpdesc;
+ uint8_t *buf;
+
+ buf = kzalloc(master->erasesize, GFP_KERNEL);
+ if (!buf) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ res2 = mtd_read(master, partmap_offs, master->erasesize, &retlen, buf);
+ if (res2 || retlen != master->erasesize) {
+ res = -EIO;
+ goto free;
+ }
+
+ for (offs = MAP_OFFS_IN_BLK;
+ offs < master->erasesize - sizeof(*tmpdesc);
+ offs += sizeof(*tmpdesc)) {
+ tmpdesc = (struct sc_part_desc *)&buf[offs];
+ if (!scpart_desc_is_valid(tmpdesc))
+ break;
+ cnt++;
+ }
+
+ if (cnt > 0) {
+ int bytes = cnt * sizeof(*pdesc);
+
+ pdesc = kcalloc(cnt, sizeof(*pdesc), GFP_KERNEL);
+ if (!pdesc) {
+ res = -ENOMEM;
+ goto free;
+ }
+ memcpy(pdesc, &(buf[MAP_OFFS_IN_BLK]), bytes);
+
+ *ppdesc = pdesc;
+ res = cnt;
+ }
+
+free:
+ kfree(buf);
+
+out:
+ return res;
+}
+
+static int scpart_find_partmap(struct mtd_info *master,
+ struct sc_part_desc **ppdesc)
+{
+ int magic_found = 0;
+ int res = 0;
+ int res2;
+ loff_t offs = 0;
+ size_t retlen;
+ uint8_t rdbuf[PART_MAGIC_LEN];
+
+ while ((magic_found < MAP_MIRROR_NUM) &&
+ (offs < master->size) &&
+ !mtd_block_isbad(master, offs)) {
+ res2 = mtd_read(master, offs, PART_MAGIC_LEN, &retlen, rdbuf);
+ if (res2 || retlen != PART_MAGIC_LEN) {
+ res = -EIO;
+ goto out;
+ }
+ if (!memcmp(rdbuf, sc_part_magic, PART_MAGIC_LEN)) {
+ pr_debug("Signature found at 0x%llx\n", offs);
+ magic_found++;
+ res = scpart_scan_partmap(master, offs, ppdesc);
+ if (res > 0)
+ goto out;
+ }
+ offs += master->erasesize;
+ }
+
+out:
+ if (res > 0)
+ pr_info("Valid 'SC PART MAP' (%d partitions) found at 0x%llx\n", res, offs);
+ else
+ pr_info("No valid 'SC PART MAP' was found\n");
+
+ return res;
+}
+
+static int scpart_parse(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ const char *partname;
+ int n;
+ int nr_scparts;
+ int nr_parts = 0;
+ int res = 0;
+ struct sc_part_desc *scpart_map = NULL;
+ struct mtd_partition *parts = NULL;
+ struct device_node *mtd_node;
+ struct device_node *ofpart_node;
+ struct device_node *pp;
+
+ mtd_node = mtd_get_of_node(master);
+ if (!mtd_node) {
+ res = -ENOENT;
+ goto out;
+ }
+
+ ofpart_node = of_get_child_by_name(mtd_node, "partitions");
+ if (!ofpart_node) {
+ pr_info("%s: 'partitions' subnode not found on %pOF.\n",
+ master->name, mtd_node);
+ res = -ENOENT;
+ goto out;
+ }
+
+ nr_scparts = scpart_find_partmap(master, &scpart_map);
+ if (nr_scparts <= 0) {
+		pr_info("No partitions were found in 'SC PART MAP'.\n");
+ res = -ENOENT;
+ goto free;
+ }
+
+ parts = kcalloc(of_get_child_count(ofpart_node), sizeof(*parts),
+ GFP_KERNEL);
+ if (!parts) {
+ res = -ENOMEM;
+ goto free;
+ }
+
+ for_each_child_of_node(ofpart_node, pp) {
+ u32 scpart_id;
+
+ if (of_property_read_u32(pp, "sercomm,scpart-id", &scpart_id))
+ continue;
+
+		for (n = 0; n < nr_scparts; n++)
+ if ((scpart_map[n].part_id != ID_ALREADY_FOUND) &&
+ (scpart_id == scpart_map[n].part_id))
+ break;
+ if (n >= nr_scparts)
+			/* no match */
+ continue;
+
+ /* add the partition found in OF into MTD partition array */
+ parts[nr_parts].offset = scpart_map[n].part_offs;
+ parts[nr_parts].size = scpart_map[n].part_bytes;
+ parts[nr_parts].of_node = pp;
+
+ if (!of_property_read_string(pp, "label", &partname))
+ parts[nr_parts].name = partname;
+ if (of_property_read_bool(pp, "read-only"))
+ parts[nr_parts].mask_flags |= MTD_WRITEABLE;
+ if (of_property_read_bool(pp, "lock"))
+ parts[nr_parts].mask_flags |= MTD_POWERUP_LOCK;
+
+ /* mark as 'done' */
+ scpart_map[n].part_id = ID_ALREADY_FOUND;
+
+ nr_parts++;
+ }
+
+ if (nr_parts > 0) {
+ *pparts = parts;
+ res = nr_parts;
+	} else {
+		pr_info("No OF partition matches a partition ID from the 'SC PART MAP'.\n");
+	}
+
+ of_node_put(pp);
+
+free:
+ of_node_put(ofpart_node);
+ kfree(scpart_map);
+ if (res <= 0)
+ kfree(parts);
+
+out:
+ return res;
+}
+
+static const struct of_device_id scpart_parser_of_match_table[] = {
+ { .compatible = "sercomm,sc-partitions" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, scpart_parser_of_match_table);
+
+static struct mtd_part_parser scpart_parser = {
+ .parse_fn = scpart_parse,
+ .name = "scpart",
+ .of_match_table = scpart_parser_of_match_table,
+};
+module_mtd_part_parser(scpart_parser);
+
+/* mtd parsers will request the module by parser name */
+MODULE_ALIAS("scpart");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("NOGUCHI Hiroshi <drvlabo@gmail.com>");
+MODULE_AUTHOR("Mikhail Zhilkin <csharper2005@gmail.com>");
+MODULE_DESCRIPTION("Sercomm partition parser");
diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
new file mode 100644
index 0000000000..671a61845b
--- /dev/null
+++ b/drivers/mtd/parsers/sharpslpart.c
@@ -0,0 +1,399 @@
+/*
+ * sharpslpart.c - MTD partition parser for NAND flash using the SHARP FTL
+ * for logical addressing, as used on the PXA models of the SHARP SL Series.
+ *
+ * Copyright (C) 2017 Andrea Adami <andrea.adami@gmail.com>
+ *
+ * Based on SHARP GPL 2.4 sources:
+ * http://support.ezaurus.com/developer/source/source_dl.asp
+ * drivers/mtd/nand/sharp_sl_logical.c
+ * linux/include/asm-arm/sharp_nand_logical.h
+ *
+ * Copyright (C) 2002 SHARP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+/* oob structure */
+#define NAND_NOOB_LOGADDR_00 8
+#define NAND_NOOB_LOGADDR_01 9
+#define NAND_NOOB_LOGADDR_10 10
+#define NAND_NOOB_LOGADDR_11 11
+#define NAND_NOOB_LOGADDR_20 12
+#define NAND_NOOB_LOGADDR_21 13
+
+#define BLOCK_IS_RESERVED 0xffff
+#define BLOCK_UNMASK_COMPLEMENT 1
+
+/* factory defaults */
+#define SHARPSL_NAND_PARTS 3
+#define SHARPSL_FTL_PART_SIZE (7 * SZ_1M)
+#define SHARPSL_PARTINFO1_LADDR 0x00060000
+#define SHARPSL_PARTINFO2_LADDR 0x00064000
+
+#define BOOT_MAGIC 0x424f4f54
+#define FSRO_MAGIC 0x4653524f
+#define FSRW_MAGIC 0x46535257
+
+/**
+ * struct sharpsl_ftl - Sharp FTL Logical Table
+ * @logmax: number of logical blocks
+ * @log2phy: the logical-to-physical table
+ *
+ * Structure containing the logical-to-physical translation table
+ * used by the SHARP SL FTL.
+ */
+struct sharpsl_ftl {
+ unsigned int logmax;
+ unsigned int *log2phy;
+};
+
+/* verify that the OOB bytes 8 to 15 are free and available for the FTL */
+static int sharpsl_nand_check_ooblayout(struct mtd_info *mtd)
+{
+ u8 freebytes = 0;
+ int section = 0;
+
+ while (true) {
+ struct mtd_oob_region oobfree = { };
+ int ret, i;
+
+ ret = mtd_ooblayout_free(mtd, section++, &oobfree);
+ if (ret)
+ break;
+
+ if (!oobfree.length || oobfree.offset > 15 ||
+ (oobfree.offset + oobfree.length) < 8)
+ continue;
+
+ i = oobfree.offset >= 8 ? oobfree.offset : 8;
+ for (; i < oobfree.offset + oobfree.length && i < 16; i++)
+ freebytes |= BIT(i - 8);
+
+ if (freebytes == 0xff)
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int sharpsl_nand_read_oob(struct mtd_info *mtd, loff_t offs, u8 *buf)
+{
+ struct mtd_oob_ops ops = { };
+ int ret;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = buf;
+
+ ret = mtd_read_oob(mtd, offs, &ops);
+ if (ret != 0 || mtd->oobsize != ops.oobretlen)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * The logical block number assigned to a physical block is stored in the OOB
+ * of the first page, in 3 16-bit copies with the following layout:
+ *
+ * 01234567 89abcdef
+ * -------- --------
+ * ECC BB xyxyxy
+ *
+ * When reading we check that the first two copies agree.
+ * In case of error, matching is tried using the following pairs.
+ * Reserved values 0xffff mean the block is kept for wear leveling.
+ *
+ * 01234567 89abcdef
+ * -------- --------
+ * ECC BB xyxy oob[8]==oob[10] && oob[9]==oob[11] -> byte0=8 byte1=9
+ * ECC BB xyxy oob[10]==oob[12] && oob[11]==oob[13] -> byte0=10 byte1=11
+ * ECC BB xy xy oob[12]==oob[8] && oob[13]==oob[9] -> byte0=12 byte1=13
+ */
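+/*
+ * In other words (a sketch of the checks below): bit 0 of the 16-bit
+ * value acts as a parity bit (the popcount of the whole word must be
+ * even) and bits 10..1 carry the logical block number, hence the final
+ * "(us >> 1) & GENMASK(9, 0)".
+ */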
+static int sharpsl_nand_get_logical_num(u8 *oob)
+{
+ u16 us;
+ int good0, good1;
+
+ if (oob[NAND_NOOB_LOGADDR_00] == oob[NAND_NOOB_LOGADDR_10] &&
+ oob[NAND_NOOB_LOGADDR_01] == oob[NAND_NOOB_LOGADDR_11]) {
+ good0 = NAND_NOOB_LOGADDR_00;
+ good1 = NAND_NOOB_LOGADDR_01;
+ } else if (oob[NAND_NOOB_LOGADDR_10] == oob[NAND_NOOB_LOGADDR_20] &&
+ oob[NAND_NOOB_LOGADDR_11] == oob[NAND_NOOB_LOGADDR_21]) {
+ good0 = NAND_NOOB_LOGADDR_10;
+ good1 = NAND_NOOB_LOGADDR_11;
+ } else if (oob[NAND_NOOB_LOGADDR_20] == oob[NAND_NOOB_LOGADDR_00] &&
+ oob[NAND_NOOB_LOGADDR_21] == oob[NAND_NOOB_LOGADDR_01]) {
+ good0 = NAND_NOOB_LOGADDR_20;
+ good1 = NAND_NOOB_LOGADDR_21;
+ } else {
+ return -EINVAL;
+ }
+
+ us = oob[good0] | oob[good1] << 8;
+
+ /* parity check */
+ if (hweight16(us) & BLOCK_UNMASK_COMPLEMENT)
+ return -EINVAL;
+
+ /* reserved */
+ if (us == BLOCK_IS_RESERVED)
+ return BLOCK_IS_RESERVED;
+
+ return (us >> 1) & GENMASK(9, 0);
+}
+
+static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
+{
+ unsigned int block_num, phymax;
+ int i, ret, log_num;
+ loff_t block_adr;
+ u8 *oob;
+
+ oob = kzalloc(mtd->oobsize, GFP_KERNEL);
+ if (!oob)
+ return -ENOMEM;
+
+ phymax = mtd_div_by_eb(SHARPSL_FTL_PART_SIZE, mtd);
+
+ /* FTL reserves 5% of the blocks + 1 spare */
+ ftl->logmax = ((phymax * 95) / 100) - 1;
+
+ ftl->log2phy = kmalloc_array(ftl->logmax, sizeof(*ftl->log2phy),
+ GFP_KERNEL);
+ if (!ftl->log2phy) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* initialize ftl->log2phy */
+ for (i = 0; i < ftl->logmax; i++)
+ ftl->log2phy[i] = UINT_MAX;
+
+ /* create physical-logical table */
+ for (block_num = 0; block_num < phymax; block_num++) {
+ block_adr = (loff_t)block_num * mtd->erasesize;
+
+ if (mtd_block_isbad(mtd, block_adr))
+ continue;
+
+ if (sharpsl_nand_read_oob(mtd, block_adr, oob))
+ continue;
+
+ /* get logical block */
+ log_num = sharpsl_nand_get_logical_num(oob);
+
+ /* cut-off errors and skip the out-of-range values */
+ if (log_num > 0 && log_num < ftl->logmax) {
+ if (ftl->log2phy[log_num] == UINT_MAX)
+ ftl->log2phy[log_num] = block_num;
+ }
+ }
+
+ pr_info("Sharp SL FTL: %d blocks used (%d logical, %d reserved)\n",
+ phymax, ftl->logmax, phymax - ftl->logmax);
+
+ ret = 0;
+exit:
+ kfree(oob);
+ return ret;
+}
+
+static void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
+{
+ kfree(ftl->log2phy);
+}
+
+static int sharpsl_nand_read_laddr(struct mtd_info *mtd,
+ loff_t from,
+ size_t len,
+ void *buf,
+ struct sharpsl_ftl *ftl)
+{
+ unsigned int log_num, final_log_num;
+ unsigned int block_num;
+ loff_t block_adr;
+ loff_t block_ofs;
+ size_t retlen;
+ int err;
+
+ log_num = mtd_div_by_eb((u32)from, mtd);
+ final_log_num = mtd_div_by_eb(((u32)from + len - 1), mtd);
+
+ if (len <= 0 || log_num >= ftl->logmax || final_log_num > log_num)
+ return -EINVAL;
+
+ block_num = ftl->log2phy[log_num];
+ block_adr = (loff_t)block_num * mtd->erasesize;
+ block_ofs = mtd_mod_by_eb((u32)from, mtd);
+
+ err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf);
+ /* Ignore corrected ECC errors */
+ if (mtd_is_bitflip(err))
+ err = 0;
+
+ if (!err && retlen != len)
+ err = -EIO;
+
+ if (err)
+ pr_err("sharpslpart: error, read failed at %#llx\n",
+ block_adr + block_ofs);
+
+ return err;
+}
+
+/*
+ * MTD Partition Parser
+ *
+ * Sample values read from SL-C860
+ *
+ * # cat /proc/mtd
+ * dev: size erasesize name
+ * mtd0: 006d0000 00020000 "Filesystem"
+ * mtd1: 00700000 00004000 "smf"
+ * mtd2: 03500000 00004000 "root"
+ * mtd3: 04400000 00004000 "home"
+ *
+ * PARTITIONINFO1
+ * 0x00060000: 00 00 00 00 00 00 70 00 42 4f 4f 54 00 00 00 00 ......p.BOOT....
+ * 0x00060010: 00 00 70 00 00 00 c0 03 46 53 52 4f 00 00 00 00 ..p.....FSRO....
+ * 0x00060020: 00 00 c0 03 00 00 00 04 46 53 52 57 00 00 00 00 ........FSRW....
+ */
+struct sharpsl_nand_partinfo {
+ __le32 start;
+ __le32 end;
+ __be32 magic;
+ u32 reserved;
+};
+
+static int sharpsl_nand_read_partinfo(struct mtd_info *master,
+ loff_t from,
+ size_t len,
+ struct sharpsl_nand_partinfo *buf,
+ struct sharpsl_ftl *ftl)
+{
+ int ret;
+
+ ret = sharpsl_nand_read_laddr(master, from, len, buf, ftl);
+ if (ret)
+ return ret;
+
+ /* check for magics */
+ if (be32_to_cpu(buf[0].magic) != BOOT_MAGIC ||
+ be32_to_cpu(buf[1].magic) != FSRO_MAGIC ||
+ be32_to_cpu(buf[2].magic) != FSRW_MAGIC) {
+ pr_err("sharpslpart: magic values mismatch\n");
+ return -EINVAL;
+ }
+
+ /* fixup for hardcoded value 64 MiB (for older models) */
+ buf[2].end = cpu_to_le32(master->size);
+
+ /* extra sanity check */
+ if (le32_to_cpu(buf[0].end) <= le32_to_cpu(buf[0].start) ||
+ le32_to_cpu(buf[1].start) < le32_to_cpu(buf[0].end) ||
+ le32_to_cpu(buf[1].end) <= le32_to_cpu(buf[1].start) ||
+ le32_to_cpu(buf[2].start) < le32_to_cpu(buf[1].end) ||
+ le32_to_cpu(buf[2].end) <= le32_to_cpu(buf[2].start)) {
+ pr_err("sharpslpart: partition sizes mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sharpsl_parse_mtd_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct sharpsl_ftl ftl;
+ struct sharpsl_nand_partinfo buf[SHARPSL_NAND_PARTS];
+ struct mtd_partition *sharpsl_nand_parts;
+ int err;
+
+ /* check that OOB bytes 8 to 15 used by the FTL are actually free */
+ err = sharpsl_nand_check_ooblayout(master);
+ if (err)
+ return err;
+
+ /* init logical mgmt (FTL) */
+ err = sharpsl_nand_init_ftl(master, &ftl);
+ if (err)
+ return err;
+
+ /* read and validate first partition table */
+ pr_info("sharpslpart: try reading first partition table\n");
+ err = sharpsl_nand_read_partinfo(master,
+ SHARPSL_PARTINFO1_LADDR,
+ sizeof(buf), buf, &ftl);
+ if (err) {
+ /* fallback: read second partition table */
+ pr_warn("sharpslpart: first partition table is invalid, retry using the second\n");
+ err = sharpsl_nand_read_partinfo(master,
+ SHARPSL_PARTINFO2_LADDR,
+ sizeof(buf), buf, &ftl);
+ }
+
+ /* cleanup logical mgmt (FTL) */
+ sharpsl_nand_cleanup_ftl(&ftl);
+
+ if (err) {
+ pr_err("sharpslpart: both partition tables are invalid\n");
+ return err;
+ }
+
+ sharpsl_nand_parts = kcalloc(SHARPSL_NAND_PARTS,
+ sizeof(*sharpsl_nand_parts),
+ GFP_KERNEL);
+ if (!sharpsl_nand_parts)
+ return -ENOMEM;
+
+ /* original names */
+ sharpsl_nand_parts[0].name = "smf";
+ sharpsl_nand_parts[0].offset = le32_to_cpu(buf[0].start);
+ sharpsl_nand_parts[0].size = le32_to_cpu(buf[0].end) -
+ le32_to_cpu(buf[0].start);
+
+ sharpsl_nand_parts[1].name = "root";
+ sharpsl_nand_parts[1].offset = le32_to_cpu(buf[1].start);
+ sharpsl_nand_parts[1].size = le32_to_cpu(buf[1].end) -
+ le32_to_cpu(buf[1].start);
+
+ sharpsl_nand_parts[2].name = "home";
+ sharpsl_nand_parts[2].offset = le32_to_cpu(buf[2].start);
+ sharpsl_nand_parts[2].size = le32_to_cpu(buf[2].end) -
+ le32_to_cpu(buf[2].start);
+
+ *pparts = sharpsl_nand_parts;
+ return SHARPSL_NAND_PARTS;
+}
+
+static struct mtd_part_parser sharpsl_mtd_parser = {
+ .parse_fn = sharpsl_parse_mtd_partitions,
+ .name = "sharpslpart",
+};
+module_mtd_part_parser(sharpsl_mtd_parser);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrea Adami <andrea.adami@gmail.com>");
+MODULE_DESCRIPTION("MTD partitioning for NAND flash on Sharp SL Series");
diff --git a/drivers/mtd/parsers/tplink_safeloader.c b/drivers/mtd/parsers/tplink_safeloader.c
new file mode 100644
index 0000000000..1c689dafca
--- /dev/null
+++ b/drivers/mtd/parsers/tplink_safeloader.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2022 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#define TPLINK_SAFELOADER_DATA_OFFSET 4
+#define TPLINK_SAFELOADER_MAX_PARTS 32
+
+struct safeloader_cmn_header {
+ __be32 size;
+ uint32_t unused;
+} __packed;
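+
+/*
+ * Illustration of the on-flash format (derived from the parser below, not
+ * from vendor documentation): a big-endian length header followed by text
+ * lines; the partition names here are made up:
+ *
+ *   partition os-image base 0x40000 size 0x300000
+ *   partition file-system base 0x340000 size 0x500000
+ */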
+
+static void *mtd_parser_tplink_safeloader_read_table(struct mtd_info *mtd)
+{
+ struct safeloader_cmn_header hdr;
+ struct device_node *np;
+ size_t bytes_read;
+ size_t size;
+ u32 offset;
+ char *buf;
+ int err;
+
+ np = mtd_get_of_node(mtd);
+ if (mtd_is_partition(mtd))
+ of_node_get(np);
+ else
+ np = of_get_child_by_name(np, "partitions");
+
+ if (of_property_read_u32(np, "partitions-table-offset", &offset)) {
+ pr_err("Failed to get partitions table offset\n");
+ goto err_put;
+ }
+
+ err = mtd_read(mtd, offset, sizeof(hdr), &bytes_read, (uint8_t *)&hdr);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("Failed to read from %s at 0x%x\n", mtd->name, offset);
+ goto err_put;
+ }
+
+ size = be32_to_cpu(hdr.size);
+
+ buf = kmalloc(size + 1, GFP_KERNEL);
+ if (!buf)
+ goto err_put;
+
+ err = mtd_read(mtd, offset + sizeof(hdr), size, &bytes_read, buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("Failed to read from %s at 0x%zx\n", mtd->name, offset + sizeof(hdr));
+ goto err_kfree;
+ }
+
+ buf[size] = '\0';
+
+ of_node_put(np);
+
+ return buf;
+
+err_kfree:
+ kfree(buf);
+err_put:
+ of_node_put(np);
+ return NULL;
+}
+
+static int mtd_parser_tplink_safeloader_parse(struct mtd_info *mtd,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_partition *parts;
+ char name[65];
+ size_t offset;
+ size_t bytes;
+ char *buf;
+ int idx;
+ int err;
+
+ parts = kcalloc(TPLINK_SAFELOADER_MAX_PARTS, sizeof(*parts), GFP_KERNEL);
+ if (!parts) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ buf = mtd_parser_tplink_safeloader_read_table(mtd);
+ if (!buf) {
+ err = -ENOENT;
+ goto err_free_parts;
+ }
+
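+	/*
+	 * Each pass consumes one "partition <name> base <hex> size <hex>"
+	 * line; %zn records how many bytes were matched and the +1 skips
+	 * the trailing newline.
+	 */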
+ for (idx = 0, offset = TPLINK_SAFELOADER_DATA_OFFSET;
+ idx < TPLINK_SAFELOADER_MAX_PARTS &&
+ sscanf(buf + offset, "partition %64s base 0x%llx size 0x%llx%zn\n",
+ name, &parts[idx].offset, &parts[idx].size, &bytes) == 3;
+ idx++, offset += bytes + 1) {
+ parts[idx].name = kstrdup(name, GFP_KERNEL);
+ if (!parts[idx].name) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+ }
+
+ if (idx == TPLINK_SAFELOADER_MAX_PARTS)
+ pr_warn("Reached maximum number of partitions!\n");
+
+ kfree(buf);
+
+ *pparts = parts;
+
+ return idx;
+
+err_free:
+ for (idx -= 1; idx >= 0; idx--)
+ kfree(parts[idx].name);
+err_free_parts:
+ kfree(parts);
+err_out:
+ return err;
+}
+
+static void mtd_parser_tplink_safeloader_cleanup(const struct mtd_partition *pparts,
+ int nr_parts)
+{
+ int i;
+
+ for (i = 0; i < nr_parts; i++)
+ kfree(pparts[i].name);
+
+ kfree(pparts);
+}
+
+static const struct of_device_id mtd_parser_tplink_safeloader_of_match_table[] = {
+ { .compatible = "tplink,safeloader-partitions" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtd_parser_tplink_safeloader_of_match_table);
+
+static struct mtd_part_parser mtd_parser_tplink_safeloader = {
+ .parse_fn = mtd_parser_tplink_safeloader_parse,
+ .cleanup = mtd_parser_tplink_safeloader_cleanup,
+ .name = "tplink-safeloader",
+ .of_match_table = mtd_parser_tplink_safeloader_of_match_table,
+};
+module_mtd_part_parser(mtd_parser_tplink_safeloader);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
new file mode 100644
index 0000000000..c546f8c5f2
--- /dev/null
+++ b/drivers/mtd/rfd_ftl.c
@@ -0,0 +1,830 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * rfd_ftl.c -- resident flash disk (flash translation layer)
+ *
+ * Copyright © 2005 Sean Young <sean@mess.org>
+ *
+ * This type of flash translation layer (FTL) is used by the Embedded BIOS
+ * by General Software. It is known as the Resident Flash Disk (RFD), see:
+ *
+ * http://www.gensw.com/pages/prod/bios/rfd.htm
+ *
+ * based on ftl.c
+ */
+
+#include <linux/hdreg.h>
+#include <linux/init.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/mtd/mtd.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+
+#include <asm/types.h>
+
+static int block_size;
+module_param(block_size, int, 0);
+MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
+
+#define PREFIX "rfd_ftl: "
+
+/* This major has been assigned by device@lanana.org */
+#ifndef RFD_FTL_MAJOR
+#define RFD_FTL_MAJOR 256
+#endif
+
+/* Maximum number of partitions in an FTL region */
+#define PART_BITS 4
+
+/* An erase unit should start with this value */
+#define RFD_MAGIC 0x9193
+
+/* the second value is 0xffff or 0xffc8; function unknown */
+
+/* the third value is always 0xffff, ignored */
+
+/* next is an array of mapping for each corresponding sector */
+#define HEADER_MAP_OFFSET 3
+#define SECTOR_DELETED 0x0000
+#define SECTOR_ZERO 0xfffe
+#define SECTOR_FREE 0xffff
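+
+/*
+ * Sketch of an erase unit header, as implied by the defines above (not
+ * taken from the RFD specification): u16[0] holds RFD_MAGIC, u16[1..2]
+ * are reserved, then one u16 map entry per data sector naming the logical
+ * sector stored there (SECTOR_ZERO stands in for logical sector 0).
+ */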
+
+#define SECTOR_SIZE 512
+
+#define SECTORS_PER_TRACK 63
+
+struct block {
+ enum {
+ BLOCK_OK,
+ BLOCK_ERASING,
+ BLOCK_ERASED,
+ BLOCK_UNUSED,
+ BLOCK_FAILED
+ } state;
+ int free_sectors;
+ int used_sectors;
+ int erases;
+ u_long offset;
+};
+
+struct partition {
+ struct mtd_blktrans_dev mbd;
+
+ u_int block_size; /* size of erase unit */
+ u_int total_blocks; /* number of erase units */
+ u_int header_sectors_per_block; /* header sectors in erase unit */
+ u_int data_sectors_per_block; /* data sectors in erase unit */
+ u_int sector_count; /* sectors in translated disk */
+ u_int header_size; /* bytes in header sector */
+ int reserved_block; /* block next up for reclaim */
+ int current_block; /* block to write to */
+ u16 *header_cache; /* cached header */
+
+ int is_reclaiming;
+ int cylinders;
+ int errors;
+ u_long *sector_map;
+ struct block *blocks;
+};
+
+static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
+
+static int build_block_map(struct partition *part, int block_no)
+{
+ struct block *block = &part->blocks[block_no];
+ int i;
+
+ block->offset = part->block_size * block_no;
+
+ if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
+ block->state = BLOCK_UNUSED;
+ return -ENOENT;
+ }
+
+ block->state = BLOCK_OK;
+
+	for (i = 0; i < part->data_sectors_per_block; i++) {
+ u16 entry;
+
+ entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
+
+ if (entry == SECTOR_DELETED)
+ continue;
+
+ if (entry == SECTOR_FREE) {
+ block->free_sectors++;
+ continue;
+ }
+
+ if (entry == SECTOR_ZERO)
+ entry = 0;
+
+ if (entry >= part->sector_count) {
+ printk(KERN_WARNING PREFIX
+ "'%s': unit #%d: entry %d corrupt, "
+ "sector %d out of range\n",
+ part->mbd.mtd->name, block_no, i, entry);
+ continue;
+ }
+
+ if (part->sector_map[entry] != -1) {
+ printk(KERN_WARNING PREFIX
+ "'%s': more than one entry for sector %d\n",
+ part->mbd.mtd->name, entry);
+ part->errors = 1;
+ continue;
+ }
+
+ part->sector_map[entry] = block->offset +
+ (i + part->header_sectors_per_block) * SECTOR_SIZE;
+
+ block->used_sectors++;
+ }
+
+ if (block->free_sectors == part->data_sectors_per_block)
+ part->reserved_block = block_no;
+
+ return 0;
+}
+
+static int scan_header(struct partition *part)
+{
+ int sectors_per_block;
+ int i, rc = -ENOMEM;
+ int blocks_found;
+ size_t retlen;
+
+ sectors_per_block = part->block_size / SECTOR_SIZE;
+ part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
+
+ if (part->total_blocks < 2)
+ return -ENOENT;
+
+ /* each erase block has three bytes header, followed by the map */
+ part->header_sectors_per_block =
+ ((HEADER_MAP_OFFSET + sectors_per_block) *
+ sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;
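+
+	/*
+	 * Worked example (illustrative): 64 KiB erase units hold 128 sectors;
+	 * the map needs (3 + 128) * 2 = 262 bytes, i.e. one 512-byte header
+	 * sector, leaving 127 data sectors per unit.
+	 */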
+
+ part->data_sectors_per_block = sectors_per_block -
+ part->header_sectors_per_block;
+
+ part->header_size = (HEADER_MAP_OFFSET +
+ part->data_sectors_per_block) * sizeof(u16);
+
+ part->cylinders = (part->data_sectors_per_block *
+ (part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
+
+ part->sector_count = part->cylinders * SECTORS_PER_TRACK;
+
+ part->current_block = -1;
+ part->reserved_block = -1;
+ part->is_reclaiming = 0;
+
+ part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
+ if (!part->header_cache)
+ goto err;
+
+ part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
+ GFP_KERNEL);
+ if (!part->blocks)
+ goto err;
+
+ part->sector_map = vmalloc(array_size(sizeof(u_long),
+ part->sector_count));
+ if (!part->sector_map)
+ goto err;
+
+	for (i = 0; i < part->sector_count; i++)
+ part->sector_map[i] = -1;
+
+	for (i = 0, blocks_found = 0; i < part->total_blocks; i++) {
+ rc = mtd_read(part->mbd.mtd, i * part->block_size,
+ part->header_size, &retlen,
+ (u_char *)part->header_cache);
+
+ if (!rc && retlen != part->header_size)
+ rc = -EIO;
+
+ if (rc)
+ goto err;
+
+ if (!build_block_map(part, i))
+ blocks_found++;
+ }
+
+ if (blocks_found == 0) {
+ printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
+ part->mbd.mtd->name);
+ rc = -ENOENT;
+ goto err;
+ }
+
+ if (part->reserved_block == -1) {
+ printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
+ part->mbd.mtd->name);
+
+ part->errors = 1;
+ }
+
+ return 0;
+
+err:
+ vfree(part->sector_map);
+ kfree(part->header_cache);
+ kfree(part->blocks);
+
+ return rc;
+}
+
+static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
+{
+ struct partition *part = container_of(dev, struct partition, mbd);
+ u_long addr;
+ size_t retlen;
+ int rc;
+
+ if (sector >= part->sector_count)
+ return -EIO;
+
+ addr = part->sector_map[sector];
+ if (addr != -1) {
+ rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
+ if (!rc && retlen != SECTOR_SIZE)
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_WARNING PREFIX "error reading '%s' at "
+ "0x%lx\n", part->mbd.mtd->name, addr);
+ return rc;
+ }
+ } else
+ memset(buf, 0, SECTOR_SIZE);
+
+ return 0;
+}
+
+static int erase_block(struct partition *part, int block)
+{
+ struct erase_info *erase;
+ int rc;
+
+ erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
+ if (!erase)
+ return -ENOMEM;
+
+ erase->addr = part->blocks[block].offset;
+ erase->len = part->block_size;
+
+ part->blocks[block].state = BLOCK_ERASING;
+ part->blocks[block].free_sectors = 0;
+
+ rc = mtd_erase(part->mbd.mtd, erase);
+ if (rc) {
+ printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
+ "failed\n", (unsigned long long)erase->addr,
+ (unsigned long long)erase->len, part->mbd.mtd->name);
+ part->blocks[block].state = BLOCK_FAILED;
+ part->blocks[block].free_sectors = 0;
+ part->blocks[block].used_sectors = 0;
+ } else {
+ u16 magic = cpu_to_le16(RFD_MAGIC);
+ size_t retlen;
+
+ part->blocks[block].state = BLOCK_ERASED;
+ part->blocks[block].free_sectors = part->data_sectors_per_block;
+ part->blocks[block].used_sectors = 0;
+ part->blocks[block].erases++;
+
+ rc = mtd_write(part->mbd.mtd, part->blocks[block].offset,
+ sizeof(magic), &retlen, (u_char *)&magic);
+ if (!rc && retlen != sizeof(magic))
+ rc = -EIO;
+
+ if (rc) {
+ pr_err(PREFIX "'%s': unable to write RFD header at 0x%lx\n",
+ part->mbd.mtd->name, part->blocks[block].offset);
+ part->blocks[block].state = BLOCK_FAILED;
+ } else {
+ part->blocks[block].state = BLOCK_OK;
+ }
+ }
+
+ kfree(erase);
+
+ return rc;
+}
+
+static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
+{
+ void *sector_data;
+ u16 *map;
+ size_t retlen;
+ int i, rc = -ENOMEM;
+
+ part->is_reclaiming = 1;
+
+ sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
+ if (!sector_data)
+ goto err3;
+
+ map = kmalloc(part->header_size, GFP_KERNEL);
+ if (!map)
+ goto err2;
+
+ rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
+ part->header_size, &retlen, (u_char *)map);
+
+ if (!rc && retlen != part->header_size)
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "error reading '%s' at "
+ "0x%lx\n", part->mbd.mtd->name,
+ part->blocks[block_no].offset);
+
+ goto err;
+ }
+
+	for (i = 0; i < part->data_sectors_per_block; i++) {
+ u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
+ u_long addr;
+
+
+ if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
+ continue;
+
+ if (entry == SECTOR_ZERO)
+ entry = 0;
+
+ /* already warned about and ignored in build_block_map() */
+ if (entry >= part->sector_count)
+ continue;
+
+ addr = part->blocks[block_no].offset +
+ (i + part->header_sectors_per_block) * SECTOR_SIZE;
+
+ if (*old_sector == addr) {
+ *old_sector = -1;
+ if (!part->blocks[block_no].used_sectors--) {
+ rc = erase_block(part, block_no);
+ break;
+ }
+ continue;
+ }
+ rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ sector_data);
+
+ if (!rc && retlen != SECTOR_SIZE)
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "'%s': Unable to "
+ "read sector for relocation\n",
+ part->mbd.mtd->name);
+
+ goto err;
+ }
+
+ rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
+ entry, sector_data);
+
+ if (rc)
+ goto err;
+ }
+
+err:
+ kfree(map);
+err2:
+ kfree(sector_data);
+err3:
+ part->is_reclaiming = 0;
+
+ return rc;
+}
+
+static int reclaim_block(struct partition *part, u_long *old_sector)
+{
+ int block, best_block, score, old_sector_block;
+ int rc;
+
+ /* we have a race if sync doesn't exist */
+ mtd_sync(part->mbd.mtd);
+
+	score = 0x7fffffff; /* INT_MAX */
+ best_block = -1;
+ if (*old_sector != -1)
+ old_sector_block = *old_sector / part->block_size;
+ else
+ old_sector_block = -1;
+
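+	/*
+	 * Score each unit by live sectors to move plus past erases (wear);
+	 * the unit holding the sector being rewritten gets a one-point
+	 * discount. The lowest-scoring unit is reclaimed.
+	 */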
+	for (block = 0; block < part->total_blocks; block++) {
+ int this_score;
+
+ if (block == part->reserved_block)
+ continue;
+
+		/*
+		 * Postpone reclaiming while any unit still has a free sector:
+		 * waiting until more sectors are deleted makes reclaim more
+		 * efficient (fewer live sectors to move).
+		 */
+ if (part->blocks[block].free_sectors)
+ return 0;
+
+ this_score = part->blocks[block].used_sectors;
+
+ if (block == old_sector_block)
+ this_score--;
+ else {
+ /* no point in moving a full block */
+ if (part->blocks[block].used_sectors ==
+ part->data_sectors_per_block)
+ continue;
+ }
+
+ this_score += part->blocks[block].erases;
+
+ if (this_score < score) {
+ best_block = block;
+ score = this_score;
+ }
+ }
+
+ if (best_block == -1)
+ return -ENOSPC;
+
+ part->current_block = -1;
+ part->reserved_block = best_block;
+
+ pr_debug("reclaim_block: reclaiming block #%d with %d used "
+ "%d free sectors\n", best_block,
+ part->blocks[best_block].used_sectors,
+ part->blocks[best_block].free_sectors);
+
+ if (part->blocks[best_block].used_sectors)
+ rc = move_block_contents(part, best_block, old_sector);
+ else
+ rc = erase_block(part, best_block);
+
+ return rc;
+}
+
+/*
+ * IMPROVE: It would be best to choose the block with the most deleted sectors,
+ * because if we fill that one up first it'll have the most chance of having
+ * the least live sectors at reclaim.
+ */
+static int find_free_block(struct partition *part)
+{
+ int block, stop;
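+	/*
+	 * With no current block, start the scan at a pseudo-random unit
+	 * (keyed off jiffies) so writes are spread across the device.
+	 */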
+
+ block = part->current_block == -1 ?
+ jiffies % part->total_blocks : part->current_block;
+ stop = block;
+
+ do {
+ if (part->blocks[block].free_sectors &&
+ block != part->reserved_block)
+ return block;
+
+ if (part->blocks[block].state == BLOCK_UNUSED)
+ erase_block(part, block);
+
+ if (++block >= part->total_blocks)
+ block = 0;
+
+ } while (block != stop);
+
+ return -1;
+}
+
+static int find_writable_block(struct partition *part, u_long *old_sector)
+{
+ int rc, block;
+ size_t retlen;
+
+ block = find_free_block(part);
+
+ if (block == -1) {
+ if (!part->is_reclaiming) {
+ rc = reclaim_block(part, old_sector);
+ if (rc)
+ goto err;
+
+ block = find_free_block(part);
+ }
+
+ if (block == -1) {
+ rc = -ENOSPC;
+ goto err;
+ }
+ }
+
+ rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
+ part->header_size, &retlen,
+ (u_char *)part->header_cache);
+
+ if (!rc && retlen != part->header_size)
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "'%s': unable to read header at "
+ "0x%lx\n", part->mbd.mtd->name,
+ part->blocks[block].offset);
+ goto err;
+ }
+
+ part->current_block = block;
+
+err:
+ return rc;
+}
+
+static int mark_sector_deleted(struct partition *part, u_long old_addr)
+{
+ int block, offset, rc;
+ u_long addr;
+ size_t retlen;
+ u16 del = cpu_to_le16(SECTOR_DELETED);
+
+ block = old_addr / part->block_size;
+ offset = (old_addr % part->block_size) / SECTOR_SIZE -
+ part->header_sectors_per_block;
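+
+	/*
+	 * Worked example (illustrative): with 64 KiB units and one header
+	 * sector, old_addr 0x12600 lands in unit 1, data sector 18, so the
+	 * map entry sits at unit offset (3 + 18) * 2 = 42 bytes.
+	 */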
+
+ addr = part->blocks[block].offset +
+ (HEADER_MAP_OFFSET + offset) * sizeof(u16);
+ rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
+ (u_char *)&del);
+
+ if (!rc && retlen != sizeof(del))
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "error writing '%s' at "
+ "0x%lx\n", part->mbd.mtd->name, addr);
+ goto err;
+ }
+ if (block == part->current_block)
+ part->header_cache[offset + HEADER_MAP_OFFSET] = del;
+
+ part->blocks[block].used_sectors--;
+
+ if (!part->blocks[block].used_sectors &&
+ !part->blocks[block].free_sectors)
+ rc = erase_block(part, block);
+
+err:
+ return rc;
+}
+
+static int find_free_sector(const struct partition *part, const struct block *block)
+{
+ int i, stop;
+
+ i = stop = part->data_sectors_per_block - block->free_sectors;
+
+ do {
+ if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
+ == SECTOR_FREE)
+ return i;
+
+ if (++i == part->data_sectors_per_block)
+ i = 0;
+	} while (i != stop);
+
+ return -1;
+}
+
+static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
+{
+ struct partition *part = container_of(dev, struct partition, mbd);
+ struct block *block;
+ u_long addr;
+ int i;
+ int rc;
+ size_t retlen;
+ u16 entry;
+
+ if (part->current_block == -1 ||
+ !part->blocks[part->current_block].free_sectors) {
+
+ rc = find_writable_block(part, old_addr);
+ if (rc)
+ goto err;
+ }
+
+ block = &part->blocks[part->current_block];
+
+ i = find_free_sector(part, block);
+
+ if (i < 0) {
+ rc = -ENOSPC;
+ goto err;
+ }
+
+ addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
+ block->offset;
+ rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
+
+ if (!rc && retlen != SECTOR_SIZE)
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
+ part->mbd.mtd->name, addr);
+ goto err;
+ }
+
+ part->sector_map[sector] = addr;
+
+ entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);
+
+ part->header_cache[i + HEADER_MAP_OFFSET] = entry;
+
+ addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
+ rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
+ (u_char *)&entry);
+
+ if (!rc && retlen != sizeof(entry))
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
+ part->mbd.mtd->name, addr);
+ goto err;
+ }
+ block->used_sectors++;
+ block->free_sectors--;
+
+err:
+ return rc;
+}
+
+static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
+{
+ struct partition *part = container_of(dev, struct partition, mbd);
+ u_long old_addr;
+ int i;
+ int rc = 0;
+
+ pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);
+
+ if (part->reserved_block == -1) {
+ rc = -EACCES;
+ goto err;
+ }
+
+ if (sector >= part->sector_count) {
+ rc = -EIO;
+ goto err;
+ }
+
+ old_addr = part->sector_map[sector];
+
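+	/*
+	 * All-zero sectors need no storage: readsect() returns zeroes for
+	 * unmapped sectors, so data is only written when some byte is
+	 * non-zero.
+	 */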
+	for (i = 0; i < SECTOR_SIZE; i++) {
+ if (!buf[i])
+ continue;
+
+ rc = do_writesect(dev, sector, buf, &old_addr);
+ if (rc)
+ goto err;
+ break;
+ }
+
+ if (i == SECTOR_SIZE)
+ part->sector_map[sector] = -1;
+
+ if (old_addr != -1)
+ rc = mark_sector_deleted(part, old_addr);
+
+err:
+ return rc;
+}
+
+static int rfd_ftl_discardsect(struct mtd_blktrans_dev *dev,
+ unsigned long sector, unsigned int nr_sects)
+{
+ struct partition *part = container_of(dev, struct partition, mbd);
+ u_long addr;
+ int rc;
+
+ while (nr_sects) {
+ if (sector >= part->sector_count)
+ return -EIO;
+
+ addr = part->sector_map[sector];
+
+ if (addr != -1) {
+ rc = mark_sector_deleted(part, addr);
+ if (rc)
+ return rc;
+
+ part->sector_map[sector] = -1;
+ }
+
+ sector++;
+ nr_sects--;
+ }
+
+ return 0;
+}
+
+static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct partition *part = container_of(dev, struct partition, mbd);
+
+ geo->heads = 1;
+ geo->sectors = SECTORS_PER_TRACK;
+ geo->cylinders = part->cylinders;
+
+ return 0;
+}
+
+static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct partition *part;
+
+ if ((mtd->type != MTD_NORFLASH && mtd->type != MTD_RAM) ||
+ mtd->size > UINT_MAX)
+ return;
+
+ part = kzalloc(sizeof(struct partition), GFP_KERNEL);
+ if (!part)
+ return;
+
+ part->mbd.mtd = mtd;
+
+ if (block_size)
+ part->block_size = block_size;
+ else {
+ if (!mtd->erasesize) {
+ printk(KERN_WARNING PREFIX "please provide block_size");
+ goto out;
+ } else
+ part->block_size = mtd->erasesize;
+ }
+
+ if (scan_header(part) == 0) {
+ part->mbd.size = part->sector_count;
+ part->mbd.tr = tr;
+ part->mbd.devnum = -1;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ part->mbd.readonly = 1;
+ else if (part->errors) {
+ printk(KERN_WARNING PREFIX "'%s': errors found, "
+ "setting read-only\n", mtd->name);
+ part->mbd.readonly = 1;
+ }
+
+ printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
+ mtd->name, mtd->type, mtd->flags);
+
+ if (!add_mtd_blktrans_dev(&part->mbd))
+ return;
+ }
+out:
+ kfree(part);
+}
+
+static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct partition *part = container_of(dev, struct partition, mbd);
+ int i;
+
+	for (i = 0; i < part->total_blocks; i++) {
+ pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
+ part->mbd.mtd->name, i, part->blocks[i].erases);
+ }
+
+ vfree(part->sector_map);
+ kfree(part->header_cache);
+ kfree(part->blocks);
+ del_mtd_blktrans_dev(&part->mbd);
+}
+
+static struct mtd_blktrans_ops rfd_ftl_tr = {
+ .name = "rfd",
+ .major = RFD_FTL_MAJOR,
+ .part_bits = PART_BITS,
+ .blksize = SECTOR_SIZE,
+
+ .readsect = rfd_ftl_readsect,
+ .writesect = rfd_ftl_writesect,
+ .discard = rfd_ftl_discardsect,
+ .getgeo = rfd_ftl_getgeo,
+ .add_mtd = rfd_ftl_add_mtd,
+ .remove_dev = rfd_ftl_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+module_mtd_blktrans(rfd_ftl_tr);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sean Young <sean@mess.org>");
+MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
+ "used by General Software's Embedded BIOS");
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
new file mode 100644
index 0000000000..b5b3c4c44a
--- /dev/null
+++ b/drivers/mtd/sm_ftl.c
@@ -0,0 +1,1299 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * SmartMedia/xD translation layer
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/hdreg.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/sysfs.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
+#include "nand/raw/sm_common.h"
+#include "sm_ftl.h"
+
+
+
+static struct workqueue_struct *cache_flush_workqueue;
+
+static int cache_timeout = 1000;
+module_param(cache_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(cache_timeout,
+ "Timeout (in ms) for cache flush (1000 ms default");
+
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+
+/* ------------------- sysfs attributes ---------------------------------- */
+struct sm_sysfs_attribute {
+ struct device_attribute dev_attr;
+ char *data;
+ int len;
+};
+
+static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sm_sysfs_attribute *sm_attr =
+ container_of(attr, struct sm_sysfs_attribute, dev_attr);
+
+	memcpy(buf, sm_attr->data, sm_attr->len);
+ return sm_attr->len;
+}
+
+
+#define NUM_ATTRIBUTES 1
+#define SM_CIS_VENDOR_OFFSET 0x59
+static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
+{
+ struct attribute_group *attr_group;
+ struct attribute **attributes;
+ struct sm_sysfs_attribute *vendor_attribute;
+ char *vendor;
+
+ vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
+ SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
+ if (!vendor)
+ goto error1;
+
+ /* Initialize sysfs attributes */
+ vendor_attribute =
+ kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
+ if (!vendor_attribute)
+ goto error2;
+
+ sysfs_attr_init(&vendor_attribute->dev_attr.attr);
+
+ vendor_attribute->data = vendor;
+ vendor_attribute->len = strlen(vendor);
+ vendor_attribute->dev_attr.attr.name = "vendor";
+ vendor_attribute->dev_attr.attr.mode = S_IRUGO;
+ vendor_attribute->dev_attr.show = sm_attr_show;
+
+
+ /* Create array of pointers to the attributes */
+ attributes = kcalloc(NUM_ATTRIBUTES + 1, sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (!attributes)
+ goto error3;
+ attributes[0] = &vendor_attribute->dev_attr.attr;
+
+ /* Finally create the attribute group */
+ attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ if (!attr_group)
+ goto error4;
+ attr_group->attrs = attributes;
+ return attr_group;
+error4:
+ kfree(attributes);
+error3:
+ kfree(vendor_attribute);
+error2:
+ kfree(vendor);
+error1:
+ return NULL;
+}
+
+static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
+{
+ struct attribute **attributes = ftl->disk_attributes->attrs;
+ int i;
+
+ for (i = 0; attributes[i] ; i++) {
+
+ struct device_attribute *dev_attr = container_of(attributes[i],
+ struct device_attribute, attr);
+
+ struct sm_sysfs_attribute *sm_attr =
+ container_of(dev_attr,
+ struct sm_sysfs_attribute, dev_attr);
+
+ kfree(sm_attr->data);
+ kfree(sm_attr);
+ }
+
+ kfree(ftl->disk_attributes->attrs);
+ kfree(ftl->disk_attributes);
+}
+
+
+/* ----------------------- oob helpers -------------------------------------- */
+
+static int sm_get_lba(uint8_t *lba)
+{
+ /* check fixed bits */
+ if ((lba[0] & 0xF8) != 0x10)
+ return -2;
+
+ /* check parity - endianness doesn't matter */
+ if (hweight16(*(uint16_t *)lba) & 1)
+ return -2;
+
+ return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
+}
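+
+/*
+ * Worked example of this encoding (illustrative): LBA 5 is stored as
+ * 0x10 0x0b -- 0x10 | (5 >> 7), then (5 << 1) = 0x0a with bit 0 set so
+ * the 16-bit popcount stays even.
+ */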
+
+
+/*
+ * Read LBA associated with block
+ * returns -1, if block is erased
+ * returns -2 if error happens
+ */
+static int sm_read_lba(struct sm_oob *oob)
+{
+ static const uint32_t erased_pattern[4] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ uint16_t lba_test;
+ int lba;
+
+ /* First test for erased block */
+ if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
+ return -1;
+
+	/* Now check if both copies of the LBA differ too much */
+	lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t *)oob->lba_copy2;
+ if (lba_test && !is_power_of_2(lba_test))
+ return -2;
+
+ /* And read it */
+ lba = sm_get_lba(oob->lba_copy1);
+
+ if (lba == -2)
+ lba = sm_get_lba(oob->lba_copy2);
+
+ return lba;
+}
+
+static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
+{
+ uint8_t tmp[2];
+
+ WARN_ON(lba >= 1000);
+
+ tmp[0] = 0x10 | ((lba >> 7) & 0x07);
+ tmp[1] = (lba << 1) & 0xFF;
+
+ if (hweight16(*(uint16_t *)tmp) & 0x01)
+ tmp[1] |= 1;
+
+ oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
+ oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
+}
+
+
+/* Make offset from parts */
+static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
+{
+ WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
+ WARN_ON(zone < 0 || zone >= ftl->zone_count);
+ WARN_ON(block >= ftl->zone_size);
+ WARN_ON(boffset >= ftl->block_size);
+
+ if (block == -1)
+ return -1;
+
+ return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
+}
+
+/* Breaks offset into parts */
+static void sm_break_offset(struct sm_ftl *ftl, loff_t loffset,
+ int *zone, int *block, int *boffset)
+{
+ u64 offset = loffset;
+ *boffset = do_div(offset, ftl->block_size);
+ *block = do_div(offset, ftl->max_lba);
+ *zone = offset >= ftl->zone_count ? -1 : offset;
+}
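+
+/*
+ * Worked example (illustrative): on a 32 MiB card (16 KiB blocks, 1000
+ * LBAs per zone), loffset 0x1000000 splits into boffset 0, block
+ * 1024 % 1000 = 24 and zone 1.
+ */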
+
+/* ---------------------- low level IO ------------------------------------- */
+
+static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
+{
+ bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
+ uint8_t ecc[3];
+
+ ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
+ if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc1, SM_SMALL_PAGE,
+ sm_order) < 0)
+ return -EIO;
+
+ buffer += SM_SMALL_PAGE;
+
+ ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
+ if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc2, SM_SMALL_PAGE,
+ sm_order) < 0)
+ return -EIO;
+ return 0;
+}
+
+/* Reads a sector + oob*/
+static int sm_read_sector(struct sm_ftl *ftl,
+ int zone, int block, int boffset,
+ uint8_t *buffer, struct sm_oob *oob)
+{
+ struct mtd_info *mtd = ftl->trans->mtd;
+ struct mtd_oob_ops ops = { };
+ struct sm_oob tmp_oob;
+ int ret = -EIO;
+ int try = 0;
+
+	/* FTL can contain -1 entries for unmapped blocks; these read back as all 0xFF */
+ if (block == -1) {
+ if (buffer)
+ memset(buffer, 0xFF, SM_SECTOR_SIZE);
+ return 0;
+ }
+
+ /* User might not need the oob, but we do for data verification */
+ if (!oob)
+ oob = &tmp_oob;
+
+ ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
+ ops.ooboffs = 0;
+ ops.ooblen = SM_OOB_SIZE;
+ ops.oobbuf = (void *)oob;
+ ops.len = SM_SECTOR_SIZE;
+ ops.datbuf = buffer;
+
+again:
+ if (try++) {
+ /* Avoid infinite recursion on CIS reads, sm_recheck_media
+ * won't help anyway
+ */
+ if (zone == 0 && block == ftl->cis_block && boffset ==
+ ftl->cis_boffset)
+ return ret;
+
+ /* Test if media is stable */
+ if (try == 3 || sm_recheck_media(ftl))
+ return ret;
+ }
+
+	/* Unfortunately, oob read will _always_ succeed,
+	 * despite card removal...
+	 */
+ ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+
+ /* Test for unknown errors */
+ if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
+ dbg("read of block %d at zone %d, failed due to error (%d)",
+ block, zone, ret);
+ goto again;
+ }
+
+ /* Do a basic test on the oob, to guard against returned garbage */
+ if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
+ goto again;
+
+ /* This should never happen, unless there is a bug in the mtd driver */
+ WARN_ON(ops.oobretlen != SM_OOB_SIZE);
+ WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
+
+ if (!buffer)
+ return 0;
+
+ /* Test if sector marked as bad */
+ if (!sm_sector_valid(oob)) {
+ dbg("read of block %d at zone %d, failed because it is marked"
+ " as bad" , block, zone);
+ goto again;
+ }
+
+ /* Test ECC*/
+ if (mtd_is_eccerr(ret) ||
+ (ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
+
+ dbg("read of block %d at zone %d, failed due to ECC error",
+ block, zone);
+ goto again;
+ }
+
+ return 0;
+}
+
+/* Writes a sector to media */
+static int sm_write_sector(struct sm_ftl *ftl,
+ int zone, int block, int boffset,
+ uint8_t *buffer, struct sm_oob *oob)
+{
+ struct mtd_oob_ops ops = { };
+ struct mtd_info *mtd = ftl->trans->mtd;
+ int ret;
+
+ BUG_ON(ftl->readonly);
+
+ if (zone == 0 && (block == ftl->cis_block || block == 0)) {
+ dbg("attempted to write the CIS!");
+ return -EIO;
+ }
+
+ if (ftl->unstable)
+ return -EIO;
+
+ ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
+ ops.len = SM_SECTOR_SIZE;
+ ops.datbuf = buffer;
+ ops.ooboffs = 0;
+ ops.ooblen = SM_OOB_SIZE;
+ ops.oobbuf = (void *)oob;
+
+ ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+
+ /* Now we assume that hardware will catch write bitflip errors */
+
+ if (ret) {
+ dbg("write to block %d at zone %d, failed with error %d",
+ block, zone, ret);
+
+ sm_recheck_media(ftl);
+ return ret;
+ }
+
+ /* This should never happen, unless there is a bug in the driver */
+ WARN_ON(ops.oobretlen != SM_OOB_SIZE);
+ WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
+
+ return 0;
+}
+
+/* ------------------------ block IO ------------------------------------- */
+
+/* Write a block using data and lba, and invalid sector bitmap */
+static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
+ int zone, int block, int lba,
+ unsigned long invalid_bitmap)
+{
+ bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
+ struct sm_oob oob;
+ int boffset;
+ int retry = 0;
+
+ /* Initialize the oob with requested values */
+ memset(&oob, 0xFF, SM_OOB_SIZE);
+ sm_write_lba(&oob, lba);
+restart:
+ if (ftl->unstable)
+ return -EIO;
+
+ for (boffset = 0; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ oob.data_status = 0xFF;
+
+ if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {
+
+ sm_printk("sector %d of block at LBA %d of zone %d"
+ " couldn't be read, marking it as invalid",
+ boffset / SM_SECTOR_SIZE, lba, zone);
+
+ oob.data_status = 0;
+ }
+
+ if (ftl->smallpagenand) {
+ ecc_sw_hamming_calculate(buf + boffset,
+ SM_SMALL_PAGE, oob.ecc1,
+ sm_order);
+
+ ecc_sw_hamming_calculate(buf + boffset + SM_SMALL_PAGE,
+ SM_SMALL_PAGE, oob.ecc2,
+ sm_order);
+ }
+ if (!sm_write_sector(ftl, zone, block, boffset,
+ buf + boffset, &oob))
+ continue;
+
+ if (!retry) {
+
+			/* If the write fails, try to erase the block.
+			 * This is safe, because we never write to blocks
+			 * that contain valuable data.
+			 * It is intended to repair blocks that are marked
+			 * as erased but aren't fully erased.
+			 */
+
+ if (sm_erase_block(ftl, zone, block, 0))
+ return -EIO;
+
+ retry = 1;
+ goto restart;
+ } else {
+ sm_mark_block_bad(ftl, zone, block);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+
+/* Mark whole block at offset 'offs' as bad. */
+static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
+{
+ struct sm_oob oob;
+ int boffset;
+
+ memset(&oob, 0xFF, SM_OOB_SIZE);
+ oob.block_status = 0xF0;
+
+ if (ftl->unstable)
+ return;
+
+ if (sm_recheck_media(ftl))
+ return;
+
+ sm_printk("marking block %d of zone %d as bad", block, zone);
+
+ /* We aren't checking the return value, because we don't care */
+ /* This also fails on fake xD cards, but I guess these won't expose
+	 * any bad blocks till they fail completely
+ */
+ for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
+ sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
+}
+
+/*
+ * Erase a block within a zone
+ * If erase succeeds, it updates free block fifo, otherwise marks block as bad
+ */
+static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
+ int put_free)
+{
+ struct ftl_zone *zone = &ftl->zones[zone_num];
+ struct mtd_info *mtd = ftl->trans->mtd;
+ struct erase_info erase;
+
+ erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
+ erase.len = ftl->block_size;
+
+ if (ftl->unstable)
+ return -EIO;
+
+ BUG_ON(ftl->readonly);
+
+ if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
+ sm_printk("attempted to erase the CIS!");
+ return -EIO;
+ }
+
+ if (mtd_erase(mtd, &erase)) {
+ sm_printk("erase of block %d in zone %d failed",
+ block, zone_num);
+ goto error;
+ }
+
+ if (put_free)
+ kfifo_in(&zone->free_sectors,
+ (const unsigned char *)&block, sizeof(block));
+
+ return 0;
+error:
+ sm_mark_block_bad(ftl, zone_num, block);
+ return -EIO;
+}
+
+/* Thoroughly test that block is valid. */
+static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
+{
+ int boffset;
+ struct sm_oob oob;
+ int lbas[] = { -3, 0, 0, 0 };
+ int i = 0;
+ int test_lba;
+
+
+	/* First just check that the block doesn't look fishy */
+	/* Only blocks that are fully valid, or sliced in two parts,
+	 * are accepted
+	 */
+ for (boffset = 0; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ /* This shouldn't happen anyway */
+ if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
+ return -2;
+
+ test_lba = sm_read_lba(&oob);
+
+ if (lbas[i] != test_lba)
+ lbas[++i] = test_lba;
+
+ /* If we found three different LBAs, something is fishy */
+ if (i == 3)
+ return -EIO;
+ }
+
+ /* If the block is sliced (partially erased usually) erase it */
+ if (i == 2) {
+ sm_erase_block(ftl, zone, block, 1);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* ----------------- media scanning --------------------------------- */
+static const struct chs_entry chs_table[] = {
+ { 1, 125, 4, 4 },
+ { 2, 125, 4, 8 },
+ { 4, 250, 4, 8 },
+ { 8, 250, 4, 16 },
+ { 16, 500, 4, 16 },
+ { 32, 500, 8, 16 },
+ { 64, 500, 8, 32 },
+ { 128, 500, 16, 32 },
+ { 256, 1000, 16, 32 },
+ { 512, 1015, 32, 63 },
+ { 1024, 985, 33, 63 },
+ { 2048, 985, 33, 63 },
+ { 0 },
+};
+
+
+static const uint8_t cis_signature[] = {
+ 0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
+};
+/* Find out media parameters.
+ * This ideally has to be based on nand id, but for now device size is enough
+ */
+static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
+{
+ int i;
+ int size_in_megs = mtd->size / (1024 * 1024);
+
+ ftl->readonly = mtd->type == MTD_ROM;
+
+ /* Manual settings for very old devices */
+ ftl->zone_count = 1;
+ ftl->smallpagenand = 0;
+
+ switch (size_in_megs) {
+ case 1:
+ /* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
+ ftl->zone_size = 256;
+ ftl->max_lba = 250;
+ ftl->block_size = 8 * SM_SECTOR_SIZE;
+ ftl->smallpagenand = 1;
+
+ break;
+ case 2:
+ /* 2 MiB flash SmartMedia (256 byte pages)*/
+ if (mtd->writesize == SM_SMALL_PAGE) {
+ ftl->zone_size = 512;
+ ftl->max_lba = 500;
+ ftl->block_size = 8 * SM_SECTOR_SIZE;
+ ftl->smallpagenand = 1;
+ /* 2 MiB rom SmartMedia */
+ } else {
+
+ if (!ftl->readonly)
+ return -ENODEV;
+
+ ftl->zone_size = 256;
+ ftl->max_lba = 250;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ }
+ break;
+ case 4:
+ /* 4 MiB flash/rom SmartMedia device */
+ ftl->zone_size = 512;
+ ftl->max_lba = 500;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ break;
+ case 8:
+ /* 8 MiB flash/rom SmartMedia device */
+ ftl->zone_size = 1024;
+ ftl->max_lba = 1000;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ }
+
+ /* Minimum xD size is 16MiB. Also, all xD cards have standard zone
+ * sizes. SmartMedia cards exist up to 128 MiB and have same layout
+ */
+ if (size_in_megs >= 16) {
+ ftl->zone_count = size_in_megs / 16;
+ ftl->zone_size = 1024;
+ ftl->max_lba = 1000;
+ ftl->block_size = 32 * SM_SECTOR_SIZE;
+ }
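+
+	/*
+	 * Illustrative capacity check: a 128 MiB xD card gets 8 zones of
+	 * 1024 blocks (1000 usable LBAs each) with 16 KiB blocks, i.e.
+	 * roughly 125 MiB exposed through the FTL.
+	 */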
+
+	/* Test for proper write, erase and oob sizes */
+ if (mtd->erasesize > ftl->block_size)
+ return -ENODEV;
+
+ if (mtd->writesize > SM_SECTOR_SIZE)
+ return -ENODEV;
+
+ if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
+ return -ENODEV;
+
+ if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
+ return -ENODEV;
+
+ /* We use OOB */
+ if (!mtd_has_oob(mtd))
+ return -ENODEV;
+
+ /* Find geometry information */
+ for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
+ if (chs_table[i].size == size_in_megs) {
+ ftl->cylinders = chs_table[i].cyl;
+ ftl->heads = chs_table[i].head;
+ ftl->sectors = chs_table[i].sec;
+ return 0;
+ }
+ }
+
+ sm_printk("media has unknown size : %dMiB", size_in_megs);
+ ftl->cylinders = 985;
+ ftl->heads = 33;
+ ftl->sectors = 63;
+ return 0;
+}
+
+/* Validate the CIS */
+static int sm_read_cis(struct sm_ftl *ftl)
+{
+ struct sm_oob oob;
+
+ if (sm_read_sector(ftl,
+ 0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
+ return -EIO;
+
+ if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
+ return -EIO;
+
+ if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
+ cis_signature, sizeof(cis_signature))) {
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/* Scan the media for the CIS */
+static int sm_find_cis(struct sm_ftl *ftl)
+{
+ struct sm_oob oob;
+ int block, boffset;
+ int block_found = 0;
+ int cis_found = 0;
+
+ /* Search for first valid block */
+ for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {
+
+ if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
+ continue;
+
+ if (!sm_block_valid(&oob))
+ continue;
+ block_found = 1;
+ break;
+ }
+
+ if (!block_found)
+ return -EIO;
+
+ /* Search for first valid sector in this block */
+ for (boffset = 0 ; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
+ continue;
+
+ if (!sm_sector_valid(&oob))
+ continue;
+ break;
+ }
+
+ if (boffset == ftl->block_size)
+ return -EIO;
+
+ ftl->cis_block = block;
+ ftl->cis_boffset = boffset;
+ ftl->cis_page_offset = 0;
+
+ cis_found = !sm_read_cis(ftl);
+
+ if (!cis_found) {
+ ftl->cis_page_offset = SM_SMALL_PAGE;
+ cis_found = !sm_read_cis(ftl);
+ }
+
+ if (cis_found) {
+ dbg("CIS block found at offset %x",
+ block * ftl->block_size +
+ boffset + ftl->cis_page_offset);
+ return 0;
+ }
+ return -EIO;
+}
+
+/* Basic test to determine if the underlying mtd device is functional */
+static int sm_recheck_media(struct sm_ftl *ftl)
+{
+ if (sm_read_cis(ftl)) {
+
+ if (!ftl->unstable) {
+ sm_printk("media unstable, not allowing writes");
+ ftl->unstable = 1;
+ }
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Initialize a FTL zone */
+static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
+{
+ struct ftl_zone *zone = &ftl->zones[zone_num];
+ struct sm_oob oob;
+ uint16_t block;
+ int lba;
+ int i = 0;
+ int len;
+
+ dbg("initializing zone %d", zone_num);
+
+ /* Allocate memory for FTL table */
+ zone->lba_to_phys_table = kmalloc_array(ftl->max_lba, 2, GFP_KERNEL);
+
+ if (!zone->lba_to_phys_table)
+ return -ENOMEM;
+ memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);
+
+
+ /* Allocate memory for free sectors FIFO */
+ if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
+ kfree(zone->lba_to_phys_table);
+ return -ENOMEM;
+ }
+
+ /* Now scan the zone */
+ for (block = 0 ; block < ftl->zone_size ; block++) {
+
+ /* Skip blocks till the CIS (including) */
+ if (zone_num == 0 && block <= ftl->cis_block)
+ continue;
+
+ /* Read the oob of first sector */
+ if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob)) {
+ kfifo_free(&zone->free_sectors);
+ kfree(zone->lba_to_phys_table);
+ return -EIO;
+ }
+
+		/* Test to see if the block is erased. It is enough to test
+		 * the first sector, because erase happens in one shot
+		 */
+ if (sm_block_erased(&oob)) {
+ kfifo_in(&zone->free_sectors,
+ (unsigned char *)&block, 2);
+ continue;
+ }
+
+		/* If the block is marked as bad, skip it */
+		/* This assumes we can trust the first sector */
+		/* However, the way the block valid status is defined ensures
+		 * a very low probability of failure here
+		 */
+ if (!sm_block_valid(&oob)) {
+ dbg("PH %04d <-> <marked bad>", block);
+ continue;
+ }
+
+
+ lba = sm_read_lba(&oob);
+
+		/* Invalid LBA means that the block is damaged. */
+		/* We can try to erase it, or mark it as bad, but
+		 * let's leave that to a recovery application
+		 */
+ if (lba == -2 || lba >= ftl->max_lba) {
+ dbg("PH %04d <-> LBA %04d(bad)", block, lba);
+ continue;
+ }
+
+
+ /* If there is no collision,
+ * just put the sector in the FTL table
+ */
+ if (zone->lba_to_phys_table[lba] < 0) {
+ dbg_verbose("PH %04d <-> LBA %04d", block, lba);
+ zone->lba_to_phys_table[lba] = block;
+ continue;
+ }
+
+ sm_printk("collision"
+ " of LBA %d between blocks %d and %d in zone %d",
+ lba, zone->lba_to_phys_table[lba], block, zone_num);
+
+ /* Test that this block is valid*/
+ if (sm_check_block(ftl, zone_num, block))
+ continue;
+
+ /* Test now the old block */
+ if (sm_check_block(ftl, zone_num,
+ zone->lba_to_phys_table[lba])) {
+ zone->lba_to_phys_table[lba] = block;
+ continue;
+ }
+
+		/* If both blocks are valid and share the same LBA, they hold
+		 * different versions of the same data. It is not known which
+		 * is more recent, thus just erase one of them
+		 */
+		sm_printk("both blocks are valid, erasing the later one");
+ sm_erase_block(ftl, zone_num, block, 1);
+ }
+
+ dbg("zone initialized");
+ zone->initialized = 1;
+
+	/* No free sectors means that the zone is heavily damaged; writes
+	 * won't work, but it can still be (partially) read
+	 */
+ if (!kfifo_len(&zone->free_sectors)) {
+ sm_printk("no free blocks in zone %d", zone_num);
+ return 0;
+ }
+
+ /* Randomize first block we write to */
+ get_random_bytes(&i, 2);
+ i %= (kfifo_len(&zone->free_sectors) / 2);
+
+ while (i--) {
+ len = kfifo_out(&zone->free_sectors,
+ (unsigned char *)&block, 2);
+ WARN_ON(len != 2);
+ kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
+ }
+ return 0;
+}
+
+/* Get and automatically initialize an FTL mapping for one zone */
+static struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
+{
+ struct ftl_zone *zone;
+ int error;
+
+ BUG_ON(zone_num >= ftl->zone_count);
+ zone = &ftl->zones[zone_num];
+
+ if (!zone->initialized) {
+ error = sm_init_zone(ftl, zone_num);
+
+ if (error)
+ return ERR_PTR(error);
+ }
+ return zone;
+}
+
+
+/* ----------------- cache handling ------------------------------------------*/
+
+/* Initialize the one block cache */
+static void sm_cache_init(struct sm_ftl *ftl)
+{
+ ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
+ ftl->cache_clean = 1;
+ ftl->cache_zone = -1;
+ ftl->cache_block = -1;
+ /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
+}
+
+/* Put sector in one block cache */
+static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
+{
+ memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
+ clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
+ ftl->cache_clean = 0;
+}
+
+/* Read a sector from the cache */
+static int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
+{
+ if (test_bit(boffset / SM_SECTOR_SIZE,
+ &ftl->cache_data_invalid_bitmap))
+ return -1;
+
+ memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
+ return 0;
+}
+
+/* Write the cache to hardware */
+static int sm_cache_flush(struct sm_ftl *ftl)
+{
+ struct ftl_zone *zone;
+
+ int sector_num;
+ uint16_t write_sector;
+ int zone_num = ftl->cache_zone;
+ int block_num;
+
+ if (ftl->cache_clean)
+ return 0;
+
+ if (ftl->unstable)
+ return -EIO;
+
+ BUG_ON(zone_num < 0);
+ zone = &ftl->zones[zone_num];
+ block_num = zone->lba_to_phys_table[ftl->cache_block];
+
+
+ /* Try to read all unread areas of the cache block*/
+ for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
+ ftl->block_size / SM_SECTOR_SIZE) {
+
+ if (!sm_read_sector(ftl,
+ zone_num, block_num, sector_num * SM_SECTOR_SIZE,
+ ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
+ clear_bit(sector_num,
+ &ftl->cache_data_invalid_bitmap);
+ }
+restart:
+
+ if (ftl->unstable)
+ return -EIO;
+
+	/* If there are no spare blocks, we could still continue by
+	 * erasing/writing the current block, but for such worn-out media
+	 * it isn't worth the trouble, or the danger
+	 */
+ if (kfifo_out(&zone->free_sectors,
+ (unsigned char *)&write_sector, 2) != 2) {
+ dbg("no free sectors for write!");
+ return -EIO;
+ }
+
+
+ if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
+ ftl->cache_block, ftl->cache_data_invalid_bitmap))
+ goto restart;
+
+ /* Update the FTL table */
+ zone->lba_to_phys_table[ftl->cache_block] = write_sector;
+
+ /* Write successful, so erase and free the old block */
+ if (block_num > 0)
+ sm_erase_block(ftl, zone_num, block_num, 1);
+
+ sm_cache_init(ftl);
+ return 0;
+}
+
+
+/* flush timer, runs a second after last write */
+static void sm_cache_flush_timer(struct timer_list *t)
+{
+ struct sm_ftl *ftl = from_timer(ftl, t, timer);
+ queue_work(cache_flush_workqueue, &ftl->flush_work);
+}
+
+/* cache flush work, kicked by timer */
+static void sm_cache_flush_work(struct work_struct *work)
+{
+ struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
+ mutex_lock(&ftl->mutex);
+ sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+}
+
+/* ---------------- outside interface -------------------------------------- */
+
+/* outside interface: read a sector */
+static int sm_read(struct mtd_blktrans_dev *dev,
+ unsigned long sect_no, char *buf)
+{
+ struct sm_ftl *ftl = dev->priv;
+ struct ftl_zone *zone;
+ int error = 0, in_cache = 0;
+ int zone_num, block, boffset;
+
+ sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
+ mutex_lock(&ftl->mutex);
+
+
+ zone = sm_get_zone(ftl, zone_num);
+ if (IS_ERR(zone)) {
+ error = PTR_ERR(zone);
+ goto unlock;
+ }
+
+ /* Have to look at cache first */
+ if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
+ in_cache = 1;
+ if (!sm_cache_get(ftl, buf, boffset))
+ goto unlock;
+ }
+
+ /* Translate the block and return if doesn't exist in the table */
+ block = zone->lba_to_phys_table[block];
+
+ if (block == -1) {
+ memset(buf, 0xFF, SM_SECTOR_SIZE);
+ goto unlock;
+ }
+
+ if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
+ error = -EIO;
+ goto unlock;
+ }
+
+ if (in_cache)
+ sm_cache_put(ftl, buf, boffset);
+unlock:
+ mutex_unlock(&ftl->mutex);
+ return error;
+}
+
+/* outside interface: write a sector */
+static int sm_write(struct mtd_blktrans_dev *dev,
+ unsigned long sec_no, char *buf)
+{
+ struct sm_ftl *ftl = dev->priv;
+ struct ftl_zone *zone;
+ int error = 0, zone_num, block, boffset;
+
+ BUG_ON(ftl->readonly);
+ sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
+
+	/* No need for the cache flush timer to fire now */
+ del_timer(&ftl->timer);
+ mutex_lock(&ftl->mutex);
+
+ zone = sm_get_zone(ftl, zone_num);
+ if (IS_ERR(zone)) {
+ error = PTR_ERR(zone);
+ goto unlock;
+ }
+
+ /* If entry is not in cache, flush it */
+ if (ftl->cache_block != block || ftl->cache_zone != zone_num) {
+
+ error = sm_cache_flush(ftl);
+ if (error)
+ goto unlock;
+
+ ftl->cache_block = block;
+ ftl->cache_zone = zone_num;
+ }
+
+ sm_cache_put(ftl, buf, boffset);
+unlock:
+ mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
+ mutex_unlock(&ftl->mutex);
+ return error;
+}
+
+/* outside interface: flush everything */
+static int sm_flush(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+ int retval;
+
+ mutex_lock(&ftl->mutex);
+ retval = sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+ return retval;
+}
+
+/* outside interface: device is released */
+static void sm_release(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+
+ del_timer_sync(&ftl->timer);
+ cancel_work_sync(&ftl->flush_work);
+ mutex_lock(&ftl->mutex);
+ sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+}
+
+/* outside interface: get geometry */
+static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct sm_ftl *ftl = dev->priv;
+ geo->heads = ftl->heads;
+ geo->sectors = ftl->sectors;
+ geo->cylinders = ftl->cylinders;
+ return 0;
+}
+
+/* external interface: main initialization function */
+static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct mtd_blktrans_dev *trans;
+ struct sm_ftl *ftl;
+
+ /* Allocate & initialize our private structure */
+ ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
+ if (!ftl)
+ goto error1;
+
+
+ mutex_init(&ftl->mutex);
+ timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
+ INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
+
+ /* Read media information */
+ if (sm_get_media_info(ftl, mtd)) {
+ dbg("found unsupported mtd device, aborting");
+ goto error2;
+ }
+
+
+ /* Allocate temporary CIS buffer for read retry support */
+ ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
+ if (!ftl->cis_buffer)
+ goto error2;
+
+ /* Allocate zone array, it will be initialized on demand */
+ ftl->zones = kcalloc(ftl->zone_count, sizeof(struct ftl_zone),
+ GFP_KERNEL);
+ if (!ftl->zones)
+ goto error3;
+
+ /* Allocate the cache*/
+ ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
+
+ if (!ftl->cache_data)
+ goto error4;
+
+ sm_cache_init(ftl);
+
+
+ /* Allocate upper layer structure and initialize it */
+ trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
+ if (!trans)
+ goto error5;
+
+ ftl->trans = trans;
+ trans->priv = ftl;
+
+ trans->tr = tr;
+ trans->mtd = mtd;
+ trans->devnum = -1;
+ trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
+ trans->readonly = ftl->readonly;
+
+ if (sm_find_cis(ftl)) {
+ dbg("CIS not found on mtd device, aborting");
+ goto error6;
+ }
+
+ ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
+ if (!ftl->disk_attributes)
+ goto error6;
+ trans->disk_attributes = ftl->disk_attributes;
+
+ sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
+ (int)(mtd->size / (1024 * 1024)), mtd->index);
+
+ dbg("FTL layout:");
+ dbg("%d zone(s), each consists of %d blocks (+%d spares)",
+ ftl->zone_count, ftl->max_lba,
+ ftl->zone_size - ftl->max_lba);
+ dbg("each block consists of %d bytes",
+ ftl->block_size);
+
+
+ /* Register device*/
+ if (add_mtd_blktrans_dev(trans)) {
+ dbg("error in mtdblktrans layer");
+ goto error6;
+ }
+ return;
+error6:
+ kfree(trans);
+error5:
+ kfree(ftl->cache_data);
+error4:
+ kfree(ftl->zones);
+error3:
+ kfree(ftl->cis_buffer);
+error2:
+ kfree(ftl);
+error1:
+ return;
+}
+
+/* main interface: device {surprise,} removal */
+static void sm_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+ int i;
+
+ del_mtd_blktrans_dev(dev);
+ ftl->trans = NULL;
+
+ for (i = 0 ; i < ftl->zone_count; i++) {
+
+ if (!ftl->zones[i].initialized)
+ continue;
+
+ kfree(ftl->zones[i].lba_to_phys_table);
+ kfifo_free(&ftl->zones[i].free_sectors);
+ }
+
+ sm_delete_sysfs_attributes(ftl);
+ kfree(ftl->cis_buffer);
+ kfree(ftl->zones);
+ kfree(ftl->cache_data);
+ kfree(ftl);
+}
+
+static struct mtd_blktrans_ops sm_ftl_ops = {
+ .name = "smblk",
+ .major = 0,
+ .part_bits = SM_FTL_PARTN_BITS,
+ .blksize = SM_SECTOR_SIZE,
+ .getgeo = sm_getgeo,
+
+ .add_mtd = sm_add_mtd,
+ .remove_dev = sm_remove_dev,
+
+ .readsect = sm_read,
+ .writesect = sm_write,
+
+ .flush = sm_flush,
+ .release = sm_release,
+
+ .owner = THIS_MODULE,
+};
+
+static int __init sm_module_init(void)
+{
+ int error = 0;
+
+ cache_flush_workqueue = create_freezable_workqueue("smflush");
+ if (!cache_flush_workqueue)
+ return -ENOMEM;
+
+ error = register_mtd_blktrans(&sm_ftl_ops);
+ if (error)
+ destroy_workqueue(cache_flush_workqueue);
+ return error;
+}
+
+static void __exit sm_module_exit(void)
+{
+ destroy_workqueue(cache_flush_workqueue);
+ deregister_mtd_blktrans(&sm_ftl_ops);
+}
+
+module_init(sm_module_init);
+module_exit(sm_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
diff --git a/drivers/mtd/sm_ftl.h b/drivers/mtd/sm_ftl.h
new file mode 100644
index 0000000000..6aed8b60de
--- /dev/null
+++ b/drivers/mtd/sm_ftl.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * SmartMedia/xD translation layer
+ *
+ * Based loosely on ssfdc.c which is
+ * © 2005 Eptar srl
+ * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
+ */
+
+#include <linux/mtd/blktrans.h>
+#include <linux/kfifo.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/mtd/mtd.h>
+
+
+
+struct ftl_zone {
+ bool initialized;
+ int16_t *lba_to_phys_table; /* LBA to physical table */
+ struct kfifo free_sectors; /* queue of free sectors */
+};
+
+struct sm_ftl {
+ struct mtd_blktrans_dev *trans;
+
+ struct mutex mutex; /* protects the structure */
+ struct ftl_zone *zones; /* FTL tables for each zone */
+
+ /* Media information */
+ int block_size; /* block size in bytes */
+ int zone_size; /* zone size in blocks */
+ int zone_count; /* number of zones */
+ int max_lba; /* maximum lba in a zone */
+ int smallpagenand; /* 256 bytes/page nand */
+ bool readonly; /* is FS readonly */
+ bool unstable;
+ int cis_block; /* CIS block location */
+ int cis_boffset; /* CIS offset in the block */
+ int cis_page_offset; /* CIS offset in the page */
+ void *cis_buffer; /* tmp buffer for cis reads */
+
+ /* Cache */
+ int cache_block; /* block number of cached block */
+ int cache_zone; /* zone of cached block */
+ unsigned char *cache_data; /* cached block data */
+ long unsigned int cache_data_invalid_bitmap;
+ bool cache_clean;
+ struct work_struct flush_work;
+ struct timer_list timer;
+
+ /* Geometry stuff */
+ int heads;
+ int sectors;
+ int cylinders;
+
+ struct attribute_group *disk_attributes;
+};
+
+struct chs_entry {
+ unsigned long size;
+ unsigned short cyl;
+ unsigned char head;
+ unsigned char sec;
+};
+
+
+#define SM_FTL_PARTN_BITS 3
+
+#define sm_printk(format, ...) \
+ printk(KERN_WARNING "sm_ftl" ": " format "\n", ## __VA_ARGS__)
+
+#define dbg(format, ...) \
+	do { \
+		if (debug) \
+			printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__); \
+	} while (0)
+
+#define dbg_verbose(format, ...) \
+	do { \
+		if (debug > 1) \
+			printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__); \
+	} while (0)
+
+
+static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
+ int put_free);
+static void sm_mark_block_bad(struct sm_ftl *ftl, int zone_num, int block);
+
+static int sm_recheck_media(struct sm_ftl *ftl);
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
new file mode 100644
index 0000000000..24cd25de2b
--- /dev/null
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig MTD_SPI_NOR
+ tristate "SPI NOR device support"
+ depends on MTD && SPI_MASTER
+ select SPI_MEM
+ help
+ This is the SPI NOR framework, used both by SPI controller drivers
+ and by the SPI NOR device driver.
+
+if MTD_SPI_NOR
+
+config MTD_SPI_NOR_USE_4K_SECTORS
+ bool "Use small 4096 B erase sectors"
+ default y
+ help
+ Many flash memories support erasing small (4096 B) sectors. Depending
+ on the usage, this feature may provide a performance gain compared
+ to erasing whole blocks (32/64 KiB).
+ Changing a small part of the flash's contents is usually faster with
+ small sectors. On the other hand, erasing should be faster when using
+ a 64 KiB block instead of 16 × 4 KiB sectors.
+
+ Please note that some tools/drivers/filesystems may not work with
+ 4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
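+
+ A minimal configuration using this option might look like this
+ (illustrative):
+
+ CONFIG_MTD=y
+ CONFIG_MTD_SPI_NOR=y
+ CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y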
+
+choice
+ prompt "Software write protection at boot"
+ default MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE
+
+config MTD_SPI_NOR_SWP_DISABLE
+ bool "Disable SWP on any flashes (legacy behavior)"
+ help
+ This option disables the software write protection on any SPI
+ flashes at boot-up.
+
+ Depending on the flash chip this either clears the block protection
+ bits or does a "Global Unprotect" command.
+
+ Don't use this if you intend to use the software write protection
+ of your SPI flash. This is only to keep backwards compatibility.
+
+config MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE
+ bool "Disable SWP on flashes w/ volatile protection bits"
+ help
+ Some SPI flashes have volatile block protection bits, i.e. after a
+ power-up or a reset, the flash is software write protected by
+ default.
+
+ This option disables the software write protection for this kind
+ of flash while keeping it enabled for any other SPI flashes
+ which have non-volatile write protection bits.
+
+ If the software write protection is to be disabled, then depending
+ on the flash either the block protection bits are cleared or a
+ "Global Unprotect" command is issued.
+
+ If you are unsure, select this option.
+
+config MTD_SPI_NOR_SWP_KEEP
+ bool "Keep software write protection as is"
+ help
+ If you select this option, the software write protection of any
+ SPI flash will not be changed. If your flash is software write
+ protected or will be automatically software write protected after
+ power-up, you have to manually unlock it before you are able to
+ write to it.
+
+endchoice
+
+source "drivers/mtd/spi-nor/controllers/Kconfig"
+
+endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
new file mode 100644
index 0000000000..e347b435a0
--- /dev/null
+++ b/drivers/mtd/spi-nor/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+
+spi-nor-objs := core.o sfdp.o swp.o otp.o sysfs.o
+spi-nor-objs += atmel.o
+spi-nor-objs += catalyst.o
+spi-nor-objs += eon.o
+spi-nor-objs += esmt.o
+spi-nor-objs += everspin.o
+spi-nor-objs += fujitsu.o
+spi-nor-objs += gigadevice.o
+spi-nor-objs += intel.o
+spi-nor-objs += issi.o
+spi-nor-objs += macronix.o
+spi-nor-objs += micron-st.o
+spi-nor-objs += spansion.o
+spi-nor-objs += sst.o
+spi-nor-objs += winbond.o
+spi-nor-objs += xilinx.o
+spi-nor-objs += xmc.o
+spi-nor-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
+
+obj-$(CONFIG_MTD_SPI_NOR) += controllers/
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
new file mode 100644
index 0000000000..58968c1e7d
--- /dev/null
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+#define ATMEL_SR_GLOBAL_PROTECT_MASK GENMASK(5, 2)
+
+/*
+ * The Atmel AT25FS010/AT25FS040 parts have some weird configuration for the
+ * block protection bits. We don't support them. But the legacy behavior in Linux
+ * is to unlock the whole flash array on startup. Therefore, we have to support
+ * exactly this operation.
+ */
+static int at25fs_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ return -EOPNOTSUPP;
+}
+
+static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ /* We only support unlocking the whole flash array */
+ if (ofs || len != nor->params->size)
+ return -EINVAL;
+
+ /* Write 0x00 to the status register to disable write protection */
+ ret = spi_nor_write_sr_and_check(nor, 0);
+ if (ret)
+ dev_dbg(nor->dev, "unable to clear BP bits, WP# asserted?\n");
+
+ return ret;
+}
+
+static int at25fs_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct spi_nor_locking_ops at25fs_nor_locking_ops = {
+ .lock = at25fs_nor_lock,
+ .unlock = at25fs_nor_unlock,
+ .is_locked = at25fs_nor_is_locked,
+};
+
+static int at25fs_nor_late_init(struct spi_nor *nor)
+{
+ nor->params->locking_ops = &at25fs_nor_locking_ops;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups at25fs_nor_fixups = {
+ .late_init = at25fs_nor_late_init,
+};
+
+/**
+ * atmel_nor_set_global_protection - Do a Global Protect or Unprotect command
+ * @nor: pointer to 'struct spi_nor'
+ * @ofs: offset in bytes
+ * @len: len in bytes
+ * @is_protect: if true do a Global Protect, otherwise do a Global Unprotect
+ *
+ * Return: 0 on success, -error otherwise.
+ */
+static int atmel_nor_set_global_protection(struct spi_nor *nor, loff_t ofs,
+ uint64_t len, bool is_protect)
+{
+ int ret;
+ u8 sr;
+
+ /* We only support locking the whole flash array */
+ if (ofs || len != nor->params->size)
+ return -EINVAL;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ sr = nor->bouncebuf[0];
+
+ /* SRWD bit needs to be cleared, otherwise the protection doesn't change */
+ if (sr & SR_SRWD) {
+ sr &= ~SR_SRWD;
+ ret = spi_nor_write_sr_and_check(nor, sr);
+ if (ret) {
+ dev_dbg(nor->dev, "unable to clear SRWD bit, WP# asserted?\n");
+ return ret;
+ }
+ }
+
+ if (is_protect) {
+ sr |= ATMEL_SR_GLOBAL_PROTECT_MASK;
+ /*
+ * Set the SRWD bit again as soon as we are protecting
+ * anything. This will ensure that the WP# pin is working
+ * correctly. By doing this we also behave the same as
+ * spi_nor_sr_lock(), which sets SRWD if any block protection
+ * is active.
+ */
+ sr |= SR_SRWD;
+ } else {
+ sr &= ~ATMEL_SR_GLOBAL_PROTECT_MASK;
+ }
+
+ nor->bouncebuf[0] = sr;
+
+ /*
+ * We cannot use spi_nor_write_sr_and_check() because this command
+ * isn't really setting any bits; instead it is a pseudo command for
+ * "Global Unprotect" or "Global Protect".
+ */
+ return spi_nor_write_sr(nor, nor->bouncebuf, 1);
+}
+
+static int atmel_nor_global_protect(struct spi_nor *nor, loff_t ofs,
+ uint64_t len)
+{
+ return atmel_nor_set_global_protection(nor, ofs, len, true);
+}
+
+static int atmel_nor_global_unprotect(struct spi_nor *nor, loff_t ofs,
+ uint64_t len)
+{
+ return atmel_nor_set_global_protection(nor, ofs, len, false);
+}
+
+static int atmel_nor_is_global_protected(struct spi_nor *nor, loff_t ofs,
+ uint64_t len)
+{
+ int ret;
+
+ if (ofs >= nor->params->size || (ofs + len) > nor->params->size)
+ return -EINVAL;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ return ((nor->bouncebuf[0] & ATMEL_SR_GLOBAL_PROTECT_MASK) == ATMEL_SR_GLOBAL_PROTECT_MASK);
+}
+
+static const struct spi_nor_locking_ops atmel_nor_global_protection_ops = {
+ .lock = atmel_nor_global_protect,
+ .unlock = atmel_nor_global_unprotect,
+ .is_locked = atmel_nor_is_global_protected,
+};
+
+static int atmel_nor_global_protection_late_init(struct spi_nor *nor)
+{
+ nor->params->locking_ops = &atmel_nor_global_protection_ops;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups atmel_nor_global_protection_fixups = {
+ .late_init = atmel_nor_global_protection_late_init,
+};
+
+static const struct flash_info atmel_nor_parts[] = {
+ /* Atmel -- some are (confusingly) marketed as "DataFlash" */
+ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &at25fs_nor_fixups },
+ { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &at25fs_nor_fixups },
+ { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_nor_global_protection_fixups },
+ { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_nor_global_protection_fixups },
+ { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_nor_global_protection_fixups },
+ { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_nor_global_protection_fixups },
+ { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_nor_global_protection_fixups },
+ { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_nor_global_protection_fixups },
+ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_nor_global_protection_fixups },
+ { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K) },
+};
+
+const struct spi_nor_manufacturer spi_nor_atmel = {
+ .name = "atmel",
+ .parts = atmel_nor_parts,
+ .nparts = ARRAY_SIZE(atmel_nor_parts),
+};
diff --git a/drivers/mtd/spi-nor/catalyst.c b/drivers/mtd/spi-nor/catalyst.c
new file mode 100644
index 0000000000..6d310815fb
--- /dev/null
+++ b/drivers/mtd/spi-nor/catalyst.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info catalyst_nor_parts[] = {
+ /* Catalyst / On Semiconductor -- non-JEDEC */
+ { "cat25c11", CAT25_INFO(16, 8, 16, 1) },
+ { "cat25c03", CAT25_INFO(32, 8, 16, 2) },
+ { "cat25c09", CAT25_INFO(128, 8, 32, 2) },
+ { "cat25c17", CAT25_INFO(256, 8, 32, 2) },
+ { "cat25128", CAT25_INFO(2048, 8, 64, 2) },
+};
+
+const struct spi_nor_manufacturer spi_nor_catalyst = {
+ .name = "catalyst",
+ .parts = catalyst_nor_parts,
+ .nparts = ARRAY_SIZE(catalyst_nor_parts),
+};
diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig
new file mode 100644
index 0000000000..ca45dcd3ff
--- /dev/null
+++ b/drivers/mtd/spi-nor/controllers/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SPI_HISI_SFC
+ tristate "Hisilicon FMC SPI NOR Flash Controller(SFC)"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables support for the HiSilicon FMC SPI NOR flash controller.
+
+config SPI_NXP_SPIFI
+ tristate "NXP SPI Flash Interface (SPIFI)"
+ depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+ depends on HAS_IOMEM
+ help
+ Enable support for the NXP LPC SPI Flash Interface controller.
+
+ SPIFI is a specialized controller for connecting serial SPI
+ flash. Enable this option if you have a device with a SPIFI
+ controller and want to access the flash as an MTD device.
diff --git a/drivers/mtd/spi-nor/controllers/Makefile b/drivers/mtd/spi-nor/controllers/Makefile
new file mode 100644
index 0000000000..0b8e1d5309
--- /dev/null
+++ b/drivers/mtd/spi-nor/controllers/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
+obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
new file mode 100644
index 0000000000..5070d72835
--- /dev/null
+++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HiSilicon FMC SPI NOR flash controller driver
+ *
+ * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd.
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Hardware register offsets and field definitions */
+#define FMC_CFG 0x00
+#define FMC_CFG_OP_MODE_MASK BIT_MASK(0)
+#define FMC_CFG_OP_MODE_BOOT 0
+#define FMC_CFG_OP_MODE_NORMAL 1
+#define FMC_CFG_FLASH_SEL(type) (((type) & 0x3) << 1)
+#define FMC_CFG_FLASH_SEL_MASK 0x6
+#define FMC_ECC_TYPE(type) (((type) & 0x7) << 5)
+#define FMC_ECC_TYPE_MASK GENMASK(7, 5)
+#define SPI_NOR_ADDR_MODE_MASK BIT_MASK(10)
+#define SPI_NOR_ADDR_MODE_3BYTES (0x0 << 10)
+#define SPI_NOR_ADDR_MODE_4BYTES (0x1 << 10)
+#define FMC_GLOBAL_CFG 0x04
+#define FMC_GLOBAL_CFG_WP_ENABLE BIT(6)
+#define FMC_SPI_TIMING_CFG 0x08
+#define TIMING_CFG_TCSH(nr) (((nr) & 0xf) << 8)
+#define TIMING_CFG_TCSS(nr) (((nr) & 0xf) << 4)
+#define TIMING_CFG_TSHSL(nr) ((nr) & 0xf)
+#define CS_HOLD_TIME 0x6
+#define CS_SETUP_TIME 0x6
+#define CS_DESELECT_TIME 0xf
+#define FMC_INT 0x18
+#define FMC_INT_OP_DONE BIT(0)
+#define FMC_INT_CLR 0x20
+#define FMC_CMD 0x24
+#define FMC_CMD_CMD1(cmd) ((cmd) & 0xff)
+#define FMC_ADDRL 0x2c
+#define FMC_OP_CFG 0x30
+#define OP_CFG_FM_CS(cs) ((cs) << 11)
+#define OP_CFG_MEM_IF_TYPE(type) (((type) & 0x7) << 7)
+#define OP_CFG_ADDR_NUM(addr) (((addr) & 0x7) << 4)
+#define OP_CFG_DUMMY_NUM(dummy) ((dummy) & 0xf)
+#define FMC_DATA_NUM 0x38
+#define FMC_DATA_NUM_CNT(cnt) ((cnt) & GENMASK(13, 0))
+#define FMC_OP 0x3c
+#define FMC_OP_DUMMY_EN BIT(8)
+#define FMC_OP_CMD1_EN BIT(7)
+#define FMC_OP_ADDR_EN BIT(6)
+#define FMC_OP_WRITE_DATA_EN BIT(5)
+#define FMC_OP_READ_DATA_EN BIT(2)
+#define FMC_OP_READ_STATUS_EN BIT(1)
+#define FMC_OP_REG_OP_START BIT(0)
+#define FMC_DMA_LEN 0x40
+#define FMC_DMA_LEN_SET(len) ((len) & GENMASK(27, 0))
+#define FMC_DMA_SADDR_D0 0x4c
+#define HIFMC_DMA_MAX_LEN (4096)
+#define HIFMC_DMA_MASK (HIFMC_DMA_MAX_LEN - 1)
+#define FMC_OP_DMA 0x68
+#define OP_CTRL_RD_OPCODE(code) (((code) & 0xff) << 16)
+#define OP_CTRL_WR_OPCODE(code) (((code) & 0xff) << 8)
+#define OP_CTRL_RW_OP(op) ((op) << 1)
+#define OP_CTRL_DMA_OP_READY BIT(0)
+#define FMC_OP_READ 0x0
+#define FMC_OP_WRITE 0x1
+#define FMC_WAIT_TIMEOUT 1000000
+
+enum hifmc_iftype {
+ IF_TYPE_STD,
+ IF_TYPE_DUAL,
+ IF_TYPE_DIO,
+ IF_TYPE_QUAD,
+ IF_TYPE_QIO,
+};
+
+struct hifmc_priv {
+ u32 chipselect;
+ u32 clkrate;
+ struct hifmc_host *host;
+};
+
+#define HIFMC_MAX_CHIP_NUM 2
+struct hifmc_host {
+ struct device *dev;
+ struct mutex lock;
+
+ void __iomem *regbase;
+ void __iomem *iobase;
+ struct clk *clk;
+ void *buffer;
+ dma_addr_t dma_buffer;
+
+ struct spi_nor *nor[HIFMC_MAX_CHIP_NUM];
+ u32 num_chip;
+};
+
+static inline int hisi_spi_nor_wait_op_finish(struct hifmc_host *host)
+{
+ u32 reg;
+
+ return readl_poll_timeout(host->regbase + FMC_INT, reg,
+ (reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT);
+}
+
+static int hisi_spi_nor_get_if_type(enum spi_nor_protocol proto)
+{
+ enum hifmc_iftype if_type;
+
+ switch (proto) {
+ case SNOR_PROTO_1_1_2:
+ if_type = IF_TYPE_DUAL;
+ break;
+ case SNOR_PROTO_1_2_2:
+ if_type = IF_TYPE_DIO;
+ break;
+ case SNOR_PROTO_1_1_4:
+ if_type = IF_TYPE_QUAD;
+ break;
+ case SNOR_PROTO_1_4_4:
+ if_type = IF_TYPE_QIO;
+ break;
+ case SNOR_PROTO_1_1_1:
+ default:
+ if_type = IF_TYPE_STD;
+ break;
+ }
+
+ return if_type;
+}
+
+static void hisi_spi_nor_init(struct hifmc_host *host)
+{
+ u32 reg;
+
+ reg = TIMING_CFG_TCSH(CS_HOLD_TIME)
+ | TIMING_CFG_TCSS(CS_SETUP_TIME)
+ | TIMING_CFG_TSHSL(CS_DESELECT_TIME);
+ writel(reg, host->regbase + FMC_SPI_TIMING_CFG);
+}
+
+static int hisi_spi_nor_prep(struct spi_nor *nor)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ int ret;
+
+ mutex_lock(&host->lock);
+
+ ret = clk_set_rate(host->clk, priv->clkrate);
+ if (ret)
+ goto out;
+
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ mutex_unlock(&host->lock);
+ return ret;
+}
+
+static void hisi_spi_nor_unprep(struct spi_nor *nor)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+
+ clk_disable_unprepare(host->clk);
+ mutex_unlock(&host->lock);
+}
+
+static int hisi_spi_nor_op_reg(struct spi_nor *nor,
+ u8 opcode, size_t len, u8 optype)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ u32 reg;
+
+ reg = FMC_CMD_CMD1(opcode);
+ writel(reg, host->regbase + FMC_CMD);
+
+ reg = FMC_DATA_NUM_CNT(len);
+ writel(reg, host->regbase + FMC_DATA_NUM);
+
+ reg = OP_CFG_FM_CS(priv->chipselect);
+ writel(reg, host->regbase + FMC_OP_CFG);
+
+ writel(0xff, host->regbase + FMC_INT_CLR);
+ reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START | optype;
+ writel(reg, host->regbase + FMC_OP);
+
+ return hisi_spi_nor_wait_op_finish(host);
+}
+
+static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ size_t len)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ int ret;
+
+ ret = hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_READ_DATA_EN);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf, host->iobase, len);
+ return 0;
+}
+
+static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode,
+ const u8 *buf, size_t len)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+
+ if (len)
+ memcpy_toio(host->iobase, buf, len);
+
+ return hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_WRITE_DATA_EN);
+}
+
+static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off,
+ dma_addr_t dma_buf, size_t len, u8 op_type)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ u8 if_type = 0;
+ u32 reg;
+
+ reg = readl(host->regbase + FMC_CFG);
+ reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK);
+ reg |= FMC_CFG_OP_MODE_NORMAL;
+ reg |= (nor->addr_nbytes == 4) ? SPI_NOR_ADDR_MODE_4BYTES
+ : SPI_NOR_ADDR_MODE_3BYTES;
+ writel(reg, host->regbase + FMC_CFG);
+
+ writel(start_off, host->regbase + FMC_ADDRL);
+ writel(dma_buf, host->regbase + FMC_DMA_SADDR_D0);
+ writel(FMC_DMA_LEN_SET(len), host->regbase + FMC_DMA_LEN);
+
+ reg = OP_CFG_FM_CS(priv->chipselect);
+ if (op_type == FMC_OP_READ)
+ if_type = hisi_spi_nor_get_if_type(nor->read_proto);
+ else
+ if_type = hisi_spi_nor_get_if_type(nor->write_proto);
+ reg |= OP_CFG_MEM_IF_TYPE(if_type);
+ if (op_type == FMC_OP_READ)
+ reg |= OP_CFG_DUMMY_NUM(nor->read_dummy >> 3);
+ writel(reg, host->regbase + FMC_OP_CFG);
+
+ writel(0xff, host->regbase + FMC_INT_CLR);
+ reg = OP_CTRL_RW_OP(op_type) | OP_CTRL_DMA_OP_READY;
+ reg |= (op_type == FMC_OP_READ)
+ ? OP_CTRL_RD_OPCODE(nor->read_opcode)
+ : OP_CTRL_WR_OPCODE(nor->program_opcode);
+ writel(reg, host->regbase + FMC_OP_DMA);
+
+ return hisi_spi_nor_wait_op_finish(host);
+}
+
+static ssize_t hisi_spi_nor_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *read_buf)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ size_t offset;
+ int ret;
+
+ for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) {
+ size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset);
+
+ ret = hisi_spi_nor_dma_transfer(nor,
+ from + offset, host->dma_buffer, trans, FMC_OP_READ);
+ if (ret) {
+ dev_warn(nor->dev, "DMA read timeout\n");
+ return ret;
+ }
+ memcpy(read_buf + offset, host->buffer, trans);
+ }
+
+ return len;
+}
+
+static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to,
+ size_t len, const u_char *write_buf)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ size_t offset;
+ int ret;
+
+ for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) {
+ size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset);
+
+ memcpy(host->buffer, write_buf + offset, trans);
+ ret = hisi_spi_nor_dma_transfer(nor,
+ to + offset, host->dma_buffer, trans, FMC_OP_WRITE);
+ if (ret) {
+ dev_warn(nor->dev, "DMA write timeout\n");
+ return ret;
+ }
+ }
+
+ return len;
+}
+
+static const struct spi_nor_controller_ops hisi_controller_ops = {
+ .prepare = hisi_spi_nor_prep,
+ .unprepare = hisi_spi_nor_unprep,
+ .read_reg = hisi_spi_nor_read_reg,
+ .write_reg = hisi_spi_nor_write_reg,
+ .read = hisi_spi_nor_read,
+ .write = hisi_spi_nor_write,
+};
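+
+/*
+ * Note (descriptive): the spi-nor core brackets each register and data
+ * access with ->prepare()/->unprepare(), so the host mutex taken in
+ * hisi_spi_nor_prep() is held across the whole operation and released
+ * in hisi_spi_nor_unprep().
+ */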
+
+/*
+ * Get SPI flash device information and register it as an MTD device.
+ */
+static int hisi_spi_nor_register(struct device_node *np,
+ struct hifmc_host *host)
+{
+ const struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_READ_1_1_2 |
+ SNOR_HWCAPS_READ_1_1_4 |
+ SNOR_HWCAPS_PP,
+ };
+ struct device *dev = host->dev;
+ struct spi_nor *nor;
+ struct hifmc_priv *priv;
+ struct mtd_info *mtd;
+ int ret;
+
+ nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL);
+ if (!nor)
+ return -ENOMEM;
+
+ nor->dev = dev;
+ spi_nor_set_flash_node(nor, np);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "reg", &priv->chipselect);
+ if (ret) {
+ dev_err(dev, "There's no reg property for %pOF\n",
+ np);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "spi-max-frequency",
+ &priv->clkrate);
+ if (ret) {
+ dev_err(dev, "There's no spi-max-frequency property for %pOF\n",
+ np);
+ return ret;
+ }
+ priv->host = host;
+ nor->priv = priv;
+ nor->controller_ops = &hisi_controller_ops;
+
+ ret = spi_nor_scan(nor, NULL, &hwcaps);
+ if (ret)
+ return ret;
+
+ mtd = &nor->mtd;
+ mtd->name = np->name;
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ return ret;
+
+ host->nor[host->num_chip] = nor;
+ host->num_chip++;
+ return 0;
+}
+
+static void hisi_spi_nor_unregister_all(struct hifmc_host *host)
+{
+ int i;
+
+ for (i = 0; i < host->num_chip; i++)
+ mtd_device_unregister(&host->nor[i]->mtd);
+}
+
+static int hisi_spi_nor_register_all(struct hifmc_host *host)
+{
+ struct device *dev = host->dev;
+ struct device_node *np;
+ int ret;
+
+ for_each_available_child_of_node(dev->of_node, np) {
+ ret = hisi_spi_nor_register(np, host);
+ if (ret) {
+ of_node_put(np);
+ goto fail;
+ }
+
+ if (host->num_chip == HIFMC_MAX_CHIP_NUM) {
+ dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n");
+ of_node_put(np);
+ break;
+ }
+ }
+
+ return 0;
+
+fail:
+ hisi_spi_nor_unregister_all(host);
+ return ret;
+}
+
+static int hisi_spi_nor_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hifmc_host *host;
+ int ret;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, host);
+ host->dev = dev;
+
+ host->regbase = devm_platform_ioremap_resource_byname(pdev, "control");
+ if (IS_ERR(host->regbase))
+ return PTR_ERR(host->regbase);
+
+ host->iobase = devm_platform_ioremap_resource_byname(pdev, "memory");
+ if (IS_ERR(host->iobase))
+ return PTR_ERR(host->iobase);
+
+ host->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_warn(dev, "Unable to set dma mask\n");
+ return ret;
+ }
+
+ host->buffer = dmam_alloc_coherent(dev, HIFMC_DMA_MAX_LEN,
+ &host->dma_buffer, GFP_KERNEL);
+ if (!host->buffer)
+ return -ENOMEM;
+
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ return ret;
+
+ mutex_init(&host->lock);
+ hisi_spi_nor_init(host);
+ ret = hisi_spi_nor_register_all(host);
+ if (ret)
+ mutex_destroy(&host->lock);
+
+ clk_disable_unprepare(host->clk);
+ return ret;
+}
+
+static int hisi_spi_nor_remove(struct platform_device *pdev)
+{
+ struct hifmc_host *host = platform_get_drvdata(pdev);
+
+ hisi_spi_nor_unregister_all(host);
+ mutex_destroy(&host->lock);
+ return 0;
+}
+
+static const struct of_device_id hisi_spi_nor_dt_ids[] = {
+ { .compatible = "hisilicon,fmc-spi-nor"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hisi_spi_nor_dt_ids);
+
+static struct platform_driver hisi_spi_nor_driver = {
+ .driver = {
+ .name = "hisi-sfc",
+ .of_match_table = hisi_spi_nor_dt_ids,
+ },
+ .probe = hisi_spi_nor_probe,
+ .remove = hisi_spi_nor_remove,
+};
+module_platform_driver(hisi_spi_nor_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HiSilicon SPI Nor Flash Controller Driver");
diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
new file mode 100644
index 0000000000..5d8f47ab14
--- /dev/null
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI NOR driver for NXP SPI Flash Interface (SPIFI)
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on Freescale QuadSPI driver:
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+/* NXP SPIFI registers, bits and macros */
+#define SPIFI_CTRL 0x000
+#define SPIFI_CTRL_TIMEOUT(timeout) (timeout)
+#define SPIFI_CTRL_CSHIGH(cshigh) ((cshigh) << 16)
+#define SPIFI_CTRL_MODE3 BIT(23)
+#define SPIFI_CTRL_DUAL BIT(28)
+#define SPIFI_CTRL_FBCLK BIT(30)
+#define SPIFI_CMD 0x004
+#define SPIFI_CMD_DATALEN(dlen) ((dlen) & 0x3fff)
+#define SPIFI_CMD_DOUT BIT(15)
+#define SPIFI_CMD_INTLEN(ilen) ((ilen) << 16)
+#define SPIFI_CMD_FIELDFORM(field) ((field) << 19)
+#define SPIFI_CMD_FIELDFORM_ALL_SERIAL SPIFI_CMD_FIELDFORM(0x0)
+#define SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA SPIFI_CMD_FIELDFORM(0x1)
+#define SPIFI_CMD_FRAMEFORM(frame) ((frame) << 21)
+#define SPIFI_CMD_FRAMEFORM_OPCODE_ONLY SPIFI_CMD_FRAMEFORM(0x1)
+#define SPIFI_CMD_OPCODE(op) ((op) << 24)
+#define SPIFI_ADDR 0x008
+#define SPIFI_IDATA 0x00c
+#define SPIFI_CLIMIT 0x010
+#define SPIFI_DATA 0x014
+#define SPIFI_MCMD 0x018
+#define SPIFI_STAT 0x01c
+#define SPIFI_STAT_MCINIT BIT(0)
+#define SPIFI_STAT_CMD BIT(1)
+#define SPIFI_STAT_RESET BIT(4)
+
+#define SPI_NOR_MAX_ID_LEN 6
+
+struct nxp_spifi {
+ struct device *dev;
+ struct clk *clk_spifi;
+ struct clk *clk_reg;
+ void __iomem *io_base;
+ void __iomem *flash_base;
+ struct spi_nor nor;
+ bool memory_mode;
+ u32 mcmd;
+};
+
+static int nxp_spifi_wait_for_cmd(struct nxp_spifi *spifi)
+{
+ u8 stat;
+ int ret;
+
+ ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+ !(stat & SPIFI_STAT_CMD), 10, 30);
+ if (ret)
+ dev_warn(spifi->dev, "command timed out\n");
+
+ return ret;
+}
+
+static int nxp_spifi_reset(struct nxp_spifi *spifi)
+{
+ u8 stat;
+ int ret;
+
+ writel(SPIFI_STAT_RESET, spifi->io_base + SPIFI_STAT);
+ ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+ !(stat & SPIFI_STAT_RESET), 10, 30);
+ if (ret)
+ dev_warn(spifi->dev, "state reset timed out\n");
+
+ return ret;
+}
+
+static int nxp_spifi_set_memory_mode_off(struct nxp_spifi *spifi)
+{
+ int ret;
+
+ if (!spifi->memory_mode)
+ return 0;
+
+ ret = nxp_spifi_reset(spifi);
+ if (ret)
+ dev_err(spifi->dev, "unable to enter command mode\n");
+ else
+ spifi->memory_mode = false;
+
+ return ret;
+}
+
+static int nxp_spifi_set_memory_mode_on(struct nxp_spifi *spifi)
+{
+ u8 stat;
+ int ret;
+
+ if (spifi->memory_mode)
+ return 0;
+
+ writel(spifi->mcmd, spifi->io_base + SPIFI_MCMD);
+ ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+ stat & SPIFI_STAT_MCINIT, 10, 30);
+ if (ret)
+ dev_err(spifi->dev, "unable to enter memory mode\n");
+ else
+ spifi->memory_mode = true;
+
+ return ret;
+}
+
+static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ size_t len)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ cmd = SPIFI_CMD_DATALEN(len) |
+ SPIFI_CMD_OPCODE(opcode) |
+ SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ while (len--)
+ *buf++ = readb(spifi->io_base + SPIFI_DATA);
+
+ return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ cmd = SPIFI_CMD_DOUT |
+ SPIFI_CMD_DATALEN(len) |
+ SPIFI_CMD_OPCODE(opcode) |
+ SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ while (len--)
+ writeb(*buf++, spifi->io_base + SPIFI_DATA);
+
+ return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static ssize_t nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *buf)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_on(spifi);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf, spifi->flash_base + from, len);
+
+ return len;
+}
+
+static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+ size_t i;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ writel(to, spifi->io_base + SPIFI_ADDR);
+
+ cmd = SPIFI_CMD_DOUT |
+ SPIFI_CMD_DATALEN(len) |
+ SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_OPCODE(nor->program_opcode) |
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ for (i = 0; i < len; i++)
+ writeb(buf[i], spifi->io_base + SPIFI_DATA);
+
+ ret = nxp_spifi_wait_for_cmd(spifi);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ writel(offs, spifi->io_base + SPIFI_ADDR);
+
+ cmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_OPCODE(nor->erase_opcode) |
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
+{
+ switch (spifi->nor.read_proto) {
+ case SNOR_PROTO_1_1_1:
+ spifi->mcmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL;
+ break;
+ case SNOR_PROTO_1_1_2:
+ case SNOR_PROTO_1_1_4:
+ spifi->mcmd = SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA;
+ break;
+ default:
+ dev_err(spifi->dev, "unsupported SPI read mode\n");
+ return -EINVAL;
+ }
+
+ /* Memory mode supports address lengths between 1 and 4 bytes */
+ if (spifi->nor.addr_nbytes < 1 || spifi->nor.addr_nbytes > 4)
+ return -EINVAL;
+
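+ /*
+ * The SPIFI frame form field encodes "opcode plus N address bytes" as
+ * N + 1, so e.g. a 3-byte address (illustrative) yields frame form 4.
+ */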
+ spifi->mcmd |= SPIFI_CMD_OPCODE(spifi->nor.read_opcode) |
+ SPIFI_CMD_INTLEN(spifi->nor.read_dummy / 8) |
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
+
+ return 0;
+}
+
+static void nxp_spifi_dummy_id_read(struct spi_nor *nor)
+{
+ u8 id[SPI_NOR_MAX_ID_LEN];
+
+ nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
+ SPI_NOR_MAX_ID_LEN);
+}
+
+static const struct spi_nor_controller_ops nxp_spifi_controller_ops = {
+ .read_reg = nxp_spifi_read_reg,
+ .write_reg = nxp_spifi_write_reg,
+ .read = nxp_spifi_read,
+ .write = nxp_spifi_write,
+ .erase = nxp_spifi_erase,
+};
+
+static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
+ struct device_node *np)
+{
+ struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_PP,
+ };
+ u32 ctrl, property;
+ u16 mode = 0;
+ int ret;
+
+ if (!of_property_read_u32(np, "spi-rx-bus-width", &property)) {
+ switch (property) {
+ case 1:
+ break;
+ case 2:
+ mode |= SPI_RX_DUAL;
+ break;
+ case 4:
+ mode |= SPI_RX_QUAD;
+ break;
+ default:
+ dev_err(spifi->dev, "unsupported rx-bus-width\n");
+ return -EINVAL;
+ }
+ }
+
+ if (of_property_read_bool(np, "spi-cpha"))
+ mode |= SPI_CPHA;
+
+ if (of_property_read_bool(np, "spi-cpol"))
+ mode |= SPI_CPOL;
+
+ /* Setup control register defaults */
+ ctrl = SPIFI_CTRL_TIMEOUT(1000) |
+ SPIFI_CTRL_CSHIGH(15) |
+ SPIFI_CTRL_FBCLK;
+
+ if (mode & SPI_RX_DUAL) {
+ ctrl |= SPIFI_CTRL_DUAL;
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+ } else if (mode & SPI_RX_QUAD) {
+ ctrl &= ~SPIFI_CTRL_DUAL;
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+ } else {
+ ctrl |= SPIFI_CTRL_DUAL;
+ }
+
+ switch (mode & SPI_MODE_X_MASK) {
+ case SPI_MODE_0:
+ ctrl &= ~SPIFI_CTRL_MODE3;
+ break;
+ case SPI_MODE_3:
+ ctrl |= SPIFI_CTRL_MODE3;
+ break;
+ default:
+ dev_err(spifi->dev, "only mode 0 and 3 supported\n");
+ return -EINVAL;
+ }
+
+ writel(ctrl, spifi->io_base + SPIFI_CTRL);
+
+ spifi->nor.dev = spifi->dev;
+ spi_nor_set_flash_node(&spifi->nor, np);
+ spifi->nor.priv = spifi;
+ spifi->nor.controller_ops = &nxp_spifi_controller_ops;
+
+ /*
+ * The first read on a hard reset isn't reliable, so do a
+ * dummy read of the id before calling spi_nor_scan().
+ * The reason for this problem is unknown.
+ *
+ * The official NXP spifilib uses more or less the same
+ * workaround that is applied here by reading the device
+ * id multiple times.
+ */
+ nxp_spifi_dummy_id_read(&spifi->nor);
+
+ ret = spi_nor_scan(&spifi->nor, NULL, &hwcaps);
+ if (ret) {
+ dev_err(spifi->dev, "device scan failed\n");
+ return ret;
+ }
+
+ ret = nxp_spifi_setup_memory_cmd(spifi);
+ if (ret) {
+ dev_err(spifi->dev, "memory command setup failed\n");
+ return ret;
+ }
+
+ ret = mtd_device_register(&spifi->nor.mtd, NULL, 0);
+ if (ret) {
+ dev_err(spifi->dev, "mtd device parse failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nxp_spifi_probe(struct platform_device *pdev)
+{
+ struct device_node *flash_np;
+ struct nxp_spifi *spifi;
+ int ret;
+
+ spifi = devm_kzalloc(&pdev->dev, sizeof(*spifi), GFP_KERNEL);
+ if (!spifi)
+ return -ENOMEM;
+
+ spifi->io_base = devm_platform_ioremap_resource_byname(pdev, "spifi");
+ if (IS_ERR(spifi->io_base))
+ return PTR_ERR(spifi->io_base);
+
+ spifi->flash_base = devm_platform_ioremap_resource_byname(pdev, "flash");
+ if (IS_ERR(spifi->flash_base))
+ return PTR_ERR(spifi->flash_base);
+
+ spifi->clk_spifi = devm_clk_get_enabled(&pdev->dev, "spifi");
+ if (IS_ERR(spifi->clk_spifi)) {
+ dev_err(&pdev->dev, "spifi clock not found or unable to enable\n");
+ return PTR_ERR(spifi->clk_spifi);
+ }
+
+ spifi->clk_reg = devm_clk_get_enabled(&pdev->dev, "reg");
+ if (IS_ERR(spifi->clk_reg)) {
+ dev_err(&pdev->dev, "reg clock not found or unable to enable\n");
+ return PTR_ERR(spifi->clk_reg);
+ }
+
+ spifi->dev = &pdev->dev;
+ platform_set_drvdata(pdev, spifi);
+
+ /* Initialize and reset device */
+ nxp_spifi_reset(spifi);
+ writel(0, spifi->io_base + SPIFI_IDATA);
+ writel(0, spifi->io_base + SPIFI_MCMD);
+ nxp_spifi_reset(spifi);
+
+ flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
+ if (!flash_np) {
+ dev_err(&pdev->dev, "no SPI flash device to configure\n");
+ return -ENODEV;
+ }
+
+ ret = nxp_spifi_setup_flash(spifi, flash_np);
+ of_node_put(flash_np);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to setup flash chip\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nxp_spifi_remove(struct platform_device *pdev)
+{
+ struct nxp_spifi *spifi = platform_get_drvdata(pdev);
+
+ mtd_device_unregister(&spifi->nor.mtd);
+
+ return 0;
+}
+
+static const struct of_device_id nxp_spifi_match[] = {
+ {.compatible = "nxp,lpc1773-spifi"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nxp_spifi_match);
+
+static struct platform_driver nxp_spifi_driver = {
+ .probe = nxp_spifi_probe,
+ .remove = nxp_spifi_remove,
+ .driver = {
+ .name = "nxp-spifi",
+ .of_match_table = nxp_spifi_match,
+ },
+};
+module_platform_driver(nxp_spifi_driver);
+
+MODULE_DESCRIPTION("NXP SPI Flash Interface driver");
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
new file mode 100644
index 0000000000..1b0c6770c1
--- /dev/null
+++ b/drivers/mtd/spi-nor/core.c
@@ -0,0 +1,3813 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
+ * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
+ *
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/sched/task_stack.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/spi/flash.h>
+
+#include "core.h"
+
+/* Define max times to check status register before we give up. */
+
+/*
+ * For everything but full-chip erase; probably could be much smaller, but kept
+ * around for safety for now
+ */
+#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
+
+/*
+ * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
+ * for larger flash
+ */
+#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
+
+#define SPI_NOR_MAX_ADDR_NBYTES 4
+
+#define SPI_NOR_SRST_SLEEP_MIN 200
+#define SPI_NOR_SRST_SLEEP_MAX 400
+
+/**
+ * spi_nor_get_cmd_ext() - Get the command opcode extension based on the
+ * extension type.
+ * @nor: pointer to a 'struct spi_nor'
+ * @op: pointer to the 'struct spi_mem_op' whose properties
+ * need to be initialized.
+ *
+ * Right now, only "repeat" and "invert" are supported.
+ *
+ * Return: The opcode extension.
+ */
+static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
+ const struct spi_mem_op *op)
+{
+ switch (nor->cmd_ext_type) {
+ case SPI_NOR_EXT_INVERT:
+ return ~op->cmd.opcode;
+
+ case SPI_NOR_EXT_REPEAT:
+ return op->cmd.opcode;
+
+ default:
+ dev_err(nor->dev, "Unknown command extension type\n");
+ return 0;
+ }
+}
+
+/**
+ * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
+ * @nor: pointer to a 'struct spi_nor'
+ * @op: pointer to the 'struct spi_mem_op' whose properties
+ * need to be initialized.
+ * @proto: the protocol from which the properties need to be set.
+ */
+void spi_nor_spimem_setup_op(const struct spi_nor *nor,
+ struct spi_mem_op *op,
+ const enum spi_nor_protocol proto)
+{
+ u8 ext;
+
+ op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);
+
+ if (op->addr.nbytes)
+ op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
+
+ if (op->dummy.nbytes)
+ op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
+
+ if (op->data.nbytes)
+ op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
+
+ if (spi_nor_protocol_is_dtr(proto)) {
+ /*
+ * SPIMEM supports mixed DTR modes, but right now we can only
+ * have all phases either DTR or STR. IOW, SPIMEM can have
+ * something like 4S-4D-4D, but SPI NOR can't. So, set all 4
+ * phases to either DTR or STR.
+ */
+ op->cmd.dtr = true;
+ op->addr.dtr = true;
+ op->dummy.dtr = true;
+ op->data.dtr = true;
+
+ /* 2 bytes per clock cycle in DTR mode. */
+ op->dummy.nbytes *= 2;
+
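+ /*
+ * Illustrative: with SPI_NOR_EXT_INVERT, a 0x0b opcode is
+ * sent as the two-byte command 0x0b 0xf4 (0xf4 = ~0x0b).
+ */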
+ ext = spi_nor_get_cmd_ext(nor, op);
+ op->cmd.opcode = (op->cmd.opcode << 8) | ext;
+ op->cmd.nbytes = 2;
+ }
+}
+
+/**
+ * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
+ * transfer
+ * @nor: pointer to 'struct spi_nor'
+ * @op: pointer to 'struct spi_mem_op' template for transfer
+ *
+ * If we have to use the bounce buffer, the data field in @op will be updated.
+ *
+ * Return: true if the bounce buffer is needed, false if not
+ */
+static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
+{
+ /* op->data.buf.in occupies the same memory as op->data.buf.out */
+ if (object_is_on_stack(op->data.buf.in) ||
+ !virt_addr_valid(op->data.buf.in)) {
+ if (op->data.nbytes > nor->bouncebuf_size)
+ op->data.nbytes = nor->bouncebuf_size;
+ op->data.buf.in = nor->bouncebuf;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * spi_nor_spimem_exec_op() - execute a memory operation
+ * @nor: pointer to 'struct spi_nor'
+ * @op: pointer to 'struct spi_mem_op' template for transfer
+ *
+ * Return: 0 on success, -error otherwise.
+ */
+static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
+{
+ int error;
+
+ error = spi_mem_adjust_op_size(nor->spimem, op);
+ if (error)
+ return error;
+
+ return spi_mem_exec_op(nor->spimem, op);
+}
+
+int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, size_t len)
+{
+ if (spi_nor_protocol_is_dtr(nor->reg_proto))
+ return -EOPNOTSUPP;
+
+ return nor->controller_ops->read_reg(nor, opcode, buf, len);
+}
+
+int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
+ const u8 *buf, size_t len)
+{
+ if (spi_nor_protocol_is_dtr(nor->reg_proto))
+ return -EOPNOTSUPP;
+
+ return nor->controller_ops->write_reg(nor, opcode, buf, len);
+}
+
+static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
+{
+ if (spi_nor_protocol_is_dtr(nor->reg_proto))
+ return -EOPNOTSUPP;
+
+ return nor->controller_ops->erase(nor, offs);
+}
+
+/**
+ * spi_nor_spimem_read_data() - read data from flash's memory region via
+ * spi-mem
+ * @nor: pointer to 'struct spi_nor'
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @buf: pointer to dst buffer
+ *
+ * Return: number of bytes read successfully, -errno otherwise
+ */
+static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
+ size_t len, u8 *buf)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0),
+ SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
+ SPI_MEM_OP_DATA_IN(len, buf, 0));
+ bool usebouncebuf;
+ ssize_t nbytes;
+ int error;
+
+ spi_nor_spimem_setup_op(nor, &op, nor->read_proto);
+
+ /* convert the dummy cycles to the number of bytes */
+ op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
+ if (spi_nor_protocol_is_dtr(nor->read_proto))
+ op.dummy.nbytes *= 2;
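+ /*
+ * Illustrative: a 1-1-4 read with 8 dummy cycles needs
+ * 8 * 4 / 8 = 4 dummy bytes; DTR doubles that because data is
+ * clocked on both edges.
+ */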
+
+ usebouncebuf = spi_nor_spimem_bounce(nor, &op);
+
+ if (nor->dirmap.rdesc) {
+ nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
+ op.data.nbytes, op.data.buf.in);
+ } else {
+ error = spi_nor_spimem_exec_op(nor, &op);
+ if (error)
+ return error;
+ nbytes = op.data.nbytes;
+ }
+
+ if (usebouncebuf && nbytes > 0)
+ memcpy(buf, op.data.buf.in, nbytes);
+
+ return nbytes;
+}
+
+/**
+ * spi_nor_read_data() - read data from flash memory
+ * @nor: pointer to 'struct spi_nor'
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @buf: pointer to dst buffer
+ *
+ * Return: number of bytes read successfully, -errno otherwise
+ */
+ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
+{
+ if (nor->spimem)
+ return spi_nor_spimem_read_data(nor, from, len, buf);
+
+ return nor->controller_ops->read(nor, from, len, buf);
+}
+
+/**
+ * spi_nor_spimem_write_data() - write data to flash memory via
+ * spi-mem
+ * @nor: pointer to 'struct spi_nor'
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @buf: pointer to src buffer
+ *
+ * Return: number of bytes written successfully, -errno otherwise
+ */
+static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
+ size_t len, const u8 *buf)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(len, buf, 0));
+ ssize_t nbytes;
+ int error;
+
+ if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+ op.addr.nbytes = 0;
+
+ spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
+
+ if (spi_nor_spimem_bounce(nor, &op))
+ memcpy(nor->bouncebuf, buf, op.data.nbytes);
+
+ if (nor->dirmap.wdesc) {
+ nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
+ op.data.nbytes, op.data.buf.out);
+ } else {
+ error = spi_nor_spimem_exec_op(nor, &op);
+ if (error)
+ return error;
+ nbytes = op.data.nbytes;
+ }
+
+ return nbytes;
+}
+
+/**
+ * spi_nor_write_data() - write data to flash memory
+ * @nor: pointer to 'struct spi_nor'
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @buf: pointer to src buffer
+ *
+ * Return: number of bytes written successfully, -errno otherwise
+ */
+ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf)
+{
+ if (nor->spimem)
+ return spi_nor_spimem_write_data(nor, to, len, buf);
+
+ return nor->controller_ops->write(nor, to, len, buf);
+}
+
+/**
+ * spi_nor_read_any_reg() - read any register from flash memory, nonvolatile or
+ * volatile.
+ * @nor: pointer to 'struct spi_nor'.
+ * @op: SPI memory operation. op->data.buf must be DMA-able.
+ * @proto: SPI protocol to use for the register operation.
+ *
+ * Return: zero on success, -errno otherwise
+ */
+int spi_nor_read_any_reg(struct spi_nor *nor, struct spi_mem_op *op,
+ enum spi_nor_protocol proto)
+{
+ if (!nor->spimem)
+ return -EOPNOTSUPP;
+
+ spi_nor_spimem_setup_op(nor, op, proto);
+ return spi_nor_spimem_exec_op(nor, op);
+}
+
+/**
+ * spi_nor_write_any_volatile_reg() - write any volatile register to flash
+ * memory.
+ * @nor: pointer to 'struct spi_nor'
+ * @op: SPI memory operation. op->data.buf must be DMA-able.
+ * @proto: SPI protocol to use for the register operation.
+ *
+ * Writing volatile registers is instant according to some manufacturers
+ * (Cypress, Micron) and does not need any status polling.
+ *
+ * Return: zero on success, -errno otherwise
+ */
+int spi_nor_write_any_volatile_reg(struct spi_nor *nor, struct spi_mem_op *op,
+ enum spi_nor_protocol proto)
+{
+ int ret;
+
+ if (!nor->spimem)
+ return -EOPNOTSUPP;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+ spi_nor_spimem_setup_op(nor, op, proto);
+ return spi_nor_spimem_exec_op(nor, op);
+}
+
+/**
+ * spi_nor_write_enable() - Set write enable latch with Write Enable command.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_enable(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = SPI_NOR_WREN_OP;
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_write_disable() - Send Write Disable instruction to the chip.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_disable(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = SPI_NOR_WRDI_OP;
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_read_id() - Read the JEDEC ID.
+ * @nor: pointer to 'struct spi_nor'.
+ * @naddr: number of address bytes to send. Can be zero if the operation
+ * does not need to send an address.
+ * @ndummy: number of dummy bytes to send after an opcode or address. Can
+ * be zero if the operation does not require dummy bytes.
+ * @id: pointer to a DMA-able buffer where the value of the JEDEC ID
+ * will be written.
+ * @proto: the SPI protocol for register operation.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_read_id(struct spi_nor *nor, u8 naddr, u8 ndummy, u8 *id,
+ enum spi_nor_protocol proto)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_NOR_READID_OP(naddr, ndummy, id, SPI_NOR_MAX_ID_LEN);
+
+ spi_nor_spimem_setup_op(nor, &op, proto);
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
+ SPI_NOR_MAX_ID_LEN);
+ }
+ return ret;
+}
+
+/**
+ * spi_nor_read_sr() - Read the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to a DMA-able buffer where the value of the
+ * Status Register will be written. Should be at least 2 bytes.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = SPI_NOR_RDSR_OP(sr);
+
+ if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
+ op.addr.nbytes = nor->params->rdsr_addr_nbytes;
+ op.dummy.nbytes = nor->params->rdsr_dummy;
+ /*
+ * We don't want to read only one byte in DTR mode. So,
+ * read 2 and then discard the second byte.
+ */
+ op.data.nbytes = 2;
+ }
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
+ 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_read_cr() - Read the Configuration Register using the
+ * SPINOR_OP_RDCR (35h) command.
+ * @nor: pointer to 'struct spi_nor'
+ * @cr: pointer to a DMA-able buffer where the value of the
+ * Configuration Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = SPI_NOR_RDCR_OP(cr);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
+ 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading CR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_set_4byte_addr_mode_en4b_ex4b() - Enter/Exit 4-byte address mode
+ * using SPINOR_OP_EN4B/SPINOR_OP_EX4B. Typically used by
+ * Winbond and Macronix.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_set_4byte_addr_mode_en4b_ex4b(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = SPI_NOR_EN4B_EX4B_OP(enable);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor,
+ enable ? SPINOR_OP_EN4B :
+ SPINOR_OP_EX4B,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_set_4byte_addr_mode_wren_en4b_ex4b() - Set 4-byte address mode using
+ * SPINOR_OP_WREN followed by SPINOR_OP_EN4B or SPINOR_OP_EX4B. Typically used
+ * by ST and Micron flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_set_4byte_addr_mode_wren_en4b_ex4b(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_set_4byte_addr_mode_en4b_ex4b(nor, enable);
+ if (ret)
+ return ret;
+
+ return spi_nor_write_disable(nor);
+}
+
+/**
+ * spi_nor_set_4byte_addr_mode_brwr() - Set 4-byte address mode using
+ * SPINOR_OP_BRWR. Typically used by Spansion flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * 8-bit volatile bank register used to define the A[30:24] bits. The MSB
+ * (bit[7]) is used to enable/disable 4-byte address mode: when the MSB is set
+ * to '1', 4-byte address mode is active and the A[30:24] bits are don't care.
+ * The write instruction is SPINOR_OP_BRWR (17h) with 1 byte of data.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_set_4byte_addr_mode_brwr(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ nor->bouncebuf[0] = enable << 7;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = SPI_NOR_BRWR_OP(nor->bouncebuf);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
+ nor->bouncebuf, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
+ * for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+int spi_nor_sr_ready(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ return !(nor->bouncebuf[0] & SR_WIP);
+}
+
+/**
+ * spi_nor_use_parallel_locking() - Checks if RWW locking scheme shall be used
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: true if parallel locking is enabled, false otherwise.
+ */
+static bool spi_nor_use_parallel_locking(struct spi_nor *nor)
+{
+ return nor->flags & SNOR_F_RWW;
+}
+
+/* Locking helpers for status read operations */
+static int spi_nor_rww_start_rdst(struct spi_nor *nor)
+{
+ struct spi_nor_rww *rww = &nor->rww;
+ int ret = -EAGAIN;
+
+ mutex_lock(&nor->lock);
+
+ if (rww->ongoing_io || rww->ongoing_rd)
+ goto busy;
+
+ rww->ongoing_io = true;
+ rww->ongoing_rd = true;
+ ret = 0;
+
+busy:
+ mutex_unlock(&nor->lock);
+ return ret;
+}
+
+static void spi_nor_rww_end_rdst(struct spi_nor *nor)
+{
+ struct spi_nor_rww *rww = &nor->rww;
+
+ mutex_lock(&nor->lock);
+
+ rww->ongoing_io = false;
+ rww->ongoing_rd = false;
+
+ mutex_unlock(&nor->lock);
+}
+
+static int spi_nor_lock_rdst(struct spi_nor *nor)
+{
+ if (spi_nor_use_parallel_locking(nor))
+ return spi_nor_rww_start_rdst(nor);
+
+ return 0;
+}
+
+static void spi_nor_unlock_rdst(struct spi_nor *nor)
+{
+ if (spi_nor_use_parallel_locking(nor)) {
+ spi_nor_rww_end_rdst(nor);
+ wake_up(&nor->rww.wait);
+ }
+}
+
+/**
+ * spi_nor_ready() - Query the flash to see if it is ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int spi_nor_ready(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = spi_nor_lock_rdst(nor);
+ if (ret)
+ return 0;
+
+ /* Flashes might override the standard routine. */
+ if (nor->params->ready)
+ ret = nor->params->ready(nor);
+ else
+ ret = spi_nor_sr_ready(nor);
+
+ spi_nor_unlock_rdst(nor);
+
+ return ret;
+}
+
+/**
+ * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
+ * Status Register until ready, or timeout occurs.
+ * @nor: pointer to "struct spi_nor".
+ * @timeout_jiffies: jiffies to wait until timeout.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
+ unsigned long timeout_jiffies)
+{
+ unsigned long deadline;
+ int timeout = 0, ret;
+
+ deadline = jiffies + timeout_jiffies;
+
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ ret = spi_nor_ready(nor);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ return 0;
+
+ cond_resched();
+ }
+
+ dev_dbg(nor->dev, "flash operation timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
+ * flash to be ready, or timeout occurs.
+ * @nor: pointer to "struct spi_nor".
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_wait_till_ready(struct spi_nor *nor)
+{
+ return spi_nor_wait_till_ready_with_timeout(nor,
+ DEFAULT_READY_WAIT_JIFFIES);
+}
+
+/**
+ * spi_nor_global_block_unlock() - Unlock Global Block Protection.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_global_block_unlock(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = SPI_NOR_GBULK_OP;
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK,
+ NULL, 0);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_write_sr() - Write the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to DMA-able buffer to write to the Status Register.
+ * @len: number of bytes to write to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = SPI_NOR_WRSR_OP(sr, len);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
+ len);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d writing SR\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
+ * ensure that the value read back matches the byte written.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
+{
+ int ret;
+
+ nor->bouncebuf[0] = sr1;
+
+ ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] != sr1) {
+ dev_dbg(nor->dev, "SR1: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
+ * Status Register 2 in one shot. Ensure that the byte written in the Status
+ * Register 1 matches the value read back, and that the 16-bit Write did not
+ * affect what was already in the Status Register 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register 1.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+ int ret;
+ u8 *sr_cr = nor->bouncebuf;
+ u8 cr_written;
+
+ /* Make sure we don't overwrite the contents of Status Register 2. */
+ if (!(nor->flags & SNOR_F_NO_READ_CR)) {
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+ } else if (spi_nor_get_protocol_width(nor->read_proto) == 4 &&
+ spi_nor_get_protocol_width(nor->write_proto) == 4 &&
+ nor->params->quad_enable) {
+ /*
+ * If the Status Register 2 Read command (35h) is not
+ * supported, we should at least be sure we don't
+ * change the value of the SR2 Quad Enable bit.
+ *
+ * When the Quad Enable method is set and the buswidth is 4, we
+ * can safely assume that the value of the QE bit is one, as a
+ * consequence of the nor->params->quad_enable() call.
+ *
+ * According to the JESD216 revB standard, BFPT DWORDS[15],
+ * bits 22:20, the 16-bit Write Status (01h) command is
+ * available just for the cases in which the QE bit is
+ * described in SR2 at BIT(1).
+ */
+ sr_cr[1] = SR2_QUAD_EN_BIT1;
+ } else {
+ sr_cr[1] = 0;
+ }
+
+ sr_cr[0] = sr1;
+
+ ret = spi_nor_write_sr(nor, sr_cr, 2);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ if (sr1 != sr_cr[0]) {
+ dev_dbg(nor->dev, "SR: Read back test failed\n");
+ return -EIO;
+ }
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return 0;
+
+ cr_written = sr_cr[1];
+
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+
+ if (cr_written != sr_cr[1]) {
+ dev_dbg(nor->dev, "CR: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
+ * Configuration Register in one shot. Ensure that the byte written in the
+ * Configuration Register matches the value read back, and that the 16-bit
+ * Write did not affect what was already in the Status Register 1.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @cr: byte value to be written to the Configuration Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
+{
+ int ret;
+ u8 *sr_cr = nor->bouncebuf;
+ u8 sr_written;
+
+ /* Keep the current value of the Status Register 1. */
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ sr_cr[1] = cr;
+
+ ret = spi_nor_write_sr(nor, sr_cr, 2);
+ if (ret)
+ return ret;
+
+ sr_written = sr_cr[0];
+
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ if (sr_written != sr_cr[0]) {
+ dev_dbg(nor->dev, "SR: Read back test failed\n");
+ return -EIO;
+ }
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return 0;
+
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+
+ if (cr != sr_cr[1]) {
+ dev_dbg(nor->dev, "CR: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
+ * the value read back matches the byte written, without affecting other bits
+ * in the Status Registers 1 and 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+ if (nor->flags & SNOR_F_HAS_16BIT_SR)
+ return spi_nor_write_16bit_sr_and_check(nor, sr1);
+
+ return spi_nor_write_sr1_and_check(nor, sr1);
+}
+
+/**
+ * spi_nor_write_sr2() - Write the Status Register 2 using the
+ * SPINOR_OP_WRSR2 (3eh) command.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr2: pointer to DMA-able buffer to write to the Status Register 2.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = SPI_NOR_WRSR2_OP(sr2);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
+ sr2, 1);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d writing SR2\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_read_sr2() - Read the Status Register 2 using the
+ * SPINOR_OP_RDSR2 (3fh) command.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr2: pointer to DMA-able buffer where the value of the
+ * Status Register 2 will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = SPI_NOR_RDSR2_OP(sr2);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2,
+ 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR2\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_erase_chip() - Erase the entire flash memory.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_erase_chip(struct spi_nor *nor)
+{
+ int ret;
+
+ dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
+
+ if (nor->spimem) {
+ struct spi_mem_op op = SPI_NOR_CHIP_ERASE_OP;
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor,
+ SPINOR_OP_CHIP_ERASE,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d erasing chip\n", ret);
+
+ return ret;
+}
+
+static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == opcode)
+ return table[i][1];
+
+ /* No conversion found, keep input op code. */
+ return opcode;
+}
+
+u8 spi_nor_convert_3to4_read(u8 opcode)
+{
+ static const u8 spi_nor_3to4_read[][2] = {
+ { SPINOR_OP_READ, SPINOR_OP_READ_4B },
+ { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
+ { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
+ { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
+ { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
+ { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
+ { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
+ { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
+
+ { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
+ { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
+ { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
+ ARRAY_SIZE(spi_nor_3to4_read));
+}
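+
+/*
+ * Example: spi_nor_convert_3to4_read() maps the 3-byte Read opcode
+ * 03h (SPINOR_OP_READ) to its 4-byte variant 13h (SPINOR_OP_READ_4B);
+ * opcodes with no 4-byte variant are returned unchanged.
+ */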
+
+static u8 spi_nor_convert_3to4_program(u8 opcode)
+{
+ static const u8 spi_nor_3to4_program[][2] = {
+ { SPINOR_OP_PP, SPINOR_OP_PP_4B },
+ { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
+ { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
+ { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
+ { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
+ ARRAY_SIZE(spi_nor_3to4_program));
+}
+
+static u8 spi_nor_convert_3to4_erase(u8 opcode)
+{
+ static const u8 spi_nor_3to4_erase[][2] = {
+ { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
+ { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
+ { SPINOR_OP_SE, SPINOR_OP_SE_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
+ ARRAY_SIZE(spi_nor_3to4_erase));
+}
+
+static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
+{
+ return !!nor->params->erase_map.uniform_erase_type;
+}
+
+static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
+{
+ nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
+ nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
+ nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
+
+ if (!spi_nor_has_uniform_erase(nor)) {
+ struct spi_nor_erase_map *map = &nor->params->erase_map;
+ struct spi_nor_erase_type *erase;
+ int i;
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ erase = &map->erase_type[i];
+ erase->opcode =
+ spi_nor_convert_3to4_erase(erase->opcode);
+ }
+ }
+}
+
+static int spi_nor_prep(struct spi_nor *nor)
+{
+ int ret = 0;
+
+ if (nor->controller_ops && nor->controller_ops->prepare)
+ ret = nor->controller_ops->prepare(nor);
+
+ return ret;
+}
+
+static void spi_nor_unprep(struct spi_nor *nor)
+{
+ if (nor->controller_ops && nor->controller_ops->unprepare)
+ nor->controller_ops->unprepare(nor);
+}
+
+static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len,
+ u8 *first, u8 *last)
+{
+ /* This is currently safe, the number of banks being very small */
+ *first = DIV_ROUND_DOWN_ULL(start, bank_size);
+ *last = DIV_ROUND_DOWN_ULL(start + len - 1, bank_size);
+}
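+
+/*
+ * Worked example (illustrative bank size): with bank_size = 16 MiB,
+ * start = 0x00ff0000 and len = 0x20000 cross a bank boundary, so
+ * *first = 0 and *last = 1.
+ */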
+
+/* Generic helpers for internal locking and serialization */
+static bool spi_nor_rww_start_io(struct spi_nor *nor)
+{
+ struct spi_nor_rww *rww = &nor->rww;
+ bool start = false;
+
+ mutex_lock(&nor->lock);
+
+ if (rww->ongoing_io)
+ goto busy;
+
+ rww->ongoing_io = true;
+ start = true;
+
+busy:
+ mutex_unlock(&nor->lock);
+ return start;
+}
+
+static void spi_nor_rww_end_io(struct spi_nor *nor)
+{
+ mutex_lock(&nor->lock);
+ nor->rww.ongoing_io = false;
+ mutex_unlock(&nor->lock);
+}
+
+static int spi_nor_lock_device(struct spi_nor *nor)
+{
+ if (!spi_nor_use_parallel_locking(nor))
+ return 0;
+
+ return wait_event_killable(nor->rww.wait, spi_nor_rww_start_io(nor));
+}
+
+static void spi_nor_unlock_device(struct spi_nor *nor)
+{
+ if (spi_nor_use_parallel_locking(nor)) {
+ spi_nor_rww_end_io(nor);
+ wake_up(&nor->rww.wait);
+ }
+}
+
+/* Internal locking helpers for exclusive (whole-flash) operations */
+static bool spi_nor_rww_start_exclusive(struct spi_nor *nor)
+{
+ struct spi_nor_rww *rww = &nor->rww;
+ bool start = false;
+
+ mutex_lock(&nor->lock);
+
+ if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
+ goto busy;
+
+ rww->ongoing_io = true;
+ rww->ongoing_rd = true;
+ rww->ongoing_pe = true;
+ start = true;
+
+busy:
+ mutex_unlock(&nor->lock);
+ return start;
+}
+
+static void spi_nor_rww_end_exclusive(struct spi_nor *nor)
+{
+ struct spi_nor_rww *rww = &nor->rww;
+
+ mutex_lock(&nor->lock);
+ rww->ongoing_io = false;
+ rww->ongoing_rd = false;
+ rww->ongoing_pe = false;
+ mutex_unlock(&nor->lock);
+}
+
+int spi_nor_prep_and_lock(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = spi_nor_prep(nor);
+ if (ret)
+ return ret;
+
+ if (!spi_nor_use_parallel_locking(nor))
+ mutex_lock(&nor->lock);
+ else
+ ret = wait_event_killable(nor->rww.wait,
+ spi_nor_rww_start_exclusive(nor));
+
+ return ret;
+}
+
+void spi_nor_unlock_and_unprep(struct spi_nor *nor)
+{
+ if (!spi_nor_use_parallel_locking(nor)) {
+ mutex_unlock(&nor->lock);
+ } else {
+ spi_nor_rww_end_exclusive(nor);
+ wake_up(&nor->rww.wait);
+ }
+
+ spi_nor_unprep(nor);
+}
+
+/* Internal locking helpers for program and erase operations */
+static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len)
+{
+ struct spi_nor_rww *rww = &nor->rww;
+ unsigned int used_banks = 0;
+ bool started = false;
+ u8 first, last;
+ int bank;
+
+ mutex_lock(&nor->lock);
+
+ if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
+ goto busy;
+
+ spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
+ for (bank = first; bank <= last; bank++) {
+ if (rww->used_banks & BIT(bank))
+ goto busy;
+
+ used_banks |= BIT(bank);
+ }
+
+ rww->used_banks |= used_banks;
+ rww->ongoing_pe = true;
+ started = true;
+
+busy:
+ mutex_unlock(&nor->lock);
+ return started;
+}
+
+static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
+{
+ struct spi_nor_rww *rww = &nor->rww;
+ u8 first, last;
+ int bank;
+
+ mutex_lock(&nor->lock);
+
+ spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
+ for (bank = first; bank <= last; bank++)
+ rww->used_banks &= ~BIT(bank);
+
+ rww->ongoing_pe = false;
+
+ mutex_unlock(&nor->lock);
+}
+
+static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len)
+{
+ int ret;
+
+ ret = spi_nor_prep(nor);
+ if (ret)
+ return ret;
+
+ if (!spi_nor_use_parallel_locking(nor))
+ mutex_lock(&nor->lock);
+ else
+ ret = wait_event_killable(nor->rww.wait,
+ spi_nor_rww_start_pe(nor, start, len));
+
+ return ret;
+}
+
+static void spi_nor_unlock_and_unprep_pe(struct spi_nor *nor, loff_t start, size_t len)
+{
+ if (!spi_nor_use_parallel_locking(nor)) {
+ mutex_unlock(&nor->lock);
+ } else {
+ spi_nor_rww_end_pe(nor, start, len);
+ wake_up(&nor->rww.wait);
+ }
+
+ spi_nor_unprep(nor);
+}
+
+/* Internal locking helpers for read operations */
+static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
+{
+ struct spi_nor_rww *rww = &nor->rww;
+ unsigned int used_banks = 0;
+ bool started = false;
+ u8 first, last;
+ int bank;
+
+ mutex_lock(&nor->lock);
+
+ if (rww->ongoing_io || rww->ongoing_rd)
+ goto busy;
+
+ spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
+ for (bank = first; bank <= last; bank++) {
+ if (rww->used_banks & BIT(bank))
+ goto busy;
+
+ used_banks |= BIT(bank);
+ }
+
+ rww->used_banks |= used_banks;
+ rww->ongoing_io = true;
+ rww->ongoing_rd = true;
+ started = true;
+
+busy:
+ mutex_unlock(&nor->lock);
+ return started;
+}
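+
+/*
+ * Note (sketch of the intended RWW interplay): spi_nor_rww_start_pe() sets
+ * ongoing_pe and marks its banks in used_banks but leaves ongoing_io clear,
+ * so a read targeting a different bank passes the checks above and may run
+ * concurrently with an ongoing program/erase.
+ */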
+
+static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
+{
+ struct spi_nor_rww *rww = &nor->rww;
+ u8 first, last;
+ int bank;
+
+ mutex_lock(&nor->lock);
+
+ spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
+ for (bank = first; bank <= last; bank++)
+ nor->rww.used_banks &= ~BIT(bank);
+
+ rww->ongoing_io = false;
+ rww->ongoing_rd = false;
+
+ mutex_unlock(&nor->lock);
+}
+
+static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len)
+{
+ int ret;
+
+ ret = spi_nor_prep(nor);
+ if (ret)
+ return ret;
+
+ if (!spi_nor_use_parallel_locking(nor))
+ mutex_lock(&nor->lock);
+ else
+ ret = wait_event_killable(nor->rww.wait,
+ spi_nor_rww_start_rd(nor, start, len));
+
+ return ret;
+}
+
+static void spi_nor_unlock_and_unprep_rd(struct spi_nor *nor, loff_t start, size_t len)
+{
+ if (!spi_nor_use_parallel_locking(nor)) {
+ mutex_unlock(&nor->lock);
+ } else {
+ spi_nor_rww_end_rd(nor, start, len);
+ wake_up(&nor->rww.wait);
+ }
+
+ spi_nor_unprep(nor);
+}
+
+static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
+{
+ if (!nor->params->convert_addr)
+ return addr;
+
+ return nor->params->convert_addr(nor, addr);
+}
+
+/*
+ * Initiate the erasure of a single sector
+ */
+int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
+{
+ int i;
+
+ addr = spi_nor_convert_addr(nor, addr);
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
+ nor->addr_nbytes, addr);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ return spi_mem_exec_op(nor->spimem, &op);
+ } else if (nor->controller_ops->erase) {
+ return spi_nor_controller_ops_erase(nor, addr);
+ }
+
+ /*
+ * Default implementation, if driver doesn't have a specialized HW
+ * control
+ */
+ for (i = nor->addr_nbytes - 1; i >= 0; i--) {
+ nor->bouncebuf[i] = addr & 0xff;
+ addr >>= 8;
+ }
+
+ return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
+ nor->bouncebuf, nor->addr_nbytes);
+}
+
+/**
+ * spi_nor_div_by_erase_size() - divide by the erase size and calculate the remainder
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @dividend: dividend value
+ * @remainder: pointer to u32 remainder (will be updated)
+ *
+ * Return: the result of the division
+ */
+static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
+ u64 dividend, u32 *remainder)
+{
+ /* The JEDEC JESD216B standard requires erase sizes to be a power of 2. */
+ *remainder = (u32)dividend & erase->size_mask;
+ return dividend >> erase->size_shift;
+}
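+
+/*
+ * Worked example: for a 64 KiB erase type (size_shift = 16,
+ * size_mask = 0xffff), dividend = 0x30000 gives *remainder = 0 and a
+ * quotient of 3.
+ */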
+
+/**
+ * spi_nor_find_best_erase_type() - find the best erase type for the given
+ * offset in the serial flash memory and the
+ * number of bytes to erase. The region in
+ * which the address fits is expected to be
+ * provided.
+ * @map: the erase map of the SPI NOR
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Return: a pointer to the best-fitting erase type, or NULL if none fits.
+ */
+static const struct spi_nor_erase_type *
+spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
+ const struct spi_nor_erase_region *region,
+ u64 addr, u32 len)
+{
+ const struct spi_nor_erase_type *erase;
+ u32 rem;
+ int i;
+ u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
+
+ /*
+ * Erase types are ordered by size, with the smallest erase type at
+ * index 0.
+ */
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ /* Does the erase region support the tested erase type? */
+ if (!(erase_mask & BIT(i)))
+ continue;
+
+ erase = &map->erase_type[i];
+ if (!erase->size)
+ continue;
+
+ /* Alignment is not mandatory for overlaid regions */
+ if (region->offset & SNOR_OVERLAID_REGION &&
+ region->size <= len)
+ return erase;
+
+ /* Don't erase more than what the user has asked for. */
+ if (erase->size > len)
+ continue;
+
+ spi_nor_div_by_erase_size(erase, addr, &rem);
+ if (!rem)
+ return erase;
+ }
+
+ return NULL;
+}
+
+static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
+{
+ return region->offset & SNOR_LAST_REGION;
+}
+
+static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
+{
+ return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
+}
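+
+/*
+ * Note: region->offset packs the erase-type bitmask and the
+ * SNOR_OVERLAID_REGION/SNOR_LAST_REGION flags into its low bits
+ * (SNOR_ERASE_FLAGS_MASK), so the flags are masked out above to recover the
+ * region's byte offset.
+ */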
+
+/**
+ * spi_nor_region_next() - get the next SPI NOR erase region
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ *
+ * Return: the next SPI NOR erase region, or NULL if this is the last region.
+ */
+struct spi_nor_erase_region *
+spi_nor_region_next(struct spi_nor_erase_region *region)
+{
+ if (spi_nor_region_is_last(region))
+ return NULL;
+ region++;
+ return region;
+}
+
+/**
+ * spi_nor_find_erase_region() - find the region of the serial flash memory in
+ * which the offset fits
+ * @map: the erase map of the SPI NOR
+ * @addr: offset in the serial flash memory
+ *
+ * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
+ * otherwise.
+ */
+static struct spi_nor_erase_region *
+spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
+{
+ struct spi_nor_erase_region *region = map->regions;
+ u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
+ u64 region_end = region_start + region->size;
+
+ while (addr < region_start || addr >= region_end) {
+ region = spi_nor_region_next(region);
+ if (!region)
+ return ERR_PTR(-EINVAL);
+
+ region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
+ region_end = region_start + region->size;
+ }
+
+ return region;
+}
+
+/**
+ * spi_nor_init_erase_cmd() - initialize an erase command
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ *
+ * Return: the pointer to the allocated erase command, or ERR_PTR(-errno) on
+ * allocation failure.
+ */
+static struct spi_nor_erase_command *
+spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
+ const struct spi_nor_erase_type *erase)
+{
+ struct spi_nor_erase_command *cmd;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&cmd->list);
+ cmd->opcode = erase->opcode;
+ cmd->count = 1;
+
+ if (region->offset & SNOR_OVERLAID_REGION)
+ cmd->size = region->size;
+ else
+ cmd->size = erase->size;
+
+ return cmd;
+}
+
+/**
+ * spi_nor_destroy_erase_cmd_list() - destroy erase command list
+ * @erase_list: list of erase commands
+ */
+static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
+{
+ struct spi_nor_erase_command *cmd, *next;
+
+ list_for_each_entry_safe(cmd, next, erase_list, list) {
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+}
+
+/**
+ * spi_nor_init_erase_cmd_list() - initialize erase command list
+ * @nor: pointer to a 'struct spi_nor'
+ * @erase_list: list of erase commands to be executed once we validate that the
+ * erase can be performed
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Builds the list of best-fitting erase commands and verifies that the erase
+ * can be performed.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
+ struct list_head *erase_list,
+ u64 addr, u32 len)
+{
+ const struct spi_nor_erase_map *map = &nor->params->erase_map;
+ const struct spi_nor_erase_type *erase, *prev_erase = NULL;
+ struct spi_nor_erase_region *region;
+ struct spi_nor_erase_command *cmd = NULL;
+ u64 region_end;
+ int ret = -EINVAL;
+
+ region = spi_nor_find_erase_region(map, addr);
+ if (IS_ERR(region))
+ return PTR_ERR(region);
+
+ region_end = spi_nor_region_end(region);
+
+ while (len) {
+ erase = spi_nor_find_best_erase_type(map, region, addr, len);
+ if (!erase)
+ goto destroy_erase_cmd_list;
+
+ if (prev_erase != erase ||
+ erase->size != cmd->size ||
+ region->offset & SNOR_OVERLAID_REGION) {
+ cmd = spi_nor_init_erase_cmd(region, erase);
+ if (IS_ERR(cmd)) {
+ ret = PTR_ERR(cmd);
+ goto destroy_erase_cmd_list;
+ }
+
+ list_add_tail(&cmd->list, erase_list);
+ } else {
+ cmd->count++;
+ }
+
+ addr += cmd->size;
+ len -= cmd->size;
+
+ if (len && addr >= region_end) {
+ region = spi_nor_region_next(region);
+ if (!region)
+ goto destroy_erase_cmd_list;
+ region_end = spi_nor_region_end(region);
+ }
+
+ prev_erase = erase;
+ }
+
+ return 0;
+
+destroy_erase_cmd_list:
+ spi_nor_destroy_erase_cmd_list(erase_list);
+ return ret;
+}
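+
+/*
+ * Worked example (uniform 4 KiB + 64 KiB erase types, aligned request):
+ * erasing 96 KiB starting at offset 0 yields a list of two commands, a
+ * 64 KiB Sector Erase with count = 1 followed by a 4 KiB Block Erase with
+ * count = 8.
+ */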
+
+/**
+ * spi_nor_erase_multi_sectors() - perform a non-uniform erase
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Build a list of best-fitting erase commands and execute it once we validate
+ * that the erase can be performed.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
+{
+ LIST_HEAD(erase_list);
+ struct spi_nor_erase_command *cmd, *next;
+ int ret;
+
+ ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
+ if (ret)
+ return ret;
+
+ list_for_each_entry_safe(cmd, next, &erase_list, list) {
+ nor->erase_opcode = cmd->opcode;
+ while (cmd->count) {
+ dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
+ cmd->size, cmd->opcode, cmd->count);
+
+ ret = spi_nor_lock_device(nor);
+ if (ret)
+ goto destroy_erase_cmd_list;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret) {
+ spi_nor_unlock_device(nor);
+ goto destroy_erase_cmd_list;
+ }
+
+ ret = spi_nor_erase_sector(nor, addr);
+ spi_nor_unlock_device(nor);
+ if (ret)
+ goto destroy_erase_cmd_list;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto destroy_erase_cmd_list;
+
+ addr += cmd->size;
+ cmd->count--;
+ }
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+
+ return 0;
+
+destroy_erase_cmd_list:
+ spi_nor_destroy_erase_cmd_list(&erase_list);
+ return ret;
+}
+
+/*
+ * Erase an address range on the NOR chip. The address range may span
+ * one or more erase sectors. Return an error if there is a problem erasing.
+ */
+static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ u32 addr, len;
+ u32 rem;
+ int ret;
+
+ dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
+ (long long)instr->len);
+
+ if (spi_nor_has_uniform_erase(nor)) {
+ div_u64_rem(instr->len, mtd->erasesize, &rem);
+ if (rem)
+ return -EINVAL;
+ }
+
+ addr = instr->addr;
+ len = instr->len;
+
+ ret = spi_nor_prep_and_lock_pe(nor, instr->addr, instr->len);
+ if (ret)
+ return ret;
+
+ /* whole-chip erase? */
+ if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
+ unsigned long timeout;
+
+ ret = spi_nor_lock_device(nor);
+ if (ret)
+ goto erase_err;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret) {
+ spi_nor_unlock_device(nor);
+ goto erase_err;
+ }
+
+ ret = spi_nor_erase_chip(nor);
+ spi_nor_unlock_device(nor);
+ if (ret)
+ goto erase_err;
+
+ /*
+ * Scale the timeout linearly with the size of the flash, with
+ * a minimum calibrated to an old 2MB flash. We could try to
+ * pull these from CFI/SFDP, but these values should be good
+ * enough for now.
+ */
+ timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
+ CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
+ (unsigned long)(mtd->size / SZ_2M));
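+ /* e.g. a 16 MiB flash waits up to 8x the 2 MiB baseline timeout. */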
+ ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
+ if (ret)
+ goto erase_err;
+
+ /* REVISIT in some cases we could speed up erasing large regions
+ * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
+ * to use "small sector erase", but that's not always optimal.
+ */
+
+ /* "sector"-at-a-time erase */
+ } else if (spi_nor_has_uniform_erase(nor)) {
+ while (len) {
+ ret = spi_nor_lock_device(nor);
+ if (ret)
+ goto erase_err;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret) {
+ spi_nor_unlock_device(nor);
+ goto erase_err;
+ }
+
+ ret = spi_nor_erase_sector(nor, addr);
+ spi_nor_unlock_device(nor);
+ if (ret)
+ goto erase_err;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
+
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+ }
+
+ /* erase multiple sectors */
+ } else {
+ ret = spi_nor_erase_multi_sectors(nor, addr, len);
+ if (ret)
+ goto erase_err;
+ }
+
+ ret = spi_nor_write_disable(nor);
+
+erase_err:
+ spi_nor_unlock_and_unprep_pe(nor, instr->addr, instr->len);
+
+ return ret;
+}
+
+/**
+ * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
+ * Register 1.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Bit 6 of the Status Register 1 is the QE bit for Macronix-like QSPI memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
+ return 0;
+
+ nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
+
+ return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
+}
+
+/**
+ * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
+ * Register 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * Bit 1 of the Status Register 2 is the QE bit for Spansion-like QSPI memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
+
+ ret = spi_nor_read_cr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
+ return 0;
+
+ nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
+
+ return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
+}
+
+/**
+ * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Status Register 2.
+ *
+ * This is one of the procedures to set the QE bit described in the SFDP
+ * (JESD216 rev B) specification but no manufacturer using this procedure has
+ * been identified yet, hence the name of the function.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
+{
+ u8 *sr2 = nor->bouncebuf;
+ int ret;
+ u8 sr2_written;
+
+ /* Check current Quad Enable bit value. */
+ ret = spi_nor_read_sr2(nor, sr2);
+ if (ret)
+ return ret;
+ if (*sr2 & SR2_QUAD_EN_BIT7)
+ return 0;
+
+ /* Update the Quad Enable bit. */
+ *sr2 |= SR2_QUAD_EN_BIT7;
+
+ ret = spi_nor_write_sr2(nor, sr2);
+ if (ret)
+ return ret;
+
+ sr2_written = *sr2;
+
+ /* Read back and check it. */
+ ret = spi_nor_read_sr2(nor, sr2);
+ if (ret)
+ return ret;
+
+ if (*sr2 != sr2_written) {
+ dev_dbg(nor->dev, "SR2: Read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct spi_nor_manufacturer *manufacturers[] = {
+ &spi_nor_atmel,
+ &spi_nor_catalyst,
+ &spi_nor_eon,
+ &spi_nor_esmt,
+ &spi_nor_everspin,
+ &spi_nor_fujitsu,
+ &spi_nor_gigadevice,
+ &spi_nor_intel,
+ &spi_nor_issi,
+ &spi_nor_macronix,
+ &spi_nor_micron,
+ &spi_nor_st,
+ &spi_nor_spansion,
+ &spi_nor_sst,
+ &spi_nor_winbond,
+ &spi_nor_xilinx,
+ &spi_nor_xmc,
+};
+
+static const struct flash_info spi_nor_generic_flash = {
+ .name = "spi-nor-generic",
+ .n_banks = 1,
+ /*
+ * JESD216 rev A doesn't specify the page size, therefore we need a
+ * sane default.
+ */
+ .page_size = 256,
+ .parse_sfdp = true,
+};
+
+static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
+ const u8 *id)
+{
+ const struct flash_info *part;
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
+ for (j = 0; j < manufacturers[i]->nparts; j++) {
+ part = &manufacturers[i]->parts[j];
+ if (part->id_len &&
+ !memcmp(part->id, id, part->id_len)) {
+ nor->manufacturer = manufacturers[i];
+ return part;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static const struct flash_info *spi_nor_detect(struct spi_nor *nor)
+{
+ const struct flash_info *info;
+ u8 *id = nor->bouncebuf;
+ int ret;
+
+ ret = spi_nor_read_id(nor, 0, 0, id, nor->reg_proto);
+ if (ret) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* Cache the complete flash ID. */
+ nor->id = devm_kmemdup(nor->dev, id, SPI_NOR_MAX_ID_LEN, GFP_KERNEL);
+ if (!nor->id)
+ return ERR_PTR(-ENOMEM);
+
+ info = spi_nor_match_id(nor, id);
+
+ /* Fallback to a generic flash described only by its SFDP data. */
+ if (!info) {
+ ret = spi_nor_check_sfdp_signature(nor);
+ if (!ret)
+ info = &spi_nor_generic_flash;
+ }
+
+ if (!info) {
+ dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
+ SPI_NOR_MAX_ID_LEN, id);
+ return ERR_PTR(-ENODEV);
+ }
+ return info;
+}
+
+static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ loff_t from_lock = from;
+ size_t len_lock = len;
+ ssize_t ret;
+
+ dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+
+ ret = spi_nor_prep_and_lock_rd(nor, from_lock, len_lock);
+ if (ret)
+ return ret;
+
+ while (len) {
+ loff_t addr = from;
+
+ addr = spi_nor_convert_addr(nor, addr);
+
+ ret = spi_nor_read_data(nor, addr, len, buf);
+ if (ret == 0) {
+ /* We shouldn't see 0-length reads */
+ ret = -EIO;
+ goto read_err;
+ }
+ if (ret < 0)
+ goto read_err;
+
+ WARN_ON(ret > len);
+ *retlen += ret;
+ buf += ret;
+ from += ret;
+ len -= ret;
+ }
+ ret = 0;
+
+read_err:
+ spi_nor_unlock_and_unprep_rd(nor, from_lock, len_lock);
+
+ return ret;
+}
+
+/*
+ * Write an address range to the NOR chip. Data is written in chunks of at
+ * most one page (nor->params->page_size). The address range may be any size
+ * provided it is within the physical boundaries.
+ */
+static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ size_t page_offset, page_remain, i;
+ ssize_t ret;
+ u32 page_size = nor->params->page_size;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_prep_and_lock_pe(nor, to, len);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < len; ) {
+ ssize_t written;
+ loff_t addr = to + i;
+
+ /*
+ * If page_size is a power of two, the offset can be quickly
+ * calculated with an AND operation. On the other cases we
+ * need to do a modulus operation (more expensive).
+ */
+ if (is_power_of_2(page_size)) {
+ page_offset = addr & (page_size - 1);
+ } else {
+ u64 aux = addr;
+
+ page_offset = do_div(aux, page_size);
+ }
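+ /*
+ * Example: with page_size = 256 (a power of two) and addr = 0x1234,
+ * page_offset = 0x1234 & 0xff = 0x34.
+ */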
+ /* the size of data remaining on the first page */
+ page_remain = min_t(size_t, page_size - page_offset, len - i);
+
+ addr = spi_nor_convert_addr(nor, addr);
+
+ ret = spi_nor_lock_device(nor);
+ if (ret)
+ goto write_err;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret) {
+ spi_nor_unlock_device(nor);
+ goto write_err;
+ }
+
+ ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
+ spi_nor_unlock_device(nor);
+ if (ret < 0)
+ goto write_err;
+ written = ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto write_err;
+ *retlen += written;
+ i += written;
+ }
+
+write_err:
+ spi_nor_unlock_and_unprep_pe(nor, to, len);
+
+ return ret;
+}
+
+static int spi_nor_check(struct spi_nor *nor)
+{
+ if (!nor->dev ||
+ (!nor->spimem && !nor->controller_ops) ||
+ (!nor->spimem && nor->controller_ops &&
+ (!nor->controller_ops->read ||
+ !nor->controller_ops->write ||
+ !nor->controller_ops->read_reg ||
+ !nor->controller_ops->write_reg))) {
+ pr_err("spi-nor: please fill all the necessary fields!\n");
+ return -EINVAL;
+ }
+
+ if (nor->spimem && nor->controller_ops) {
+ dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+spi_nor_set_read_settings(struct spi_nor_read_command *read,
+ u8 num_mode_clocks,
+ u8 num_wait_states,
+ u8 opcode,
+ enum spi_nor_protocol proto)
+{
+ read->num_mode_clocks = num_mode_clocks;
+ read->num_wait_states = num_wait_states;
+ read->opcode = opcode;
+ read->proto = proto;
+}
+
+void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
+ enum spi_nor_protocol proto)
+{
+ pp->opcode = opcode;
+ pp->proto = proto;
+}
+
+static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == (int)hwcaps)
+ return table[i][1];
+
+ return -EINVAL;
+}
+
+int spi_nor_hwcaps_read2cmd(u32 hwcaps)
+{
+ static const int hwcaps_read2cmd[][2] = {
+ { SNOR_HWCAPS_READ, SNOR_CMD_READ },
+ { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
+ { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
+ { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
+ { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
+ { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
+ { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
+ { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
+ { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
+ { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
+ { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
+ { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
+ { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
+ { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
+ { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
+ { SNOR_HWCAPS_READ_8_8_8_DTR, SNOR_CMD_READ_8_8_8_DTR },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
+ ARRAY_SIZE(hwcaps_read2cmd));
+}
+
+int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
+{
+ static const int hwcaps_pp2cmd[][2] = {
+ { SNOR_HWCAPS_PP, SNOR_CMD_PP },
+ { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
+ { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
+ { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
+ { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
+ { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
+ { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
+ { SNOR_HWCAPS_PP_8_8_8_DTR, SNOR_CMD_PP_8_8_8_DTR },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
+ ARRAY_SIZE(hwcaps_pp2cmd));
+}
+
+/**
+ * spi_nor_spimem_check_op() - check if the operation is supported by the
+ * controller
+ * @nor: pointer to a 'struct spi_nor'
+ * @op: pointer to op template to be checked
+ *
+ * Return: 0 if the operation is supported, -EOPNOTSUPP otherwise.
+ */
+static int spi_nor_spimem_check_op(struct spi_nor *nor,
+ struct spi_mem_op *op)
+{
+ /*
+ * First test with 4 address bytes. The opcode itself might
+ * be a 3B addressing opcode but we don't care, because
+ * SPI controller implementation should not check the opcode,
+ * but just the sequence.
+ */
+ op->addr.nbytes = 4;
+ if (!spi_mem_supports_op(nor->spimem, op)) {
+ if (nor->params->size > SZ_16M)
+ return -EOPNOTSUPP;
+
+ /* If flash size <= 16MB, 3 address bytes are sufficient */
+ op->addr.nbytes = 3;
+ if (!spi_mem_supports_op(nor->spimem, op))
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_spimem_check_readop() - check if the read op is supported by the
+ * controller
+ * @nor: pointer to a 'struct spi_nor'
+ * @read: pointer to op template to be checked
+ *
+ * Return: 0 if the operation is supported, -EOPNOTSUPP otherwise.
+ */
+static int spi_nor_spimem_check_readop(struct spi_nor *nor,
+ const struct spi_nor_read_command *read)
+{
+ struct spi_mem_op op = SPI_NOR_READ_OP(read->opcode);
+
+ spi_nor_spimem_setup_op(nor, &op, read->proto);
+
+ /* convert the dummy cycles to the number of bytes */
+ op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
+ op.dummy.buswidth / 8;
+ if (spi_nor_protocol_is_dtr(nor->read_proto))
+ op.dummy.nbytes *= 2;
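+ /*
+ * Example: 8 dummy cycles on a quad (4-bit) bus convert to
+ * 8 * 4 / 8 = 4 bytes; DTR doubles this to 8 since each byte
+ * takes only half a cycle.
+ */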
+
+ return spi_nor_spimem_check_op(nor, &op);
+}
+
+/**
+ * spi_nor_spimem_check_pp() - check if the page program op is supported by
+ * the controller
+ * @nor: pointer to a 'struct spi_nor'
+ * @pp: pointer to op template to be checked
+ *
+ * Return: 0 if the operation is supported, -EOPNOTSUPP otherwise.
+ */
+static int spi_nor_spimem_check_pp(struct spi_nor *nor,
+ const struct spi_nor_pp_command *pp)
+{
+ struct spi_mem_op op = SPI_NOR_PP_OP(pp->opcode);
+
+ spi_nor_spimem_setup_op(nor, &op, pp->proto);
+
+ return spi_nor_spimem_check_op(nor, &op);
+}
+
+/**
+ * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
+ * based on SPI controller capabilities
+ * @nor: pointer to a 'struct spi_nor'
+ * @hwcaps: pointer to the resulting capabilities after adjusting them
+ * according to the capabilities of both the controller and the flash
+ */
+static void
+spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ unsigned int cap;
+
+ /* X-X-X modes are not supported yet, mask them all. */
+ *hwcaps &= ~SNOR_HWCAPS_X_X_X;
+
+ /*
+ * If the reset line is broken, we do not want to enter a stateful
+ * mode.
+ */
+ if (nor->flags & SNOR_F_BROKEN_RESET)
+ *hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);
+
+ for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
+ int rdidx, ppidx;
+
+ if (!(*hwcaps & BIT(cap)))
+ continue;
+
+ rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
+ if (rdidx >= 0 &&
+ spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
+ *hwcaps &= ~BIT(cap);
+
+ ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
+ if (ppidx < 0)
+ continue;
+
+ if (spi_nor_spimem_check_pp(nor,
+ &params->page_programs[ppidx]))
+ *hwcaps &= ~BIT(cap);
+ }
+}
+
+/**
+ * spi_nor_set_erase_type() - set a SPI NOR erase type
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type
+ * @opcode: the SPI command op code to erase the sector/block
+ */
+void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
+ u8 opcode)
+{
+ erase->size = size;
+ erase->opcode = opcode;
+ /* The JEDEC JESD216B standard requires erase sizes to be a power of 2. */
+ erase->size_shift = ffs(erase->size) - 1;
+ erase->size_mask = (1 << erase->size_shift) - 1;
+}
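+
+/*
+ * Example: spi_nor_set_erase_type(erase, 4096, SPINOR_OP_BE_4K) computes
+ * size_shift = ffs(4096) - 1 = 12 and size_mask = 0xfff.
+ */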
+
+/**
+ * spi_nor_mask_erase_type() - mask out a SPI NOR erase type
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ */
+void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase)
+{
+ erase->size = 0;
+}
+
+/**
+ * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
+ * @map: the erase map of the SPI NOR
+ * @erase_mask: bitmask encoding erase types that can erase the entire
+ * flash memory
+ * @flash_size: the spi nor flash memory size
+ */
+void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
+ u8 erase_mask, u64 flash_size)
+{
+ /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
+ map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
+ SNOR_LAST_REGION;
+ map->uniform_region.size = flash_size;
+ map->regions = &map->uniform_region;
+ map->uniform_erase_type = erase_mask;
+}
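+
+/*
+ * Example: with a 4 KiB erase type at index 0 and a 64 KiB type at index 1,
+ * erase_mask = BIT(0) | BIT(1) = 0x3, so uniform_region.offset holds
+ * 0x3 | SNOR_LAST_REGION and the single region covers the whole flash.
+ */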
+
+int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ int ret;
+
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->post_bfpt) {
+ ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
+ bfpt);
+ if (ret)
+ return ret;
+ }
+
+ if (nor->info->fixups && nor->info->fixups->post_bfpt)
+ return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt);
+
+ return 0;
+}
+
+static int spi_nor_select_read(struct spi_nor *nor,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
+ const struct spi_nor_read_command *read;
+
+ if (best_match < 0)
+ return -EINVAL;
+
+ cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ read = &nor->params->reads[cmd];
+ nor->read_opcode = read->opcode;
+ nor->read_proto = read->proto;
+
+ /*
+ * In the SPI NOR framework, we don't need to distinguish between mode
+ * clock cycles and wait state clock cycles.
+ * Indeed, the value of the mode clock cycles is used by a QSPI
+ * flash memory to know whether it should enter or leave its 0-4-4
+ * (Continuous Read / XIP) mode.
+ * eXecution In Place is out of the scope of the mtd sub-system.
+ * Hence we choose to merge both mode and wait state clock cycles
+ * into the so called dummy clock cycles.
+ */
+ nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
+ return 0;
+}
+
+static int spi_nor_select_pp(struct spi_nor *nor,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
+ const struct spi_nor_pp_command *pp;
+
+ if (best_match < 0)
+ return -EINVAL;
+
+ cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ pp = &nor->params->page_programs[cmd];
+ nor->program_opcode = pp->opcode;
+ nor->write_proto = pp->proto;
+ return 0;
+}
+
+/**
+ * spi_nor_select_uniform_erase() - select optimum uniform erase type
+ * @map: the erase map of the SPI NOR
+ * @wanted_size: the erase type size to search for. Contains the value of
+ * info->sector_size, the "small sector" size in case
+ * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined, or 0 if
+ * there is no information about the sector size. The
+ * latter is the case when the flash parameters are parsed
+ * solely from SFDP; the largest supported erase type is
+ * then selected.
+ *
+ * Once the optimum uniform Sector Erase command is found, disable all the
+ * others.
+ *
+ * Return: pointer to erase type on success, NULL otherwise.
+ */
+static const struct spi_nor_erase_type *
+spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
+ const u32 wanted_size)
+{
+ const struct spi_nor_erase_type *tested_erase, *erase = NULL;
+ int i;
+ u8 uniform_erase_type = map->uniform_erase_type;
+
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ if (!(uniform_erase_type & BIT(i)))
+ continue;
+
+ tested_erase = &map->erase_type[i];
+
+ /* Skip masked erase types. */
+ if (!tested_erase->size)
+ continue;
+
+ /*
+ * If the current erase size is the wanted one, stop here:
+ * we have found the right uniform Sector Erase command.
+ */
+ if (tested_erase->size == wanted_size) {
+ erase = tested_erase;
+ break;
+ }
+
+ /*
+ * Otherwise, the current erase size is still a valid candidate.
+ * Select the biggest valid candidate.
+ */
+ if (!erase && tested_erase->size)
+ erase = tested_erase;
+ /* keep iterating to find the wanted_size */
+ }
+
+ if (!erase)
+ return NULL;
+
+ /* Disable all other Sector Erase commands. */
+ map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
+ map->uniform_erase_type |= BIT(erase - map->erase_type);
+ return erase;
+}
+
+static int spi_nor_select_erase(struct spi_nor *nor)
+{
+ struct spi_nor_erase_map *map = &nor->params->erase_map;
+ const struct spi_nor_erase_type *erase = NULL;
+ struct mtd_info *mtd = &nor->mtd;
+ u32 wanted_size = nor->info->sector_size;
+ int i;
+
+ /*
+ * The previous implementation handling Sector Erase commands assumed
+ * that the SPI flash memory has a uniform layout and then used only one
+ * of the supported erase sizes for all Sector Erase commands.
+ * So to be backward compatible, the new implementation also tries to
+ * manage the SPI flash memory as uniform with a single erase sector
+ * size, when possible.
+ */
+#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+ /* prefer "small sector" erase if possible */
+ wanted_size = 4096u;
+#endif
+
+ if (spi_nor_has_uniform_erase(nor)) {
+ erase = spi_nor_select_uniform_erase(map, wanted_size);
+ if (!erase)
+ return -EINVAL;
+ nor->erase_opcode = erase->opcode;
+ mtd->erasesize = erase->size;
+ return 0;
+ }
+
+ /*
+ * For non-uniform SPI flash memory, set mtd->erasesize to the
+ * maximum erase sector size. No need to set nor->erase_opcode.
+ */
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ if (map->erase_type[i].size) {
+ erase = &map->erase_type[i];
+ break;
+ }
+ }
+
+ if (!erase)
+ return -EINVAL;
+
+ mtd->erasesize = erase->size;
+ return 0;
+}
+
+static int spi_nor_default_setup(struct spi_nor *nor,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ u32 ignored_mask, shared_mask;
+ int err;
+
+ /*
+ * Keep only the hardware capabilities supported by both the SPI
+ * controller and the SPI flash memory.
+ */
+ shared_mask = hwcaps->mask & params->hwcaps.mask;
+
+ if (nor->spimem) {
+ /*
+ * When called from spi_nor_probe(), all caps are set and we
+ * need to discard some of them based on what the SPI
+ * controller actually supports (using spi_mem_supports_op()).
+ */
+ spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
+ } else {
+ /*
+ * SPI n-n-n protocols are not supported when the SPI
+ * controller directly implements the spi_nor interface.
+ * Yet another reason to switch to spi-mem.
+ */
+ ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
+ if (shared_mask & ignored_mask) {
+ dev_dbg(nor->dev,
+ "SPI n-n-n protocols are not supported.\n");
+ shared_mask &= ~ignored_mask;
+ }
+ }
+
+ /* Select the (Fast) Read command. */
+ err = spi_nor_select_read(nor, shared_mask);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select read settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Select the Page Program command. */
+ err = spi_nor_select_pp(nor, shared_mask);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select write settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Select the Sector Erase command. */
+ err = spi_nor_select_erase(nor);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select erase settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
+{
+ if (nor->params->addr_nbytes) {
+ nor->addr_nbytes = nor->params->addr_nbytes;
+ } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
+ /*
+ * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
+ * in this protocol an odd addr_nbytes cannot be used because
+ * then the address phase would only span a cycle and a half.
+ * Half a cycle would be left over. We would then have to start
+ * the dummy phase in the middle of a cycle and so too the data
+ * phase, and we will end the transaction with half a cycle left
+ * over.
+ *
+ * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
+ * avoid this situation.
+ */
+ nor->addr_nbytes = 4;
+ } else if (nor->info->addr_nbytes) {
+ nor->addr_nbytes = nor->info->addr_nbytes;
+ } else {
+ nor->addr_nbytes = 3;
+ }
+
+ if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_nbytes = 4;
+ }
+
+ if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
+ dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
+ nor->addr_nbytes);
+ return -EINVAL;
+ }
+
+ /* Set 4byte opcodes when possible. */
+ if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ !(nor->flags & SNOR_F_HAS_4BAIT))
+ spi_nor_set_4byte_opcodes(nor);
+
+ return 0;
+}
+
+static int spi_nor_setup(struct spi_nor *nor,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ int ret;
+
+ if (nor->params->setup)
+ ret = nor->params->setup(nor, hwcaps);
+ else
+ ret = spi_nor_default_setup(nor, hwcaps);
+ if (ret)
+ return ret;
+
+ return spi_nor_set_addr_nbytes(nor);
+}
+
+/**
+ * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
+ * settings based on MFR register and ->default_init() hook.
+ * @nor: pointer to a 'struct spi_nor'.
+ */
+static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
+{
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->default_init)
+ nor->manufacturer->fixups->default_init(nor);
+
+ if (nor->info->fixups && nor->info->fixups->default_init)
+ nor->info->fixups->default_init(nor);
+}
+
+/**
+ * spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and
+ * settings based on nor->info->no_sfdp_flags. This method should be called
+ * only by flashes that do not define SFDP tables. If the flash supports SFDP
+ * but its information is wrong, one should instead use the fixup hooks to
+ * update the wrong bits.
+ * @nor: pointer to a 'struct spi_nor'.
+ */
+static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spi_nor_erase_map *map = &params->erase_map;
+ const u8 no_sfdp_flags = nor->info->no_sfdp_flags;
+ u8 i, erase_mask;
+
+ if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
+ 0, 8, SPINOR_OP_READ_1_1_2,
+ SNOR_PROTO_1_1_2);
+ }
+
+ if (no_sfdp_flags & SPI_NOR_QUAD_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
+ 0, 8, SPINOR_OP_READ_1_1_4,
+ SNOR_PROTO_1_1_4);
+ }
+
+ if (no_sfdp_flags & SPI_NOR_OCTAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
+ 0, 8, SPINOR_OP_READ_1_1_8,
+ SNOR_PROTO_1_1_8);
+ }
+
+ if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
+ 0, 20, SPINOR_OP_READ_FAST,
+ SNOR_PROTO_8_8_8_DTR);
+ }
+
+ if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) {
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
+ /*
+ * Since xSPI Page Program opcode is backward compatible with
+ * Legacy SPI, use Legacy SPI opcode there as well.
+ */
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
+ SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
+ }
+
+ /*
+ * Sector Erase settings. Sort Erase Types in ascending order, with the
+ * smallest erase size starting at BIT(0).
+ */
+ erase_mask = 0;
+ i = 0;
+ if (no_sfdp_flags & SECT_4K) {
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], 4096u,
+ SPINOR_OP_BE_4K);
+ i++;
+ }
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], nor->info->sector_size,
+ SPINOR_OP_SE);
+ spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
+}
+
+/**
+ * spi_nor_init_flags() - Initialize NOR flags for settings that are not defined
+ * in the JESD216 SFDP standard and thus cannot be retrieved by parsing SFDP.
+ * @nor: pointer to a 'struct spi_nor'
+ */
+static void spi_nor_init_flags(struct spi_nor *nor)
+{
+ struct device_node *np = spi_nor_get_flash_node(nor);
+ const u16 flags = nor->info->flags;
+
+ if (of_property_read_bool(np, "broken-flash-reset"))
+ nor->flags |= SNOR_F_BROKEN_RESET;
+
+ if (of_property_read_bool(np, "no-wp"))
+ nor->flags |= SNOR_F_NO_WP;
+
+ if (flags & SPI_NOR_SWP_IS_VOLATILE)
+ nor->flags |= SNOR_F_SWP_IS_VOLATILE;
+
+ if (flags & SPI_NOR_HAS_LOCK)
+ nor->flags |= SNOR_F_HAS_LOCK;
+
+ if (flags & SPI_NOR_HAS_TB) {
+ nor->flags |= SNOR_F_HAS_SR_TB;
+ if (flags & SPI_NOR_TB_SR_BIT6)
+ nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
+ }
+
+ if (flags & SPI_NOR_4BIT_BP) {
+ nor->flags |= SNOR_F_HAS_4BIT_BP;
+ if (flags & SPI_NOR_BP3_SR_BIT6)
+ nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
+ }
+
+ if (flags & NO_CHIP_ERASE)
+ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+
+ if (flags & SPI_NOR_RWW && nor->info->n_banks > 1 &&
+ !nor->controller_ops)
+ nor->flags |= SNOR_F_RWW;
+}
+
+/**
+ * spi_nor_init_fixup_flags() - Initialize NOR flags for settings that cannot
+ * be discovered by SFDP for this particular flash because the SFDP table that
+ * indicates this support is not defined in the flash. In case the table for
+ * this support is defined but has wrong values, one should instead use a
+ * post_sfdp() hook to set the SNOR_F equivalent flag.
+ * @nor: pointer to a 'struct spi_nor'
+ */
+static void spi_nor_init_fixup_flags(struct spi_nor *nor)
+{
+ const u8 fixup_flags = nor->info->fixup_flags;
+
+ if (fixup_flags & SPI_NOR_4B_OPCODES)
+ nor->flags |= SNOR_F_4B_OPCODES;
+
+ if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE)
+ nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
+}
+
+/**
+ * spi_nor_late_init_params() - Late initialization of default flash parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Used to initialize flash parameters that are not declared in the JESD216
+ * SFDP standard, or where SFDP tables are not defined at all.
+ * Will replace the spi_nor_manufacturer_init_params() method.
+ */
+static int spi_nor_late_init_params(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ int ret;
+
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->late_init) {
+ ret = nor->manufacturer->fixups->late_init(nor);
+ if (ret)
+ return ret;
+ }
+
+ if (nor->info->fixups && nor->info->fixups->late_init) {
+ ret = nor->info->fixups->late_init(nor);
+ if (ret)
+ return ret;
+ }
+
+ /* Default method kept for backward compatibility. */
+ if (!params->set_4byte_addr_mode)
+ params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr;
+
+ spi_nor_init_flags(nor);
+ spi_nor_init_fixup_flags(nor);
+
+ /*
+ * NOR protection support. When locking_ops are not provided, we pick
+ * the default ones.
+ */
+ if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
+ spi_nor_init_default_locking_ops(nor);
+
+ if (nor->info->n_banks > 1)
+ params->bank_size = div64_u64(params->size, nor->info->n_banks);
+
+ return 0;
+}
+
+/**
+ * spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash
+ * parameters and settings based on JESD216 SFDP standard.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * The method has a roll-back mechanism: in case the SFDP parsing fails, the
+ * legacy flash parameters and settings will be restored.
+ */
+static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter sfdp_params;
+
+ memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
+
+ if (spi_nor_parse_sfdp(nor)) {
+ memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
+ nor->flags &= ~SNOR_F_4B_OPCODES;
+ }
+}
+
+/**
+ * spi_nor_init_params_deprecated() - Deprecated way of initializing flash
+ * parameters and settings.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * The method assumes that flash doesn't support SFDP so it initializes flash
+ * parameters in spi_nor_no_sfdp_init_params() which later on can be overwritten
+ * when parsing SFDP, if supported.
+ */
+static void spi_nor_init_params_deprecated(struct spi_nor *nor)
+{
+ spi_nor_no_sfdp_init_params(nor);
+
+ spi_nor_manufacturer_init_params(nor);
+
+ if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ |
+ SPI_NOR_OCTAL_READ |
+ SPI_NOR_OCTAL_DTR_READ))
+ spi_nor_sfdp_init_params_deprecated(nor);
+}
+
+/**
+ * spi_nor_init_default_params() - Default initialization of flash parameters
+ * and settings. Done for all flashes, regardless of whether they define
+ * SFDP tables or not.
+ * @nor: pointer to a 'struct spi_nor'.
+ */
+static void spi_nor_init_default_params(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ const struct flash_info *info = nor->info;
+ struct device_node *np = spi_nor_get_flash_node(nor);
+
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+ params->otp.org = &info->otp_org;
+
+ /* Default to 16-bit Write Status (01h) Command */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+ /* Set SPI NOR sizes. */
+ params->writesize = 1;
+ params->size = (u64)info->sector_size * info->n_sectors;
+ params->bank_size = params->size;
+ params->page_size = info->page_size;
+
+ if (!(info->flags & SPI_NOR_NO_FR)) {
+ /* Default to Fast Read for DT and non-DT platform devices. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+
+ /* Mask out Fast Read if not requested at DT instantiation. */
+ if (np && !of_property_read_bool(np, "m25p,fast-read"))
+ params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+ }
+
+ /* (Fast) Read settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
+ 0, 0, SPINOR_OP_READ,
+ SNOR_PROTO_1_1_1);
+
+ if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
+ 0, 8, SPINOR_OP_READ_FAST,
+ SNOR_PROTO_1_1_1);
+ /* Page Program settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_PP;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+
+ if (info->flags & SPI_NOR_QUAD_PP) {
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);
+ }
+}
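+
+/*
+ * Worked example for the sizing above (illustrative values): a part
+ * declared with INFO(..., 64 * 1024, 256) gets size = 64 KiB * 256 =
+ * 16 MiB, bank_size = size (single bank) and page_size = 256 by default.
+ */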
+
+/**
+ * spi_nor_init_params() - Initialize the flash's parameters and settings.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * The flash parameters and settings are initialized based on a sequence of
+ * calls that are ordered by priority:
+ *
+ * 1/ Default flash parameters initialization. The initializations are done
+ * based on nor->info data:
+ * spi_nor_init_default_params()
+ *
+ * which can be overwritten by:
+ * 2/ Manufacturer flash parameters initialization. The initializations are
+ * done based on MFR register, or when the decisions can not be done solely
+ * based on MFR, by using specific flash_info tweaks, ->default_init():
+ * spi_nor_manufacturer_init_params()
+ *
+ * which can be overwritten by:
+ * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
+ * should be more accurate than the above.
+ * spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params()
+ *
+ * Please note that there is a ->post_bfpt() fixup hook that can overwrite
+ * the flash parameters and settings immediately after parsing the Basic
+ * Flash Parameter Table.
+ * spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed.
+ * It is used to tweak various flash parameters when information provided
+ * by the SFDP tables is wrong.
+ *
+ * which can be overwritten by:
+ * 4/ Late flash parameters initialization, used to initialize flash
+ * parameters that are not declared in the JESD216 SFDP standard, or where SFDP
+ * tables are not defined at all.
+ * spi_nor_late_init_params()
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_init_params(struct spi_nor *nor)
+{
+ int ret;
+
+ nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
+ if (!nor->params)
+ return -ENOMEM;
+
+ spi_nor_init_default_params(nor);
+
+ if (nor->info->parse_sfdp) {
+ ret = spi_nor_parse_sfdp(nor);
+ if (ret) {
+ dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
+ return ret;
+ }
+ } else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) {
+ spi_nor_no_sfdp_init_params(nor);
+ } else {
+ spi_nor_init_params_deprecated(nor);
+ }
+
+ return spi_nor_late_init_params(nor);
+}
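+
+/*
+ * Illustrative flash_info sketch (hypothetical part, not from the table):
+ * an entry declared with PARSE_SFDP takes the first branch above, one with
+ * NO_SFDP_FLAGS(SPI_NOR_SKIP_SFDP) takes the second, and everything else
+ * falls back to the deprecated path:
+ *
+ *	{ "example-nor", INFO(0x123456, 0, 64 * 1024, 128)
+ *		PARSE_SFDP },
+ */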
+
+/**
+ * spi_nor_set_octal_dtr() - enable or disable Octal DTR I/O.
+ * @nor: pointer to a 'struct spi_nor'
+ * @enable: whether to enable or disable Octal DTR
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ if (!nor->params->set_octal_dtr)
+ return 0;
+
+ if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
+ nor->write_proto == SNOR_PROTO_8_8_8_DTR))
+ return 0;
+
+ if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
+ return 0;
+
+ ret = nor->params->set_octal_dtr(nor, enable);
+ if (ret)
+ return ret;
+
+ if (enable)
+ nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
+ else
+ nor->reg_proto = SNOR_PROTO_1_1_1;
+
+ return 0;
+}
+
+/**
+ * spi_nor_quad_enable() - enable Quad I/O if needed.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_quad_enable(struct spi_nor *nor)
+{
+ if (!nor->params->quad_enable)
+ return 0;
+
+ if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
+ spi_nor_get_protocol_width(nor->write_proto) == 4))
+ return 0;
+
+ return nor->params->quad_enable(nor);
+}
+
+/**
+ * spi_nor_set_4byte_addr_mode() - Set address mode.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @enable: enable/disable 4 byte address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ int ret;
+
+ ret = params->set_4byte_addr_mode(nor, enable);
+ if (ret && ret != -ENOTSUPP)
+ return ret;
+
+ if (enable) {
+ params->addr_nbytes = 4;
+ params->addr_mode_nbytes = 4;
+ } else {
+ params->addr_nbytes = 3;
+ params->addr_mode_nbytes = 3;
+ }
+
+ return 0;
+}
+
+static int spi_nor_init(struct spi_nor *nor)
+{
+ int err;
+
+ err = spi_nor_set_octal_dtr(nor, true);
+ if (err) {
+ dev_dbg(nor->dev, "octal mode not supported\n");
+ return err;
+ }
+
+ err = spi_nor_quad_enable(nor);
+ if (err) {
+ dev_dbg(nor->dev, "quad mode not supported\n");
+ return err;
+ }
+
+ /*
+ * Some SPI NOR flashes are write protected by default after a power-on
+ * reset cycle, in order to avoid inadvertent writes during power-up.
+ * Backward compatibility requires us to unlock the entire flash memory
+ * array at power-up by default. Depending on the kernel configuration,
+ * (1) do nothing, (2) always unlock the entire flash array or (3)
+ * unlock the entire flash array only when the software write
+ * protection bits are volatile. The latter is indicated by
+ * SNOR_F_SWP_IS_VOLATILE.
+ */
+ if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
+ (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
+ nor->flags & SNOR_F_SWP_IS_VOLATILE))
+ spi_nor_try_unlock_all(nor);
+
+ if (nor->addr_nbytes == 4 &&
+ nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
+ !(nor->flags & SNOR_F_4B_OPCODES)) {
+ /*
+ * If the RESET# pin isn't hooked up properly, or the system
+ * otherwise doesn't perform a reset command in the boot
+ * sequence, it's impossible to 100% protect against unexpected
+ * reboots (e.g., crashes). Warn the user (or hopefully, system
+ * designer) that this is bad.
+ */
+ WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
+ "enabling reset hack; may not recover from unexpected reboots\n");
+ err = spi_nor_set_4byte_addr_mode(nor, true);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_soft_reset() - Perform a software reset
+ * @nor: pointer to 'struct spi_nor'
+ *
+ * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets
+ * the device to its power-on-reset state. This is useful when the software has
+ * made some changes to device (volatile) registers and needs to reset it before
+ * shutting down, for example.
+ *
+ * Not every flash supports this sequence. The same set of opcodes might be used
+ * for some other operation on a flash that does not support this. Support for
+ * this sequence can be discovered via SFDP in the BFPT table.
+ */
+static void spi_nor_soft_reset(struct spi_nor *nor)
+{
+ struct spi_mem_op op;
+ int ret;
+
+ op = (struct spi_mem_op)SPINOR_SRSTEN_OP;
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret) {
+ dev_warn(nor->dev, "Software reset failed: %d\n", ret);
+ return;
+ }
+
+ op = (struct spi_mem_op)SPINOR_SRST_OP;
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret) {
+ dev_warn(nor->dev, "Software reset failed: %d\n", ret);
+ return;
+ }
+
+ /*
+ * Software Reset is not instant, and the delay varies from flash to
+ * flash. Looking at a few flashes, most range somewhere below 100
+ * microseconds. So, sleep for a range of 200-400 us.
+ */
+ usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
+}
+
+/* mtd suspend handler */
+static int spi_nor_suspend(struct mtd_info *mtd)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ /* Disable octal DTR mode if we enabled it. */
+ ret = spi_nor_set_octal_dtr(nor, false);
+ if (ret)
+ dev_err(nor->dev, "suspend() failed\n");
+
+ return ret;
+}
+
+/* mtd resume handler */
+static void spi_nor_resume(struct mtd_info *mtd)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ struct device *dev = nor->dev;
+ int ret;
+
+ /* re-initialize the nor chip */
+ ret = spi_nor_init(nor);
+ if (ret)
+ dev_err(dev, "resume() failed\n");
+}
+
+static int spi_nor_get_device(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct spi_nor *nor = mtd_to_spi_nor(master);
+ struct device *dev;
+
+ if (nor->spimem)
+ dev = nor->spimem->spi->controller->dev.parent;
+ else
+ dev = nor->dev;
+
+ if (!try_module_get(dev->driver->owner))
+ return -ENODEV;
+
+ return 0;
+}
+
+static void spi_nor_put_device(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct spi_nor *nor = mtd_to_spi_nor(master);
+ struct device *dev;
+
+ if (nor->spimem)
+ dev = nor->spimem->spi->controller->dev.parent;
+ else
+ dev = nor->dev;
+
+ module_put(dev->driver->owner);
+}
+
+static void spi_nor_restore(struct spi_nor *nor)
+{
+ int ret;
+
+ /* restore the addressing mode */
+ if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
+ nor->flags & SNOR_F_BROKEN_RESET) {
+ ret = spi_nor_set_4byte_addr_mode(nor, false);
+ if (ret)
+ /*
+ * Do not stop the execution in the hope that the flash
+ * will default to the 3-byte address mode after the
+ * software reset.
+ */
+ dev_err(nor->dev, "Failed to exit 4-byte address mode, err = %d\n", ret);
+ }
+
+ if (nor->flags & SNOR_F_SOFT_RESET)
+ spi_nor_soft_reset(nor);
+}
+
+static const struct flash_info *spi_nor_match_name(struct spi_nor *nor,
+ const char *name)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
+ for (j = 0; j < manufacturers[i]->nparts; j++) {
+ if (!strcmp(name, manufacturers[i]->parts[j].name)) {
+ nor->manufacturer = manufacturers[i];
+ return &manufacturers[i]->parts[j];
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
+ const char *name)
+{
+ const struct flash_info *info = NULL;
+
+ if (name)
+ info = spi_nor_match_name(nor, name);
+ /* Try to auto-detect if chip name wasn't specified or not found */
+ if (!info)
+ return spi_nor_detect(nor);
+
+ /*
+ * If caller has specified name of flash model that can normally be
+ * detected using JEDEC, let's verify it.
+ */
+ if (name && info->id_len) {
+ const struct flash_info *jinfo;
+
+ jinfo = spi_nor_detect(nor);
+ if (IS_ERR(jinfo)) {
+ return jinfo;
+ } else if (jinfo != info) {
+ /*
+ * JEDEC knows better, so overwrite platform ID. We
+ * can't trust partitions any longer, but we'll let
+ * mtd apply them anyway, since some partitions may be
+ * marked read-only, and we don't want to lose that
+ * information, even if it's not 100% accurate.
+ */
+ dev_warn(nor->dev, "found %s, expected %s\n",
+ jinfo->name, info->name);
+ info = jinfo;
+ }
+ }
+
+ return info;
+}
+
+static void spi_nor_set_mtd_info(struct spi_nor *nor)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ struct device *dev = nor->dev;
+
+ spi_nor_set_mtd_locking_ops(nor);
+ spi_nor_set_mtd_otp_ops(nor);
+
+ mtd->dev.parent = dev;
+ if (!mtd->name)
+ mtd->name = dev_name(dev);
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_CAP_NORFLASH;
+ /* Unset MTD_BIT_WRITEABLE to enable JFFS2 write buffer for ECC'd NOR */
+ if (nor->flags & SNOR_F_ECC)
+ mtd->flags &= ~MTD_BIT_WRITEABLE;
+ if (nor->info->flags & SPI_NOR_NO_ERASE)
+ mtd->flags |= MTD_NO_ERASE;
+ else
+ mtd->_erase = spi_nor_erase;
+ mtd->writesize = nor->params->writesize;
+ mtd->writebufsize = nor->params->page_size;
+ mtd->size = nor->params->size;
+ mtd->_read = spi_nor_read;
+ /* Might be already set by some SST flashes. */
+ if (!mtd->_write)
+ mtd->_write = spi_nor_write;
+ mtd->_suspend = spi_nor_suspend;
+ mtd->_resume = spi_nor_resume;
+ mtd->_get_device = spi_nor_get_device;
+ mtd->_put_device = spi_nor_put_device;
+}
+
+static int spi_nor_hw_reset(struct spi_nor *nor)
+{
+ struct gpio_desc *reset;
+
+ reset = devm_gpiod_get_optional(nor->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR_OR_NULL(reset))
+ return PTR_ERR_OR_ZERO(reset);
+
+ /*
+ * Delay values chosen experimentally, guided by several flash device
+ * vendors' datasheets.
+ */
+ usleep_range(1, 5);
+ gpiod_set_value_cansleep(reset, 1);
+ usleep_range(100, 150);
+ gpiod_set_value_cansleep(reset, 0);
+ usleep_range(1000, 1200);
+
+ return 0;
+}
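+
+/*
+ * The optional "reset" GPIO above is normally described in the device
+ * tree; a sketch (node and GPIO names assumed, not taken from a real
+ * board):
+ *
+ *	flash@0 {
+ *		compatible = "jedec,spi-nor";
+ *		reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+ *	};
+ */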
+
+int spi_nor_scan(struct spi_nor *nor, const char *name,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ const struct flash_info *info;
+ struct device *dev = nor->dev;
+ struct mtd_info *mtd = &nor->mtd;
+ int ret;
+ int i;
+
+ ret = spi_nor_check(nor);
+ if (ret)
+ return ret;
+
+ /* Reset SPI protocol for all commands. */
+ nor->reg_proto = SNOR_PROTO_1_1_1;
+ nor->read_proto = SNOR_PROTO_1_1_1;
+ nor->write_proto = SNOR_PROTO_1_1_1;
+
+ /*
+ * We need the bounce buffer early to read/write registers when going
+ * through the spi-mem layer (buffers have to be DMA-able).
+ * For spi-mem drivers, we'll reallocate a new buffer if
+ * nor->params->page_size turns out to be greater than PAGE_SIZE (which
+ * shouldn't happen any time soon since NOR pages are usually less
+ * than 1KB) after spi_nor_scan() returns.
+ */
+ nor->bouncebuf_size = PAGE_SIZE;
+ nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
+ GFP_KERNEL);
+ if (!nor->bouncebuf)
+ return -ENOMEM;
+
+ ret = spi_nor_hw_reset(nor);
+ if (ret)
+ return ret;
+
+ info = spi_nor_get_flash_info(nor, name);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ nor->info = info;
+
+ mutex_init(&nor->lock);
+
+ /* Init flash parameters based on flash_info struct and SFDP */
+ ret = spi_nor_init_params(nor);
+ if (ret)
+ return ret;
+
+ if (spi_nor_use_parallel_locking(nor))
+ init_waitqueue_head(&nor->rww.wait);
+
+ /*
+ * Configure the SPI memory:
+ * - select op codes for (Fast) Read, Page Program and Sector Erase.
+ * - set the number of dummy cycles (mode cycles + wait states).
+ * - set the SPI protocols for register and memory accesses.
+ * - set the number of address bytes.
+ */
+ ret = spi_nor_setup(nor, hwcaps);
+ if (ret)
+ return ret;
+
+ /* Send all the required SPI flash commands to initialize device */
+ ret = spi_nor_init(nor);
+ if (ret)
+ return ret;
+
+ /* No mtd_info fields should be used up to this point. */
+ spi_nor_set_mtd_info(nor);
+
+ dev_info(dev, "%s (%lld Kbytes)\n", info->name,
+ (long long)mtd->size >> 10);
+
+ dev_dbg(dev,
+ "mtd .name = %s, .size = 0x%llx (%lldMiB), "
+ ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
+ mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
+ mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
+
+ if (mtd->numeraseregions)
+ for (i = 0; i < mtd->numeraseregions; i++)
+ dev_dbg(dev,
+ "mtd.eraseregions[%d] = { .offset = 0x%llx, "
+ ".erasesize = 0x%.8x (%uKiB), "
+ ".numblocks = %d }\n",
+ i, (long long)mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].erasesize / 1024,
+ mtd->eraseregions[i].numblocks);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_nor_scan);
+
+static int spi_nor_create_read_dirmap(struct spi_nor *nor)
+{
+ struct spi_mem_dirmap_info info = {
+ .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
+ SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
+ SPI_MEM_OP_DATA_IN(0, NULL, 0)),
+ .offset = 0,
+ .length = nor->params->size,
+ };
+ struct spi_mem_op *op = &info.op_tmpl;
+
+ spi_nor_spimem_setup_op(nor, op, nor->read_proto);
+
+ /* convert the dummy cycles to the number of bytes */
+ op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
+ if (spi_nor_protocol_is_dtr(nor->read_proto))
+ op->dummy.nbytes *= 2;
+
+ /*
+ * Since spi_nor_spimem_setup_op() only sets buswidth when the number
+ * of data bytes is non-zero, the data buswidth won't be set here. So,
+ * do it explicitly.
+ */
+ op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
+
+ nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
+ &info);
+ return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
+}
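+
+/*
+ * Worked example for the dummy-cycle conversion above: read_dummy = 8
+ * cycles on a 4-bit-wide dummy phase gives nbytes = 8 * 4 / 8 = 4; DTR
+ * protocols transfer on both clock edges, hence the doubling.
+ */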
+
+static int spi_nor_create_write_dirmap(struct spi_nor *nor)
+{
+ struct spi_mem_dirmap_info info = {
+ .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
+ .offset = 0,
+ .length = nor->params->size,
+ };
+ struct spi_mem_op *op = &info.op_tmpl;
+
+ if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+ op->addr.nbytes = 0;
+
+ spi_nor_spimem_setup_op(nor, op, nor->write_proto);
+
+ /*
+ * Since spi_nor_spimem_setup_op() only sets buswidth when the number
+ * of data bytes is non-zero, the data buswidth won't be set here. So,
+ * do it explicitly.
+ */
+ op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
+
+ nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
+ &info);
+ return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
+}
+
+static int spi_nor_probe(struct spi_mem *spimem)
+{
+ struct spi_device *spi = spimem->spi;
+ struct flash_platform_data *data = dev_get_platdata(&spi->dev);
+ struct spi_nor *nor;
+ /*
+ * Enable all caps by default. The core will mask them after
+ * checking what's really supported using spi_mem_supports_op().
+ */
+ const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
+ char *flash_name;
+ int ret;
+
+ nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
+ if (!nor)
+ return -ENOMEM;
+
+ nor->spimem = spimem;
+ nor->dev = &spi->dev;
+ spi_nor_set_flash_node(nor, spi->dev.of_node);
+
+ spi_mem_set_drvdata(spimem, nor);
+
+ if (data && data->name)
+ nor->mtd.name = data->name;
+
+ if (!nor->mtd.name)
+ nor->mtd.name = spi_mem_get_name(spimem);
+
+ /*
+ * For some (historical?) reason many platforms provide two different
+ * names in flash_platform_data: "name" and "type". Quite often name is
+ * set to "m25p80" and then "type" provides a real chip name.
+ * If that's the case, respect "type" and ignore a "name".
+ */
+ if (data && data->type)
+ flash_name = data->type;
+ else if (!strcmp(spi->modalias, "spi-nor"))
+ flash_name = NULL; /* auto-detect */
+ else
+ flash_name = spi->modalias;
+
+ ret = spi_nor_scan(nor, flash_name, &hwcaps);
+ if (ret)
+ return ret;
+
+ spi_nor_debugfs_register(nor);
+
+ /*
+ * None of the existing parts have > 512B pages, but let's play safe
+ * and add this logic so that if anyone ever adds support for such
+ * a NOR we don't end up with buffer overflows.
+ */
+ if (nor->params->page_size > PAGE_SIZE) {
+ nor->bouncebuf_size = nor->params->page_size;
+ devm_kfree(nor->dev, nor->bouncebuf);
+ nor->bouncebuf = devm_kmalloc(nor->dev,
+ nor->bouncebuf_size,
+ GFP_KERNEL);
+ if (!nor->bouncebuf)
+ return -ENOMEM;
+ }
+
+ ret = spi_nor_create_read_dirmap(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_create_write_dirmap(nor);
+ if (ret)
+ return ret;
+
+ return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
+}
+
+static int spi_nor_remove(struct spi_mem *spimem)
+{
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ spi_nor_restore(nor);
+
+ /* Clean up MTD stuff. */
+ return mtd_device_unregister(&nor->mtd);
+}
+
+static void spi_nor_shutdown(struct spi_mem *spimem)
+{
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ spi_nor_restore(nor);
+}
+
+/*
+ * Do NOT add to this array without reading the following:
+ *
+ * Historically, many flash devices are bound to this driver by their name. But
+ * since most of these flashes are compatible to some extent, and their
+ * differences can often be distinguished by the JEDEC read-ID command, we
+ * encourage new users to add support to the spi-nor library, and simply bind
+ * against a generic string here (e.g., "jedec,spi-nor").
+ *
+ * Many flash names are kept here in this list to keep them available
+ * as module aliases for existing platforms.
+ */
+static const struct spi_device_id spi_nor_dev_ids[] = {
+ /*
+ * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
+ * hack around the fact that the SPI core does not provide uevent
+ * matching for .of_match_table
+ */
+ {"spi-nor"},
+
+ /*
+ * Entries not used in DTs that should be safe to drop after replacing
+ * them with "spi-nor" in platform data.
+ */
+ {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
+
+ /*
+ * Entries that were used in DTs without "jedec,spi-nor" fallback and
+ * should be kept for backward compatibility.
+ */
+ {"at25df321a"}, {"at25df641"}, {"at26df081a"},
+ {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
+ {"mx25l25635e"},{"mx66l51235l"},
+ {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
+ {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"},
+ {"s25fl064k"},
+ {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
+ {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"},
+ {"m25p64"}, {"m25p128"},
+ {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
+ {"w25q80bl"}, {"w25q128"}, {"w25q256"},
+
+ /* Flashes that can't be detected using JEDEC */
+ {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
+ {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
+ {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
+
+ /* Everspin MRAMs (non-JEDEC) */
+ { "mr25h128" }, /* 128 Kib, 40 MHz */
+ { "mr25h256" }, /* 256 Kib, 40 MHz */
+ { "mr25h10" }, /* 1 Mib, 40 MHz */
+ { "mr25h40" }, /* 4 Mib, 40 MHz */
+
+ { },
+};
+MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
+
+static const struct of_device_id spi_nor_of_table[] = {
+ /*
+ * Generic compatibility for SPI NOR that can be identified by the
+ * JEDEC READ ID opcode (0x9F). Use this, if possible.
+ */
+ { .compatible = "jedec,spi-nor" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, spi_nor_of_table);
+
+/*
+ * REVISIT: many of these chips have deep power-down modes, which
+ * should clearly be entered on suspend() to minimize power use.
+ * And also when they're otherwise idle...
+ */
+static struct spi_mem_driver spi_nor_driver = {
+ .spidrv = {
+ .driver = {
+ .name = "spi-nor",
+ .of_match_table = spi_nor_of_table,
+ .dev_groups = spi_nor_sysfs_groups,
+ },
+ .id_table = spi_nor_dev_ids,
+ },
+ .probe = spi_nor_probe,
+ .remove = spi_nor_remove,
+ .shutdown = spi_nor_shutdown,
+};
+
+static int __init spi_nor_module_init(void)
+{
+ return spi_mem_driver_register(&spi_nor_driver);
+}
+module_init(spi_nor_module_init);
+
+static void __exit spi_nor_module_exit(void)
+{
+ spi_mem_driver_unregister(&spi_nor_driver);
+ spi_nor_debugfs_shutdown();
+}
+module_exit(spi_nor_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
+MODULE_AUTHOR("Mike Lavender");
+MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
new file mode 100644
index 0000000000..9217379b9c
--- /dev/null
+++ b/drivers/mtd/spi-nor/core.h
@@ -0,0 +1,745 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#ifndef __LINUX_MTD_SPI_NOR_INTERNAL_H
+#define __LINUX_MTD_SPI_NOR_INTERNAL_H
+
+#include "sfdp.h"
+
+#define SPI_NOR_MAX_ID_LEN 6
+
+/* Standard SPI NOR flash operations. */
+#define SPI_NOR_READID_OP(naddr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 0), \
+ SPI_MEM_OP_ADDR(naddr, 0, 0), \
+ SPI_MEM_OP_DUMMY(ndummy, 0), \
+ SPI_MEM_OP_DATA_IN(len, buf, 0))
+
+#define SPI_NOR_WREN_OP \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPI_NOR_WRDI_OP \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPI_NOR_RDSR_OP(buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_IN(1, buf, 0))
+
+#define SPI_NOR_WRSR_OP(buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 0))
+
+#define SPI_NOR_RDSR2_OP(buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_IN(1, buf, 0))
+
+#define SPI_NOR_WRSR2_OP(buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(1, buf, 0))
+
+#define SPI_NOR_RDCR_OP(buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_IN(1, buf, 0))
+
+#define SPI_NOR_EN4B_EX4B_OP(enable) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPI_NOR_BRWR_OP(buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(1, buf, 0))
+
+#define SPI_NOR_GBULK_OP \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_GBULK, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPI_NOR_CHIP_ERASE_OP \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPI_NOR_SECTOR_ERASE_OP(opcode, addr_nbytes, addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
+ SPI_MEM_OP_ADDR(addr_nbytes, addr, 0), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPI_NOR_READ_OP(opcode) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
+ SPI_MEM_OP_ADDR(3, 0, 0), \
+ SPI_MEM_OP_DUMMY(1, 0), \
+ SPI_MEM_OP_DATA_IN(2, NULL, 0))
+
+#define SPI_NOR_PP_OP(opcode) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
+ SPI_MEM_OP_ADDR(3, 0, 0), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(2, NULL, 0))
+
+#define SPINOR_SRSTEN_OP \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRSTEN, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINOR_SRST_OP \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRST, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
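+
+/*
+ * The op templates above are copied into a local struct spi_mem_op,
+ * adjusted for the current protocol and then executed, e.g. (sketch
+ * mirroring spi_nor_soft_reset() in core.c):
+ *
+ *	struct spi_mem_op op = (struct spi_mem_op)SPI_NOR_WREN_OP;
+ *
+ *	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+ *	ret = spi_mem_exec_op(nor->spimem, &op);
+ */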
+
+/* Keep these in sync with the list in debugfs.c */
+enum spi_nor_option_flags {
+ SNOR_F_HAS_SR_TB = BIT(0),
+ SNOR_F_NO_OP_CHIP_ERASE = BIT(1),
+ SNOR_F_BROKEN_RESET = BIT(2),
+ SNOR_F_4B_OPCODES = BIT(3),
+ SNOR_F_HAS_4BAIT = BIT(4),
+ SNOR_F_HAS_LOCK = BIT(5),
+ SNOR_F_HAS_16BIT_SR = BIT(6),
+ SNOR_F_NO_READ_CR = BIT(7),
+ SNOR_F_HAS_SR_TB_BIT6 = BIT(8),
+ SNOR_F_HAS_4BIT_BP = BIT(9),
+ SNOR_F_HAS_SR_BP3_BIT6 = BIT(10),
+ SNOR_F_IO_MODE_EN_VOLATILE = BIT(11),
+ SNOR_F_SOFT_RESET = BIT(12),
+ SNOR_F_SWP_IS_VOLATILE = BIT(13),
+ SNOR_F_RWW = BIT(14),
+ SNOR_F_ECC = BIT(15),
+ SNOR_F_NO_WP = BIT(16),
+};
+
+struct spi_nor_read_command {
+ u8 num_mode_clocks;
+ u8 num_wait_states;
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+struct spi_nor_pp_command {
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+enum spi_nor_read_command_index {
+ SNOR_CMD_READ,
+ SNOR_CMD_READ_FAST,
+ SNOR_CMD_READ_1_1_1_DTR,
+
+ /* Dual SPI */
+ SNOR_CMD_READ_1_1_2,
+ SNOR_CMD_READ_1_2_2,
+ SNOR_CMD_READ_2_2_2,
+ SNOR_CMD_READ_1_2_2_DTR,
+
+ /* Quad SPI */
+ SNOR_CMD_READ_1_1_4,
+ SNOR_CMD_READ_1_4_4,
+ SNOR_CMD_READ_4_4_4,
+ SNOR_CMD_READ_1_4_4_DTR,
+
+ /* Octal SPI */
+ SNOR_CMD_READ_1_1_8,
+ SNOR_CMD_READ_1_8_8,
+ SNOR_CMD_READ_8_8_8,
+ SNOR_CMD_READ_1_8_8_DTR,
+ SNOR_CMD_READ_8_8_8_DTR,
+
+ SNOR_CMD_READ_MAX
+};
+
+enum spi_nor_pp_command_index {
+ SNOR_CMD_PP,
+
+ /* Quad SPI */
+ SNOR_CMD_PP_1_1_4,
+ SNOR_CMD_PP_1_4_4,
+ SNOR_CMD_PP_4_4_4,
+
+ /* Octal SPI */
+ SNOR_CMD_PP_1_1_8,
+ SNOR_CMD_PP_1_8_8,
+ SNOR_CMD_PP_8_8_8,
+ SNOR_CMD_PP_8_8_8_DTR,
+
+ SNOR_CMD_PP_MAX
+};
+
+/**
+ * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type.
+ * JEDEC JESD216B requires erase sizes to be a power of 2.
+ * @size_shift: @size is a power of 2, the shift is stored in
+ * @size_shift.
+ * @size_mask: the size mask based on @size_shift.
+ * @opcode: the SPI command op code to erase the sector/block.
+ * @idx: Erase Type index as sorted in the Basic Flash Parameter
+ * Table. It will be used to synchronize the supported
+ * Erase Types with the ones identified in the SFDP
+ * optional tables.
+ */
+struct spi_nor_erase_type {
+ u32 size;
+ u32 size_shift;
+ u32 size_mask;
+ u8 opcode;
+ u8 idx;
+};
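+
+/*
+ * Worked example (assuming size_mask = size - 1 for the power-of-2 sizes
+ * required by JESD216B): a 64 KiB erase type has size = 0x10000,
+ * size_shift = 16 and size_mask = 0xffff, so (addr & size_mask) == 0
+ * tests whether addr is aligned to this erase type.
+ */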
+
+/**
+ * struct spi_nor_erase_command - Used for non-uniform erases
+ * The structure is used to describe a list of erase commands to be executed
+ * once we validate that the erase can be performed. The elements in the list
+ * are run-length encoded.
+ * @list: for inclusion into the list of erase commands.
+ * @count: how many times the same erase command should be
+ * consecutively used.
+ * @size: the size of the sector/block erased by the command.
+ * @opcode: the SPI command op code to erase the sector/block.
+ */
+struct spi_nor_erase_command {
+ struct list_head list;
+ u32 count;
+ u32 size;
+ u8 opcode;
+};
+
+/**
+ * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region
+ * @offset: the offset in the data array of erase region start.
+ * The least significant bits are used as a bitmask encoding flags to
+ * determine if this region is overlaid, if this region is
+ * the last in the SPI NOR flash memory and to indicate
+ * all the supported erase commands inside this region.
+ * The erase types are sorted in ascending order with the
+ * smallest Erase Type size being at BIT(0).
+ * @size: the size of the region in bytes.
+ */
+struct spi_nor_erase_region {
+ u64 offset;
+ u64 size;
+};
+
+#define SNOR_ERASE_TYPE_MAX 4
+#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0)
+
+#define SNOR_LAST_REGION BIT(4)
+#define SNOR_OVERLAID_REGION BIT(5)
+
+#define SNOR_ERASE_FLAGS_MAX 6
+#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0)
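+
+/*
+ * Example of the @offset encoding described in spi_nor_erase_region: a
+ * last region starting at 0x10000 that supports erase types 0 and 2
+ * stores 0x10000 | BIT(0) | BIT(2) | SNOR_LAST_REGION, and the start
+ * address is recovered with (offset & ~SNOR_ERASE_FLAGS_MASK), as done
+ * in debugfs.c.
+ */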
+
+/**
+ * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map
+ * @regions: array of erase regions. The regions are consecutive in
+ * address space. Walking through the regions is done
+ * incrementally.
+ * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform
+ * sector size (legacy implementation).
+ * @erase_type: an array of erase types shared by all the regions.
+ * The erase types are sorted in ascending order, with the
+ * smallest Erase Type size being the first member in the
+ * erase_type array.
+ * @uniform_erase_type: bitmask encoding erase types that can erase the
+ * entire memory. This member is completed at init by
+ * uniform and non-uniform SPI NOR flash memories if they
+ * support at least one erase type that can erase the
+ * entire memory.
+ */
+struct spi_nor_erase_map {
+ struct spi_nor_erase_region *regions;
+ struct spi_nor_erase_region uniform_region;
+ struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX];
+ u8 uniform_erase_type;
+};
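+
+/*
+ * Example for @uniform_erase_type: if erase types 1 (4 KiB) and 3
+ * (64 KiB) can both erase the entire memory, the bitmask is
+ * BIT(1) | BIT(3) (illustrative indices).
+ */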
+
+/**
+ * struct spi_nor_locking_ops - SPI NOR locking methods
+ * @lock: lock a region of the SPI NOR.
+ * @unlock: unlock a region of the SPI NOR.
+ * @is_locked: check if a region of the SPI NOR is completely locked
+ */
+struct spi_nor_locking_ops {
+ int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+};
+
+/**
+ * struct spi_nor_otp_organization - Structure to describe the SPI NOR OTP regions
+ * @len: size of one OTP region in bytes.
+ * @base: start address of the OTP area.
+ * @offset: offset between consecutive OTP regions if there are more
+ * than one.
+ * @n_regions: number of individual OTP regions.
+ */
+struct spi_nor_otp_organization {
+ size_t len;
+ loff_t base;
+ loff_t offset;
+ unsigned int n_regions;
+};
+
+/**
+ * struct spi_nor_otp_ops - SPI NOR OTP methods
+ * @read: read from the SPI NOR OTP area.
+ * @write: write to the SPI NOR OTP area.
+ * @lock: lock an OTP region.
+ * @erase: erase an OTP region.
+ * @is_locked: check if an OTP region of the SPI NOR is locked.
+ */
+struct spi_nor_otp_ops {
+ int (*read)(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf);
+ int (*write)(struct spi_nor *nor, loff_t addr, size_t len,
+ const u8 *buf);
+ int (*lock)(struct spi_nor *nor, unsigned int region);
+ int (*erase)(struct spi_nor *nor, loff_t addr);
+ int (*is_locked)(struct spi_nor *nor, unsigned int region);
+};
+
+/**
+ * struct spi_nor_otp - SPI NOR OTP grouping structure
+ * @org: OTP region organization
+ * @ops: OTP access ops
+ */
+struct spi_nor_otp {
+ const struct spi_nor_otp_organization *org;
+ const struct spi_nor_otp_ops *ops;
+};
+
+/**
+ * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings.
+ * Includes legacy flash parameters and settings that can be overwritten
+ * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216
+ * Serial Flash Discoverable Parameters (SFDP) tables.
+ *
+ * @bank_size: the flash memory bank density in bytes.
+ * @size: the total flash memory density in bytes.
+ * @writesize: Minimal writable flash unit size. Defaults to 1. Set to
+ * ECC unit size for ECC-ed flashes.
+ * @page_size: the page size of the SPI NOR flash memory.
+ * @addr_nbytes: number of address bytes to send.
+ * @addr_mode_nbytes: number of address bytes of current address mode. Useful
+ * when the flash operates with 4B opcodes but needs the
+ * internal address mode for opcodes that don't have a 4B
+ * opcode correspondent.
+ * @rdsr_dummy: dummy cycles needed for Read Status Register command
+ * in octal DTR mode.
+ * @rdsr_addr_nbytes: dummy address bytes needed for Read Status Register
+ * command in octal DTR mode.
+ * @n_dice: number of dice in the flash memory.
+ * @vreg_offset: volatile register offset for each die.
+ * @hwcaps: describes the read and page program hardware
+ * capabilities.
+ * @reads: read capabilities ordered by priority: the higher index
+ * in the array, the higher priority.
+ * @page_programs: page program capabilities ordered by priority: the
+ * higher index in the array, the higher priority.
+ * @erase_map: the erase map parsed from the SFDP Sector Map Parameter
+ * Table.
+ * @otp: SPI NOR OTP info.
+ * @set_octal_dtr: enables or disables SPI NOR octal DTR mode.
+ * @quad_enable: enables SPI NOR quad mode.
+ * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
+ * @convert_addr: converts an absolute address into something the flash
+ * will understand. Particularly useful when pagesize is
+ * not a power-of-2.
+ * @setup: (optional) configures the SPI NOR memory. Useful for
+ * SPI NOR flashes that have peculiarities to the SPI NOR
+ * standard e.g. different opcodes, specific address
+ * calculation, page size, etc.
+ * @ready: (optional) flashes might use a different mechanism
+ * than reading the status register to indicate they
+ * are ready for a new command
+ * @locking_ops: SPI NOR locking methods.
+ * @priv: flash's private data.
+ */
+struct spi_nor_flash_parameter {
+ u64 bank_size;
+ u64 size;
+ u32 writesize;
+ u32 page_size;
+ u8 addr_nbytes;
+ u8 addr_mode_nbytes;
+ u8 rdsr_dummy;
+ u8 rdsr_addr_nbytes;
+ u8 n_dice;
+ u32 *vreg_offset;
+
+ struct spi_nor_hwcaps hwcaps;
+ struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
+ struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
+
+ struct spi_nor_erase_map erase_map;
+ struct spi_nor_otp otp;
+
+ int (*set_octal_dtr)(struct spi_nor *nor, bool enable);
+ int (*quad_enable)(struct spi_nor *nor);
+ int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
+ u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
+ int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);
+ int (*ready)(struct spi_nor *nor);
+
+ const struct spi_nor_locking_ops *locking_ops;
+ void *priv;
+};
+
+/**
+ * struct spi_nor_fixups - SPI NOR fixup hooks
+ * @default_init: called after default flash parameters init. Used to tweak
+ * flash parameters when information provided by the flash_info
+ * table is incomplete or wrong.
+ * @post_bfpt: called after the BFPT table has been parsed
+ * @post_sfdp: called after SFDP has been parsed (is also called for SPI NORs
+ * that do not support RDSFDP). Typically used to tweak various
+ * parameters that could not be extracted by other means (i.e.
+ * when information provided by the SFDP/flash_info tables is
+ * incomplete or wrong).
+ * @late_init: used to initialize flash parameters that are not declared in the
+ * JESD216 SFDP standard, or where SFDP tables are not defined at all.
+ * Will replace the default_init() hook.
+ *
+ * Those hooks can be used to tweak the SPI NOR configuration when the SFDP
+ * table is broken or not available.
+ */
+struct spi_nor_fixups {
+ void (*default_init)(struct spi_nor *nor);
+ int (*post_bfpt)(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt);
+ int (*post_sfdp)(struct spi_nor *nor);
+ int (*late_init)(struct spi_nor *nor);
+};
+
+/**
+ * struct flash_info - SPI NOR flash_info entry.
+ * @name: the name of the flash.
+ * @id: the flash's ID bytes. The first three bytes are the
+ * JEDEC ID. JEDEC ID zero means "no ID" (mostly older chips).
+ * @id_len: the number of bytes of ID.
+ * @sector_size: the size listed here is what works with SPINOR_OP_SE, which
+ * isn't necessarily called a "sector" by the vendor.
+ * @n_sectors: the number of sectors.
+ * @n_banks: the number of banks.
+ * @page_size: the flash's page size.
+ * @addr_nbytes: number of address bytes to send.
+ *
+ * @parse_sfdp: true when flash supports SFDP tables. The false value has no
+ * meaning. If one wants to skip the SFDP tables, one should
+ * instead use the SPI_NOR_SKIP_SFDP sfdp_flag.
+ * @flags: flags that indicate support that is not defined by the
+ * JESD216 standard in its SFDP tables. Flag meanings:
+ * SPI_NOR_HAS_LOCK: flash supports lock/unlock via SR
+ * SPI_NOR_HAS_TB: flash SR has Top/Bottom (TB) protect bit. Must be
+ * used with SPI_NOR_HAS_LOCK.
+ * SPI_NOR_TB_SR_BIT6: Top/Bottom (TB) is bit 6 of status register.
+ * Must be used with SPI_NOR_HAS_TB.
+ * SPI_NOR_4BIT_BP: flash SR has 4 bit fields (BP0-3) for block
+ * protection.
+ * SPI_NOR_BP3_SR_BIT6: BP3 is bit 6 of status register. Must be used with
+ * SPI_NOR_4BIT_BP.
+ * SPI_NOR_SWP_IS_VOLATILE: flash has volatile software write protection bits.
+ * Usually these will power-up in a write-protected
+ * state.
+ * SPI_NOR_NO_ERASE: no erase command needed.
+ * NO_CHIP_ERASE: chip does not support chip erase.
+ * SPI_NOR_NO_FR: can't do fastread.
+ * SPI_NOR_QUAD_PP: flash supports Quad Input Page Program.
+ * SPI_NOR_RWW: flash supports reads while write.
+ *
+ * @no_sfdp_flags: flags that indicate support that can be discovered via SFDP.
+ * Used when SFDP tables are not defined in the flash. These
+ * flags are used together with the SPI_NOR_SKIP_SFDP flag.
+ * SPI_NOR_SKIP_SFDP: skip parsing of SFDP tables.
+ * SECT_4K: SPINOR_OP_BE_4K works uniformly.
+ * SPI_NOR_DUAL_READ: flash supports Dual Read.
+ * SPI_NOR_QUAD_READ: flash supports Quad Read.
+ * SPI_NOR_OCTAL_READ: flash supports Octal Read.
+ * SPI_NOR_OCTAL_DTR_READ: flash supports octal DTR Read.
+ * SPI_NOR_OCTAL_DTR_PP: flash supports Octal DTR Page Program.
+ *
+ * @fixup_flags: flags that indicate support that can be discovered via SFDP
+ * ideally, but can not be discovered for this particular flash
+ * because the SFDP table that indicates this support is not
+ * defined by the flash. In case the table for this support is
+ * defined but has wrong values, one should instead use a
+ * post_sfdp() hook to set the SNOR_F equivalent flag.
+ *
+ * SPI_NOR_4B_OPCODES: use dedicated 4byte address op codes to support
+ * memory size above 128Mib.
+ * SPI_NOR_IO_MODE_EN_VOLATILE: flash enables the best available I/O mode
+ * via a volatile bit.
+ * @mfr_flags: manufacturer private flags. Used in the manufacturer fixup
+ * hooks to differentiate support between flashes of the same
+ * manufacturer.
+ * @otp_org: flash's OTP organization.
+ * @fixups: part specific fixup hooks.
+ */
+struct flash_info {
+ char *name;
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ u8 id_len;
+ unsigned sector_size;
+ u16 n_sectors;
+ u16 page_size;
+ u8 n_banks;
+ u8 addr_nbytes;
+
+ bool parse_sfdp;
+ u16 flags;
+#define SPI_NOR_HAS_LOCK BIT(0)
+#define SPI_NOR_HAS_TB BIT(1)
+#define SPI_NOR_TB_SR_BIT6 BIT(2)
+#define SPI_NOR_4BIT_BP BIT(3)
+#define SPI_NOR_BP3_SR_BIT6 BIT(4)
+#define SPI_NOR_SWP_IS_VOLATILE BIT(5)
+#define SPI_NOR_NO_ERASE BIT(6)
+#define NO_CHIP_ERASE BIT(7)
+#define SPI_NOR_NO_FR BIT(8)
+#define SPI_NOR_QUAD_PP BIT(9)
+#define SPI_NOR_RWW BIT(10)
+
+ u8 no_sfdp_flags;
+#define SPI_NOR_SKIP_SFDP BIT(0)
+#define SECT_4K BIT(1)
+#define SPI_NOR_DUAL_READ BIT(3)
+#define SPI_NOR_QUAD_READ BIT(4)
+#define SPI_NOR_OCTAL_READ BIT(5)
+#define SPI_NOR_OCTAL_DTR_READ BIT(6)
+#define SPI_NOR_OCTAL_DTR_PP BIT(7)
+
+ u8 fixup_flags;
+#define SPI_NOR_4B_OPCODES BIT(0)
+#define SPI_NOR_IO_MODE_EN_VOLATILE BIT(1)
+
+ u8 mfr_flags;
+
+ const struct spi_nor_otp_organization otp_org;
+ const struct spi_nor_fixups *fixups;
+};
+
+#define SPI_NOR_ID_2ITEMS(_id) ((_id) >> 8) & 0xff, (_id) & 0xff
+#define SPI_NOR_ID_3ITEMS(_id) ((_id) >> 16) & 0xff, SPI_NOR_ID_2ITEMS(_id)
+
+#define SPI_NOR_ID(_jedec_id, _ext_id) \
+ .id = { SPI_NOR_ID_3ITEMS(_jedec_id), SPI_NOR_ID_2ITEMS(_ext_id) }, \
+ .id_len = !(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))
+
+#define SPI_NOR_ID6(_jedec_id, _ext_id) \
+ .id = { SPI_NOR_ID_3ITEMS(_jedec_id), SPI_NOR_ID_3ITEMS(_ext_id) }, \
+ .id_len = 6
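+
+/*
+ * Example expansion (illustrative ID): SPI_NOR_ID(0x1c7019, 0) yields
+ * .id = { 0x1c, 0x70, 0x19 } (MSB first) and .id_len = 3, since there is
+ * no extended ID.
+ */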
+
+#define SPI_NOR_GEOMETRY(_sector_size, _n_sectors, _n_banks) \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .n_banks = (_n_banks)
+
+/* Used when the "_ext_id" is two bytes at most */
+#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors) \
+ SPI_NOR_ID((_jedec_id), (_ext_id)), \
+ SPI_NOR_GEOMETRY((_sector_size), (_n_sectors), 1),
+
+#define INFOB(_jedec_id, _ext_id, _sector_size, _n_sectors, _n_banks) \
+ SPI_NOR_ID((_jedec_id), (_ext_id)), \
+ SPI_NOR_GEOMETRY((_sector_size), (_n_sectors), (_n_banks)),
+
+#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors) \
+ SPI_NOR_ID6((_jedec_id), (_ext_id)), \
+ SPI_NOR_GEOMETRY((_sector_size), (_n_sectors), 1),
+
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_nbytes) \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = (_page_size), \
+ .n_banks = 1, \
+ .addr_nbytes = (_addr_nbytes), \
+ .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR, \
+
+#define OTP_INFO(_len, _n_regions, _base, _offset) \
+ .otp_org = { \
+ .len = (_len), \
+ .base = (_base), \
+ .offset = (_offset), \
+ .n_regions = (_n_regions), \
+ },
+
+#define PARSE_SFDP \
+ .parse_sfdp = true, \
+
+#define FLAGS(_flags) \
+ .flags = (_flags), \
+
+#define NO_SFDP_FLAGS(_no_sfdp_flags) \
+ .no_sfdp_flags = (_no_sfdp_flags), \
+
+#define FIXUP_FLAGS(_fixup_flags) \
+ .fixup_flags = (_fixup_flags), \
+
+#define MFR_FLAGS(_mfr_flags) \
+ .mfr_flags = (_mfr_flags), \
+
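+/*
+ * Illustrative use of the helpers above for a hypothetical part (not a
+ * real entry):
+ *
+ *	{ "example25q64", INFO(0x123417, 0, 64 * 1024, 128)
+ *		FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ *		NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ */
+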
+/**
+ * struct spi_nor_manufacturer - SPI NOR manufacturer object
+ * @name: manufacturer name
+ * @parts: array of parts supported by this manufacturer
+ * @nparts: number of entries in the parts array
+ * @fixups: hooks called at various points in time during spi_nor_scan()
+ */
+struct spi_nor_manufacturer {
+ const char *name;
+ const struct flash_info *parts;
+ unsigned int nparts;
+ const struct spi_nor_fixups *fixups;
+};
+
+/**
+ * struct sfdp - SFDP data
+ * @num_dwords: number of entries in the dwords array
+ * @dwords: array of double words of the SFDP data
+ */
+struct sfdp {
+ size_t num_dwords;
+ u32 *dwords;
+};
+
+/* Manufacturer drivers. */
+extern const struct spi_nor_manufacturer spi_nor_atmel;
+extern const struct spi_nor_manufacturer spi_nor_catalyst;
+extern const struct spi_nor_manufacturer spi_nor_eon;
+extern const struct spi_nor_manufacturer spi_nor_esmt;
+extern const struct spi_nor_manufacturer spi_nor_everspin;
+extern const struct spi_nor_manufacturer spi_nor_fujitsu;
+extern const struct spi_nor_manufacturer spi_nor_gigadevice;
+extern const struct spi_nor_manufacturer spi_nor_intel;
+extern const struct spi_nor_manufacturer spi_nor_issi;
+extern const struct spi_nor_manufacturer spi_nor_macronix;
+extern const struct spi_nor_manufacturer spi_nor_micron;
+extern const struct spi_nor_manufacturer spi_nor_st;
+extern const struct spi_nor_manufacturer spi_nor_spansion;
+extern const struct spi_nor_manufacturer spi_nor_sst;
+extern const struct spi_nor_manufacturer spi_nor_winbond;
+extern const struct spi_nor_manufacturer spi_nor_xilinx;
+extern const struct spi_nor_manufacturer spi_nor_xmc;
+
+extern const struct attribute_group *spi_nor_sysfs_groups[];
+
+void spi_nor_spimem_setup_op(const struct spi_nor *nor,
+ struct spi_mem_op *op,
+ const enum spi_nor_protocol proto);
+int spi_nor_write_enable(struct spi_nor *nor);
+int spi_nor_write_disable(struct spi_nor *nor);
+int spi_nor_set_4byte_addr_mode_en4b_ex4b(struct spi_nor *nor, bool enable);
+int spi_nor_set_4byte_addr_mode_wren_en4b_ex4b(struct spi_nor *nor,
+ bool enable);
+int spi_nor_set_4byte_addr_mode_brwr(struct spi_nor *nor, bool enable);
+int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable);
+int spi_nor_wait_till_ready(struct spi_nor *nor);
+int spi_nor_global_block_unlock(struct spi_nor *nor);
+int spi_nor_prep_and_lock(struct spi_nor *nor);
+void spi_nor_unlock_and_unprep(struct spi_nor *nor);
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor);
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor);
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor);
+int spi_nor_read_id(struct spi_nor *nor, u8 naddr, u8 ndummy, u8 *id,
+ enum spi_nor_protocol reg_proto);
+int spi_nor_read_sr(struct spi_nor *nor, u8 *sr);
+int spi_nor_sr_ready(struct spi_nor *nor);
+int spi_nor_read_cr(struct spi_nor *nor, u8 *cr);
+int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len);
+int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1);
+int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr);
+
+ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
+ u8 *buf);
+ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf);
+int spi_nor_read_any_reg(struct spi_nor *nor, struct spi_mem_op *op,
+ enum spi_nor_protocol proto);
+int spi_nor_write_any_volatile_reg(struct spi_nor *nor, struct spi_mem_op *op,
+ enum spi_nor_protocol proto);
+int spi_nor_erase_sector(struct spi_nor *nor, u32 addr);
+
+int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf);
+int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len,
+ const u8 *buf);
+int spi_nor_otp_erase_secr(struct spi_nor *nor, loff_t addr);
+int spi_nor_otp_lock_sr2(struct spi_nor *nor, unsigned int region);
+int spi_nor_otp_is_locked_sr2(struct spi_nor *nor, unsigned int region);
+
+int spi_nor_hwcaps_read2cmd(u32 hwcaps);
+int spi_nor_hwcaps_pp2cmd(u32 hwcaps);
+u8 spi_nor_convert_3to4_read(u8 opcode);
+void spi_nor_set_read_settings(struct spi_nor_read_command *read,
+ u8 num_mode_clocks,
+ u8 num_wait_states,
+ u8 opcode,
+ enum spi_nor_protocol proto);
+void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
+ enum spi_nor_protocol proto);
+
+void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
+ u8 opcode);
+void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase);
+struct spi_nor_erase_region *
+spi_nor_region_next(struct spi_nor_erase_region *region);
+void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
+ u8 erase_mask, u64 flash_size);
+
+int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt);
+
+void spi_nor_init_default_locking_ops(struct spi_nor *nor);
+void spi_nor_try_unlock_all(struct spi_nor *nor);
+void spi_nor_set_mtd_locking_ops(struct spi_nor *nor);
+void spi_nor_set_mtd_otp_ops(struct spi_nor *nor);
+
+int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, size_t len);
+int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
+ const u8 *buf, size_t len);
+
+int spi_nor_check_sfdp_signature(struct spi_nor *nor);
+int spi_nor_parse_sfdp(struct spi_nor *nor);
+
+static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct spi_nor, mtd);
+}
+
+#ifdef CONFIG_DEBUG_FS
+void spi_nor_debugfs_register(struct spi_nor *nor);
+void spi_nor_debugfs_shutdown(void);
+#else
+static inline void spi_nor_debugfs_register(struct spi_nor *nor) {}
+static inline void spi_nor_debugfs_shutdown(void) {}
+#endif
+
+#endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */
diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
new file mode 100644
index 0000000000..6e163cb5b4
--- /dev/null
+++ b/drivers/mtd/spi-nor/debugfs.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/debugfs.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#include "core.h"
+
+#define SPI_NOR_DEBUGFS_ROOT "spi-nor"
+
+#define SNOR_F_NAME(name) [ilog2(SNOR_F_##name)] = #name
+static const char *const snor_f_names[] = {
+ SNOR_F_NAME(HAS_SR_TB),
+ SNOR_F_NAME(NO_OP_CHIP_ERASE),
+ SNOR_F_NAME(BROKEN_RESET),
+ SNOR_F_NAME(4B_OPCODES),
+ SNOR_F_NAME(HAS_4BAIT),
+ SNOR_F_NAME(HAS_LOCK),
+ SNOR_F_NAME(HAS_16BIT_SR),
+ SNOR_F_NAME(NO_READ_CR),
+ SNOR_F_NAME(HAS_SR_TB_BIT6),
+ SNOR_F_NAME(HAS_4BIT_BP),
+ SNOR_F_NAME(HAS_SR_BP3_BIT6),
+ SNOR_F_NAME(IO_MODE_EN_VOLATILE),
+ SNOR_F_NAME(SOFT_RESET),
+ SNOR_F_NAME(SWP_IS_VOLATILE),
+ SNOR_F_NAME(RWW),
+ SNOR_F_NAME(ECC),
+ SNOR_F_NAME(NO_WP),
+};
+#undef SNOR_F_NAME
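+
+/*
+ * The SNOR_F_* values are single bits, so ilog2() maps each flag to its
+ * bit index; e.g. SNOR_F_4B_OPCODES == BIT(3) lands at
+ * snor_f_names[3] == "4B_OPCODES".
+ */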
+
+static const char *spi_nor_protocol_name(enum spi_nor_protocol proto)
+{
+ switch (proto) {
+ case SNOR_PROTO_1_1_1: return "1S-1S-1S";
+ case SNOR_PROTO_1_1_2: return "1S-1S-2S";
+ case SNOR_PROTO_1_1_4: return "1S-1S-4S";
+ case SNOR_PROTO_1_1_8: return "1S-1S-8S";
+ case SNOR_PROTO_1_2_2: return "1S-2S-2S";
+ case SNOR_PROTO_1_4_4: return "1S-4S-4S";
+ case SNOR_PROTO_1_8_8: return "1S-8S-8S";
+ case SNOR_PROTO_2_2_2: return "2S-2S-2S";
+ case SNOR_PROTO_4_4_4: return "4S-4S-4S";
+ case SNOR_PROTO_8_8_8: return "8S-8S-8S";
+ case SNOR_PROTO_1_1_1_DTR: return "1D-1D-1D";
+ case SNOR_PROTO_1_2_2_DTR: return "1D-2D-2D";
+ case SNOR_PROTO_1_4_4_DTR: return "1D-4D-4D";
+ case SNOR_PROTO_1_8_8_DTR: return "1D-8D-8D";
+ case SNOR_PROTO_8_8_8_DTR: return "8D-8D-8D";
+ }
+
+ return "<unknown>";
+}
+
+static void spi_nor_print_flags(struct seq_file *s, unsigned long flags,
+ const char *const *names, int names_len)
+{
+ bool sep = false;
+ int i;
+
+ for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
+ if (!(flags & BIT(i)))
+ continue;
+ if (sep)
+ seq_puts(s, " | ");
+ sep = true;
+ if (i < names_len && names[i])
+ seq_puts(s, names[i]);
+ else
+ seq_printf(s, "1<<%d", i);
+ }
+}
+
+static int spi_nor_params_show(struct seq_file *s, void *data)
+{
+ struct spi_nor *nor = s->private;
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spi_nor_erase_map *erase_map = &params->erase_map;
+ struct spi_nor_erase_region *region;
+ const struct flash_info *info = nor->info;
+ char buf[16], *str;
+ int i;
+
+ seq_printf(s, "name\t\t%s\n", info->name);
+ seq_printf(s, "id\t\t%*ph\n", SPI_NOR_MAX_ID_LEN, nor->id);
+ string_get_size(params->size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ seq_printf(s, "size\t\t%s\n", buf);
+ seq_printf(s, "write size\t%u\n", params->writesize);
+ seq_printf(s, "page size\t%u\n", params->page_size);
+ seq_printf(s, "address nbytes\t%u\n", nor->addr_nbytes);
+
+ seq_puts(s, "flags\t\t");
+ spi_nor_print_flags(s, nor->flags, snor_f_names, ARRAY_SIZE(snor_f_names));
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\nopcodes\n");
+ seq_printf(s, " read\t\t0x%02x\n", nor->read_opcode);
+ seq_printf(s, " dummy cycles\t%u\n", nor->read_dummy);
+ seq_printf(s, " erase\t\t0x%02x\n", nor->erase_opcode);
+ seq_printf(s, " program\t0x%02x\n", nor->program_opcode);
+
+ switch (nor->cmd_ext_type) {
+ case SPI_NOR_EXT_NONE:
+ str = "none";
+ break;
+ case SPI_NOR_EXT_REPEAT:
+ str = "repeat";
+ break;
+ case SPI_NOR_EXT_INVERT:
+ str = "invert";
+ break;
+ default:
+ str = "<unknown>";
+ break;
+ }
+ seq_printf(s, " 8D extension\t%s\n", str);
+
+ seq_puts(s, "\nprotocols\n");
+ seq_printf(s, " read\t\t%s\n",
+ spi_nor_protocol_name(nor->read_proto));
+ seq_printf(s, " write\t\t%s\n",
+ spi_nor_protocol_name(nor->write_proto));
+ seq_printf(s, " register\t%s\n",
+ spi_nor_protocol_name(nor->reg_proto));
+
+ seq_puts(s, "\nerase commands\n");
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ struct spi_nor_erase_type *et = &erase_map->erase_type[i];
+
+ if (et->size) {
+ string_get_size(et->size, 1, STRING_UNITS_2, buf,
+ sizeof(buf));
+ seq_printf(s, " %02x (%s) [%d]\n", et->opcode, buf, i);
+ }
+ }
+
+ if (!(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
+ string_get_size(params->size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ seq_printf(s, " %02x (%s)\n", SPINOR_OP_CHIP_ERASE, buf);
+ }
+
+ seq_puts(s, "\nsector map\n");
+ seq_puts(s, " region (in hex) | erase mask | flags\n");
+ seq_puts(s, " ------------------+------------+----------\n");
+ for (region = erase_map->regions;
+ region;
+ region = spi_nor_region_next(region)) {
+ u64 start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
+ u64 flags = region->offset & SNOR_ERASE_FLAGS_MASK;
+ u64 end = start + region->size - 1;
+
+ seq_printf(s, " %08llx-%08llx | [%c%c%c%c] | %s\n",
+ start, end,
+ flags & BIT(0) ? '0' : ' ',
+ flags & BIT(1) ? '1' : ' ',
+ flags & BIT(2) ? '2' : ' ',
+ flags & BIT(3) ? '3' : ' ',
+ flags & SNOR_OVERLAID_REGION ? "overlaid" : "");
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(spi_nor_params);
+
+static void spi_nor_print_read_cmd(struct seq_file *s, u32 cap,
+ struct spi_nor_read_command *cmd)
+{
+ seq_printf(s, " %s%s\n", spi_nor_protocol_name(cmd->proto),
+ cap == SNOR_HWCAPS_READ_FAST ? " (fast read)" : "");
+ seq_printf(s, " opcode\t0x%02x\n", cmd->opcode);
+ seq_printf(s, " mode cycles\t%u\n", cmd->num_mode_clocks);
+ seq_printf(s, " dummy cycles\t%u\n", cmd->num_wait_states);
+}
+
+static void spi_nor_print_pp_cmd(struct seq_file *s,
+ struct spi_nor_pp_command *cmd)
+{
+ seq_printf(s, " %s\n", spi_nor_protocol_name(cmd->proto));
+ seq_printf(s, " opcode\t0x%02x\n", cmd->opcode);
+}
+
+static int spi_nor_capabilities_show(struct seq_file *s, void *data)
+{
+ struct spi_nor *nor = s->private;
+ struct spi_nor_flash_parameter *params = nor->params;
+ u32 hwcaps = params->hwcaps.mask;
+ int i, cmd;
+
+ seq_puts(s, "Supported read modes by the flash\n");
+ for (i = 0; i < sizeof(hwcaps) * BITS_PER_BYTE; i++) {
+ if (!(hwcaps & BIT(i)))
+ continue;
+
+ cmd = spi_nor_hwcaps_read2cmd(BIT(i));
+ if (cmd < 0)
+ continue;
+
+ spi_nor_print_read_cmd(s, BIT(i), &params->reads[cmd]);
+ hwcaps &= ~BIT(i);
+ }
+
+ seq_puts(s, "\nSupported page program modes by the flash\n");
+ for (i = 0; i < sizeof(hwcaps) * BITS_PER_BYTE; i++) {
+ if (!(hwcaps & BIT(i)))
+ continue;
+
+ cmd = spi_nor_hwcaps_pp2cmd(BIT(i));
+ if (cmd < 0)
+ continue;
+
+ spi_nor_print_pp_cmd(s, &params->page_programs[cmd]);
+ hwcaps &= ~BIT(i);
+ }
+
+ if (hwcaps)
+ seq_printf(s, "\nunknown hwcaps 0x%x\n", hwcaps);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(spi_nor_capabilities);
+
+static void spi_nor_debugfs_unregister(void *data)
+{
+ struct spi_nor *nor = data;
+
+ debugfs_remove(nor->debugfs_root);
+ nor->debugfs_root = NULL;
+}
+
+static struct dentry *rootdir;
+
+void spi_nor_debugfs_register(struct spi_nor *nor)
+{
+ struct dentry *d;
+ int ret;
+
+ if (!rootdir)
+ rootdir = debugfs_create_dir(SPI_NOR_DEBUGFS_ROOT, NULL);
+
+ ret = devm_add_action(nor->dev, spi_nor_debugfs_unregister, nor);
+ if (ret)
+ return;
+
+ d = debugfs_create_dir(dev_name(nor->dev), rootdir);
+ nor->debugfs_root = d;
+
+ debugfs_create_file("params", 0444, d, nor, &spi_nor_params_fops);
+ debugfs_create_file("capabilities", 0444, d, nor,
+ &spi_nor_capabilities_fops);
+}
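+
+/*
+ * Illustrative usage (not part of the driver): once a flash has probed,
+ * the attributes registered above can be read from userspace, e.g.:
+ *
+ *   cat /sys/kernel/debug/<SPI_NOR_DEBUGFS_ROOT>/<dev_name>/params
+ *   cat /sys/kernel/debug/<SPI_NOR_DEBUGFS_ROOT>/<dev_name>/capabilities
+ *
+ * where <dev_name> matches dev_name(nor->dev) and the root directory
+ * name is given by the SPI_NOR_DEBUGFS_ROOT macro.
+ */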
+
+void spi_nor_debugfs_shutdown(void)
+{
+ debugfs_remove(rootdir);
+}
diff --git a/drivers/mtd/spi-nor/eon.c b/drivers/mtd/spi-nor/eon.c
new file mode 100644
index 0000000000..50a1105371
--- /dev/null
+++ b/drivers/mtd/spi-nor/eon.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info eon_nor_parts[] = {
+ /* EON -- en25xxx */
+ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64) },
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64) },
+ { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512)
+ PARSE_SFDP },
+ { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+};
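+
+/*
+ * Note (for illustration): each entry above is
+ * INFO(jedec_id, ext_id, sector_size, n_sectors), so "en25qh128"
+ * describes 256 sectors of 64 KiB, i.e. a 16 MiB part with JEDEC ID
+ * 0x1c7018.
+ */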
+
+const struct spi_nor_manufacturer spi_nor_eon = {
+ .name = "eon",
+ .parts = eon_nor_parts,
+ .nparts = ARRAY_SIZE(eon_nor_parts),
+};
diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c
new file mode 100644
index 0000000000..fcc3b0e7cd
--- /dev/null
+++ b/drivers/mtd/spi-nor/esmt.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info esmt_nor_parts[] = {
+ /* ESMT */
+ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "f25l32qa-2s", INFO(0x8c4116, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K) },
+};
+
+const struct spi_nor_manufacturer spi_nor_esmt = {
+ .name = "esmt",
+ .parts = esmt_nor_parts,
+ .nparts = ARRAY_SIZE(esmt_nor_parts),
+};
diff --git a/drivers/mtd/spi-nor/everspin.c b/drivers/mtd/spi-nor/everspin.c
new file mode 100644
index 0000000000..84a07c2e05
--- /dev/null
+++ b/drivers/mtd/spi-nor/everspin.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info everspin_nor_parts[] = {
+ /* Everspin */
+ { "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2) },
+ { "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2) },
+ { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3) },
+ { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3) },
+};
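+
+/*
+ * Note (for illustration): CAT25_INFO() takes
+ * (sector_size, n_sectors, page_size, addr_nbytes), so "mr25h10" above
+ * describes a 1 Mbit (128 KiB) MRAM addressed with 3 address bytes.
+ */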
+
+const struct spi_nor_manufacturer spi_nor_everspin = {
+ .name = "everspin",
+ .parts = everspin_nor_parts,
+ .nparts = ARRAY_SIZE(everspin_nor_parts),
+};
diff --git a/drivers/mtd/spi-nor/fujitsu.c b/drivers/mtd/spi-nor/fujitsu.c
new file mode 100644
index 0000000000..69cffc5c73
--- /dev/null
+++ b/drivers/mtd/spi-nor/fujitsu.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info fujitsu_nor_parts[] = {
+ /* Fujitsu */
+ { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1)
+ FLAGS(SPI_NOR_NO_ERASE) },
+};
+
+const struct spi_nor_manufacturer spi_nor_fujitsu = {
+ .name = "fujitsu",
+ .parts = fujitsu_nor_parts,
+ .nparts = ARRAY_SIZE(fujitsu_nor_parts),
+};
diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c
new file mode 100644
index 0000000000..d57ddaf152
--- /dev/null
+++ b/drivers/mtd/spi-nor/gigadevice.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static int
+gd25q256_post_bfpt(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ /*
+ * GD25Q256C supports the first version of JESD216 which does not define
+ * the Quad Enable methods. Overwrite the default Quad Enable method.
+ *
+ * GD25Q256 GENERATION | SFDP MAJOR VERSION | SFDP MINOR VERSION
+ * GD25Q256C | SFDP_JESD216_MAJOR | SFDP_JESD216_MINOR
+ * GD25Q256D | SFDP_JESD216_MAJOR | SFDP_JESD216B_MINOR
+ * GD25Q256E | SFDP_JESD216_MAJOR | SFDP_JESD216B_MINOR
+ */
+ if (bfpt_header->major == SFDP_JESD216_MAJOR &&
+ bfpt_header->minor == SFDP_JESD216_MINOR)
+ nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups gd25q256_fixups = {
+ .post_bfpt = gd25q256_post_bfpt,
+};
+
+static const struct flash_info gigadevice_nor_parts[] = {
+ { "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512)
+ PARSE_SFDP
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
+ .fixups = &gd25q256_fixups },
+};
+
+const struct spi_nor_manufacturer spi_nor_gigadevice = {
+ .name = "gigadevice",
+ .parts = gigadevice_nor_parts,
+ .nparts = ARRAY_SIZE(gigadevice_nor_parts),
+};
diff --git a/drivers/mtd/spi-nor/intel.c b/drivers/mtd/spi-nor/intel.c
new file mode 100644
index 0000000000..9179f2d09c
--- /dev/null
+++ b/drivers/mtd/spi-nor/intel.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info intel_nor_parts[] = {
+ /* Intel/Numonyx -- xxxs33b */
+ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+ { "320s33b", INFO(0x898912, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+ { "640s33b", INFO(0x898913, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+};
+
+const struct spi_nor_manufacturer spi_nor_intel = {
+ .name = "intel",
+ .parts = intel_nor_parts,
+ .nparts = ARRAY_SIZE(intel_nor_parts),
+};
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
new file mode 100644
index 0000000000..accdf7aa2b
--- /dev/null
+++ b/drivers/mtd/spi-nor/issi.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static int
+is25lp256_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ /*
+ * IS25LP256 supports 4B opcodes, but the BFPT advertises
+ * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY.
+ * Overwrite the number of address bytes advertised by the BFPT.
+ */
+ if ((bfpt->dwords[SFDP_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
+ BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
+ nor->params->addr_nbytes = 4;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups is25lp256_fixups = {
+ .post_bfpt = is25lp256_post_bfpt_fixups,
+};
+
+static int pm25lv_nor_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_erase_map *map = &nor->params->erase_map;
+ int i;
+
+ /* The PM25LV series has a different 4k sector erase opcode */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (map->erase_type[i].size == 4096)
+ map->erase_type[i].opcode = SPINOR_OP_BE_4K_PMC;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups pm25lv_nor_fixups = {
+ .late_init = pm25lv_nor_late_init,
+};
+
+static const struct flash_info issi_nor_parts[] = {
+ /* ISSI */
+ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512)
+ PARSE_SFDP
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
+ .fixups = &is25lp256_fixups },
+ { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp256", INFO(0x9d7019, 0, 0, 0)
+ PARSE_SFDP
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
+ FLAGS(SPI_NOR_QUAD_PP)
+ .fixups = &is25lp256_fixups },
+
+ /* PMC */
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &pm25lv_nor_fixups
+ },
+ { "pm25lv010", INFO(0, 0, 32 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &pm25lv_nor_fixups
+ },
+ { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+};
+
+static void issi_nor_default_init(struct spi_nor *nor)
+{
+ nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+}
+
+static const struct spi_nor_fixups issi_fixups = {
+ .default_init = issi_nor_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_issi = {
+ .name = "issi",
+ .parts = issi_nor_parts,
+ .nparts = ARRAY_SIZE(issi_nor_parts),
+ .fixups = &issi_fixups,
+};
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
new file mode 100644
index 0000000000..eb149e517c
--- /dev/null
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static int
+mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ /*
+ * MX25L25635F supports 4B opcodes but MX25L25635E does not.
+ * Unfortunately, Macronix has re-used the same JEDEC ID for both
+ * variants which prevents us from defining a new entry in the parts
+ * table.
+ * We need a way to differentiate MX25L25635E and MX25L25635F, and it
+ * seems that the F version advertises support for Fast Read 4-4-4 in
+ * its BFPT table.
+ */
+ if (bfpt->dwords[SFDP_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
+ nor->flags |= SNOR_F_4B_OPCODES;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups mx25l25635_fixups = {
+ .post_bfpt = mx25l25635_post_bfpt_fixups,
+};
+
+static const struct flash_info macronix_nor_parts[] = {
+ /* Macronix */
+ { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16) },
+ { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256) },
+ { "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ .fixups = &mx25l25635_fixups },
+ { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "mx25uw51245g", INFOB(0xc2813a, 0, 0, 0, 4)
+ PARSE_SFDP
+ FLAGS(SPI_NOR_RWW) },
+ { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512) },
+ { "mx66l51235f", INFO(0xc2201a, 0, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048)
+ NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
+ { "mx66u2g45g", INFO(0xc2253c, 0, 64 * 1024, 4096)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+};
+
+static void macronix_nor_default_init(struct spi_nor *nor)
+{
+ nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+}
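+
+/*
+ * Background (informational): Macronix parts keep their Quad Enable bit
+ * in bit 6 of Status Register 1, hence the SR1-bit6 helper above instead
+ * of the core's default SR2-bit1 method.
+ */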
+
+static int macronix_nor_late_init(struct spi_nor *nor)
+{
+ if (!nor->params->set_4byte_addr_mode)
+ nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_en4b_ex4b;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups macronix_nor_fixups = {
+ .default_init = macronix_nor_default_init,
+ .late_init = macronix_nor_late_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_macronix = {
+ .name = "macronix",
+ .parts = macronix_nor_parts,
+ .nparts = ARRAY_SIZE(macronix_nor_parts),
+ .fixups = &macronix_nor_fixups,
+};
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
new file mode 100644
index 0000000000..6ad080c52a
--- /dev/null
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+/* flash_info mfr_flag. Used to read proprietary FSR register. */
+#define USE_FSR BIT(0)
+
+#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
+#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
+#define SPINOR_OP_MT_DTR_RD 0xfd /* Fast Read opcode in DTR mode */
+#define SPINOR_OP_MT_RD_ANY_REG 0x85 /* Read volatile register */
+#define SPINOR_OP_MT_WR_ANY_REG 0x81 /* Write volatile register */
+#define SPINOR_REG_MT_CFR0V 0x00 /* For setting octal DTR mode */
+#define SPINOR_REG_MT_CFR1V 0x01 /* For setting dummy cycles */
+#define SPINOR_REG_MT_CFR1V_DEF 0x1f /* Default dummy cycles */
+#define SPINOR_MT_OCT_DTR 0xe7 /* Enable Octal DTR. */
+#define SPINOR_MT_EXSPI 0xff /* Enable Extended SPI (default) */
+
+/* Flag Status Register bits */
+#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */
+#define FSR_E_ERR BIT(5) /* Erase operation status */
+#define FSR_P_ERR BIT(4) /* Program operation status */
+#define FSR_PT_ERR BIT(1) /* Protection error bit */
+
+/* Micron ST SPI NOR flash operations. */
+#define MICRON_ST_NOR_WR_ANY_REG_OP(naddr, addr, ndata, buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 0), \
+ SPI_MEM_OP_ADDR(naddr, addr, 0), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(ndata, buf, 0))
+
+#define MICRON_ST_RDFSR_OP(buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_IN(1, buf, 0))
+
+#define MICRON_ST_CLFSR_OP \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+static int micron_st_nor_octal_dtr_en(struct spi_nor *nor)
+{
+ struct spi_mem_op op;
+ u8 *buf = nor->bouncebuf;
+ int ret;
+ u8 addr_mode_nbytes = nor->params->addr_mode_nbytes;
+
+ /* Use 20 dummy cycles for memory array reads. */
+ *buf = 20;
+ op = (struct spi_mem_op)
+ MICRON_ST_NOR_WR_ANY_REG_OP(addr_mode_nbytes,
+ SPINOR_REG_MT_CFR1V, 1, buf);
+ ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ buf[0] = SPINOR_MT_OCT_DTR;
+ op = (struct spi_mem_op)
+ MICRON_ST_NOR_WR_ANY_REG_OP(addr_mode_nbytes,
+ SPINOR_REG_MT_CFR0V, 1, buf);
+ ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ /* Read flash ID to make sure the switch was successful. */
+ ret = spi_nor_read_id(nor, 0, 8, buf, SNOR_PROTO_8_8_8_DTR);
+ if (ret) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID after enabling 8D-8D-8D mode\n", ret);
+ return ret;
+ }
+
+ if (memcmp(buf, nor->info->id, nor->info->id_len))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int micron_st_nor_octal_dtr_dis(struct spi_nor *nor)
+{
+ struct spi_mem_op op;
+ u8 *buf = nor->bouncebuf;
+ int ret;
+
+ /*
+ * The register is 1-byte wide, but 1-byte transactions are not allowed
+ * in 8D-8D-8D mode. The next register is the dummy cycle configuration
+ * register. Since the transaction needs to be at least 2 bytes wide,
+ * set the next register to its default value. This also makes sense
+ * because the value was changed when enabling 8D-8D-8D mode, it should
+ * be reset when disabling.
+ */
+ buf[0] = SPINOR_MT_EXSPI;
+ buf[1] = SPINOR_REG_MT_CFR1V_DEF;
+ op = (struct spi_mem_op)
+ MICRON_ST_NOR_WR_ANY_REG_OP(nor->addr_nbytes,
+ SPINOR_REG_MT_CFR0V, 2, buf);
+ ret = spi_nor_write_any_volatile_reg(nor, &op, SNOR_PROTO_8_8_8_DTR);
+ if (ret)
+ return ret;
+
+ /* Read flash ID to make sure the switch was successful. */
+ ret = spi_nor_read_id(nor, 0, 0, buf, SNOR_PROTO_1_1_1);
+ if (ret) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID after disabling 8D-8D-8D mode\n", ret);
+ return ret;
+ }
+
+ if (memcmp(buf, nor->info->id, nor->info->id_len))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int micron_st_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
+{
+ return enable ? micron_st_nor_octal_dtr_en(nor) :
+ micron_st_nor_octal_dtr_dis(nor);
+}
+
+static void mt35xu512aba_default_init(struct spi_nor *nor)
+{
+ nor->params->set_octal_dtr = micron_st_nor_set_octal_dtr;
+}
+
+static int mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor)
+{
+ /* Set the Fast Read settings. */
+ nor->params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
+ spi_nor_set_read_settings(&nor->params->reads[SNOR_CMD_READ_8_8_8_DTR],
+ 0, 20, SPINOR_OP_MT_DTR_RD,
+ SNOR_PROTO_8_8_8_DTR);
+
+ nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
+ nor->params->rdsr_dummy = 8;
+ nor->params->rdsr_addr_nbytes = 0;
+
+ /*
+ * The BFPT quad enable field is set to a reserved value so the quad
+ * enable function is ignored by spi_nor_parse_bfpt(). Make sure we
+ * disable it.
+ */
+ nor->params->quad_enable = NULL;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups mt35xu512aba_fixups = {
+ .default_init = mt35xu512aba_default_init,
+ .post_sfdp = mt35xu512aba_post_sfdp_fixup,
+};
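+
+/*
+ * Ordering note (informational): default_init runs before SFDP parsing,
+ * post_sfdp runs after the tables have been parsed, and late_init (not
+ * used here) runs last, so the 8D-8D-8D read settings above are layered
+ * on top of whatever the BFPT advertised.
+ */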
+
+static const struct flash_info micron_nor_parts[] = {
+ { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ |
+ SPI_NOR_OCTAL_DTR_READ | SPI_NOR_OCTAL_DTR_PP)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES | SPI_NOR_IO_MODE_EN_VOLATILE)
+ MFR_FLAGS(USE_FSR)
+ .fixups = &mt35xu512aba_fixups
+ },
+ { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
+ MFR_FLAGS(USE_FSR)
+ },
+};
+
+static const struct flash_info st_nor_parts[] = {
+ { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
+ { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6 | NO_CHIP_ERASE)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048)
+ FLAGS(NO_CHIP_ERASE)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096)
+ FLAGS(NO_CHIP_ERASE)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096)
+ FLAGS(NO_CHIP_ERASE)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+
+ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2) },
+ { "m25p10", INFO(0x202011, 0, 32 * 1024, 4) },
+ { "m25p20", INFO(0x202012, 0, 64 * 1024, 4) },
+ { "m25p40", INFO(0x202013, 0, 64 * 1024, 8) },
+ { "m25p80", INFO(0x202014, 0, 64 * 1024, 16) },
+ { "m25p16", INFO(0x202015, 0, 64 * 1024, 32) },
+ { "m25p32", INFO(0x202016, 0, 64 * 1024, 64) },
+ { "m25p64", INFO(0x202017, 0, 64 * 1024, 128) },
+ { "m25p128", INFO(0x202018, 0, 256 * 1024, 64) },
+
+ { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2) },
+ { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4) },
+ { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4) },
+ { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8) },
+ { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16) },
+ { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32) },
+ { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64) },
+ { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128) },
+ { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64) },
+
+ { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2) },
+ { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16) },
+ { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32) },
+
+ { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4) },
+ { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16) },
+ { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K) },
+
+ { "m25px16", INFO(0x207115, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128) },
+ { "m25px80", INFO(0x207114, 0, 64 * 1024, 16) },
+};
+
+/**
+ * micron_st_nor_read_fsr() - Read the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'
+ * @fsr: pointer to a DMA-able buffer where the value of the
+ * Flag Status Register will be written. Should be at least 2
+ * bytes.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int micron_st_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = MICRON_ST_RDFSR_OP(fsr);
+
+ if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
+ op.addr.nbytes = nor->params->rdsr_addr_nbytes;
+ op.dummy.nbytes = nor->params->rdsr_dummy;
+ /*
+ * We don't want to read only one byte in DTR mode. So,
+ * read 2 and then discard the second byte.
+ */
+ op.data.nbytes = 2;
+ }
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr,
+ 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading FSR\n", ret);
+
+ return ret;
+}
+
+/**
+ * micron_st_nor_clear_fsr() - Clear the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void micron_st_nor_clear_fsr(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = MICRON_ST_CLFSR_OP;
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
+}
+
+/**
+ * micron_st_nor_ready() - Query the Status Register as well as the Flag Status
+ * Register to see if the flash is ready for new commands. If there are any
+ * errors in the FSR clear them.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int micron_st_nor_ready(struct spi_nor *nor)
+{
+ int sr_ready, ret;
+
+ sr_ready = spi_nor_sr_ready(nor);
+ if (sr_ready < 0)
+ return sr_ready;
+
+ ret = micron_st_nor_read_fsr(nor, nor->bouncebuf);
+ if (ret) {
+ /*
+ * Some controllers, such as Intel SPI, do not support low
+ * level operations such as reading the flag status
+ * register. They expose only a small set of high level
+ * operations to the software. If this is the case we use
+ * only the status register value.
+ */
+ return ret == -EOPNOTSUPP ? sr_ready : ret;
+ }
+
+ if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
+ if (nor->bouncebuf[0] & FSR_E_ERR)
+ dev_err(nor->dev, "Erase operation failed.\n");
+ else
+ dev_err(nor->dev, "Program operation failed.\n");
+
+ if (nor->bouncebuf[0] & FSR_PT_ERR)
+ dev_err(nor->dev,
+ "Attempted to modify a protected sector.\n");
+
+ micron_st_nor_clear_fsr(nor);
+
+ /*
+ * WEL bit remains set to one when an erase or page program
+ * error occurs. Issue a Write Disable command to protect
+ * against inadvertent writes that can possibly corrupt the
+ * contents of the memory.
+ */
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ return ret;
+
+ return -EIO;
+ }
+
+ return sr_ready && !!(nor->bouncebuf[0] & FSR_READY);
+}
+
+static void micron_st_nor_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ nor->params->quad_enable = NULL;
+}
+
+static int micron_st_nor_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ if (nor->info->mfr_flags & USE_FSR)
+ params->ready = micron_st_nor_ready;
+
+ if (!params->set_4byte_addr_mode)
+ params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_wren_en4b_ex4b;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups micron_st_nor_fixups = {
+ .default_init = micron_st_nor_default_init,
+ .late_init = micron_st_nor_late_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_micron = {
+ .name = "micron",
+ .parts = micron_nor_parts,
+ .nparts = ARRAY_SIZE(micron_nor_parts),
+ .fixups = &micron_st_nor_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_st = {
+ .name = "st",
+ .parts = st_nor_parts,
+ .nparts = ARRAY_SIZE(st_nor_parts),
+ .fixups = &micron_st_nor_fixups,
+};
diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c
new file mode 100644
index 0000000000..9a729aa345
--- /dev/null
+++ b/drivers/mtd/spi-nor/otp.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OTP support for SPI NOR flashes
+ *
+ * Copyright (C) 2021 Michael Walle <michael@walle.cc>
+ */
+
+#include <linux/log2.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+#define spi_nor_otp_region_len(nor) ((nor)->params->otp.org->len)
+#define spi_nor_otp_n_regions(nor) ((nor)->params->otp.org->n_regions)
+
+/**
+ * spi_nor_otp_read_secr() - read security register
+ * @nor: pointer to 'struct spi_nor'
+ * @addr: offset to read from
+ * @len: number of bytes to read
+ * @buf: pointer to dst buffer
+ *
+ * Read a security register by using the SPINOR_OP_RSECR commands.
+ *
+ * In Winbond/GigaDevice datasheets the term "security register" stands for
+ * a one-time-programmable memory area, consisting of multiple bytes (usually
+ * 256). Thus one "security register" maps to one OTP region.
+ *
+ * This method is used on GigaDevice and Winbond flashes.
+ *
+ * Please note, the read must not span multiple registers.
+ *
+ * Return: number of bytes read successfully, -errno otherwise
+ */
+int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf)
+{
+ u8 addr_nbytes, read_opcode, read_dummy;
+ struct spi_mem_dirmap_desc *rdesc;
+ enum spi_nor_protocol read_proto;
+ int ret;
+
+ read_opcode = nor->read_opcode;
+ addr_nbytes = nor->addr_nbytes;
+ read_dummy = nor->read_dummy;
+ read_proto = nor->read_proto;
+ rdesc = nor->dirmap.rdesc;
+
+ nor->read_opcode = SPINOR_OP_RSECR;
+ nor->read_dummy = 8;
+ nor->read_proto = SNOR_PROTO_1_1_1;
+ nor->dirmap.rdesc = NULL;
+
+ ret = spi_nor_read_data(nor, addr, len, buf);
+
+ nor->read_opcode = read_opcode;
+ nor->addr_nbytes = addr_nbytes;
+ nor->read_dummy = read_dummy;
+ nor->read_proto = read_proto;
+ nor->dirmap.rdesc = rdesc;
+
+ return ret;
+}
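+
+/*
+ * Note (informational): the helper reuses the generic read path by
+ * temporarily swapping the read parameters and clearing the dirmap
+ * descriptor, then restoring them; the same save/override/restore
+ * pattern is used by the write and erase helpers below.
+ */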
+
+/**
+ * spi_nor_otp_write_secr() - write security register
+ * @nor: pointer to 'struct spi_nor'
+ * @addr: offset to write to
+ * @len: number of bytes to write
+ * @buf: pointer to src buffer
+ *
+ * Write a security register by using the SPINOR_OP_PSECR commands.
+ *
+ * For more information on the term "security register", see the documentation
+ * of spi_nor_otp_read_secr().
+ *
+ * This method is used on GigaDevice and Winbond flashes.
+ *
+ * Please note, the write must not span multiple registers.
+ *
+ * Return: number of bytes written successfully, -errno otherwise
+ */
+int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len,
+ const u8 *buf)
+{
+ enum spi_nor_protocol write_proto;
+ struct spi_mem_dirmap_desc *wdesc;
+ u8 addr_nbytes, program_opcode;
+ int ret, written;
+
+ program_opcode = nor->program_opcode;
+ addr_nbytes = nor->addr_nbytes;
+ write_proto = nor->write_proto;
+ wdesc = nor->dirmap.wdesc;
+
+ nor->program_opcode = SPINOR_OP_PSECR;
+ nor->write_proto = SNOR_PROTO_1_1_1;
+ nor->dirmap.wdesc = NULL;
+
+ /*
+ * We only support a write to a single page. For now, all Winbond
+ * flashes have only one page per security register.
+ */
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
+
+ written = spi_nor_write_data(nor, addr, len, buf);
+ if (written < 0)
+ goto out;
+
+ ret = spi_nor_wait_till_ready(nor);
+
+out:
+ nor->program_opcode = program_opcode;
+ nor->addr_nbytes = addr_nbytes;
+ nor->write_proto = write_proto;
+ nor->dirmap.wdesc = wdesc;
+
+ return ret ?: written;
+}
+
+/**
+ * spi_nor_otp_erase_secr() - erase a security register
+ * @nor: pointer to 'struct spi_nor'
+ * @addr: offset of the security register to be erased
+ *
+ * Erase a security register by using the SPINOR_OP_ESECR command.
+ *
+ * For more information on the term "security register", see the documentation
+ * of spi_nor_otp_read_secr().
+ *
+ * This method is used on GigaDevice and Winbond flashes.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+int spi_nor_otp_erase_secr(struct spi_nor *nor, loff_t addr)
+{
+ u8 erase_opcode = nor->erase_opcode;
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ nor->erase_opcode = SPINOR_OP_ESECR;
+ ret = spi_nor_erase_sector(nor, addr);
+ nor->erase_opcode = erase_opcode;
+ if (ret)
+ return ret;
+
+ return spi_nor_wait_till_ready(nor);
+}
+
+static int spi_nor_otp_lock_bit_cr(unsigned int region)
+{
+ static const int lock_bits[] = { SR2_LB1, SR2_LB2, SR2_LB3 };
+
+ if (region >= ARRAY_SIZE(lock_bits))
+ return -EINVAL;
+
+ return lock_bits[region];
+}
+
+/**
+ * spi_nor_otp_lock_sr2() - lock the OTP region
+ * @nor: pointer to 'struct spi_nor'
+ * @region: OTP region
+ *
+ * Lock the OTP region by writing the status register-2. This method is used on
+ * GigaDevice and Winbond flashes.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_otp_lock_sr2(struct spi_nor *nor, unsigned int region)
+{
+ u8 *cr = nor->bouncebuf;
+ int ret, lock_bit;
+
+ lock_bit = spi_nor_otp_lock_bit_cr(region);
+ if (lock_bit < 0)
+ return lock_bit;
+
+ ret = spi_nor_read_cr(nor, cr);
+ if (ret)
+ return ret;
+
+ /* no need to write the register if region is already locked */
+ if (cr[0] & lock_bit)
+ return 0;
+
+ cr[0] |= lock_bit;
+
+ return spi_nor_write_16bit_cr_and_check(nor, cr[0]);
+}
+
+/**
+ * spi_nor_otp_is_locked_sr2() - get the OTP region lock status
+ * @nor: pointer to 'struct spi_nor'
+ * @region: OTP region
+ *
+ * Retrieve the OTP region lock bit by reading the status register-2. This
+ * method is used on GigaDevice and Winbond flashes.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_otp_is_locked_sr2(struct spi_nor *nor, unsigned int region)
+{
+ u8 *cr = nor->bouncebuf;
+ int ret, lock_bit;
+
+ lock_bit = spi_nor_otp_lock_bit_cr(region);
+ if (lock_bit < 0)
+ return lock_bit;
+
+ ret = spi_nor_read_cr(nor, cr);
+ if (ret)
+ return ret;
+
+ return cr[0] & lock_bit;
+}
+
+static loff_t spi_nor_otp_region_start(const struct spi_nor *nor, unsigned int region)
+{
+ const struct spi_nor_otp_organization *org = nor->params->otp.org;
+
+ return org->base + region * org->offset;
+}
+
+static size_t spi_nor_otp_size(struct spi_nor *nor)
+{
+ return spi_nor_otp_n_regions(nor) * spi_nor_otp_region_len(nor);
+}
+
+/* Translate the file offsets from and to OTP regions. */
+static loff_t spi_nor_otp_region_to_offset(struct spi_nor *nor, unsigned int region)
+{
+ return region * spi_nor_otp_region_len(nor);
+}
+
+static unsigned int spi_nor_otp_offset_to_region(struct spi_nor *nor, loff_t ofs)
+{
+ return div64_u64(ofs, spi_nor_otp_region_len(nor));
+}
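+
+/*
+ * Worked example (hypothetical geometry): with org->base = 0x1000,
+ * org->offset = 0x1000 and a 256-byte region length, OTP region 2
+ * starts at flash address 0x3000 (base + 2 * offset) but appears at
+ * MTD file offset 0x200 (2 * 256) in the contiguous OTP space.
+ */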
+
+static int spi_nor_mtd_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ const struct spi_nor_otp_ops *ops = nor->params->otp.ops;
+ unsigned int n_regions = spi_nor_otp_n_regions(nor);
+ unsigned int i;
+ int ret, locked;
+
+ if (len < n_regions * sizeof(*buf))
+ return -ENOSPC;
+
+ ret = spi_nor_prep_and_lock(nor);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < n_regions; i++) {
+ buf->start = spi_nor_otp_region_to_offset(nor, i);
+ buf->length = spi_nor_otp_region_len(nor);
+
+ locked = ops->is_locked(nor, i);
+ if (locked < 0) {
+ ret = locked;
+ goto out;
+ }
+
+ buf->locked = !!locked;
+ buf++;
+ }
+
+ *retlen = n_regions * sizeof(*buf);
+
+out:
+ spi_nor_unlock_and_unprep(nor);
+
+ return ret;
+}
+
+static int spi_nor_mtd_otp_range_is_locked(struct spi_nor *nor, loff_t ofs,
+ size_t len)
+{
+ const struct spi_nor_otp_ops *ops = nor->params->otp.ops;
+ unsigned int region;
+ int locked;
+
+ /*
+ * If any of the affected OTP regions are locked the entire range is
+ * considered locked.
+ */
+ for (region = spi_nor_otp_offset_to_region(nor, ofs);
+ region <= spi_nor_otp_offset_to_region(nor, ofs + len - 1);
+ region++) {
+ locked = ops->is_locked(nor, region);
+ /* take the branch if the region is locked or in case of an error */
+ if (locked)
+ return locked;
+ }
+
+ return 0;
+}
+
+static int spi_nor_mtd_otp_read_write(struct mtd_info *mtd, loff_t ofs,
+ size_t total_len, size_t *retlen,
+ const u8 *buf, bool is_write)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ const struct spi_nor_otp_ops *ops = nor->params->otp.ops;
+ const size_t rlen = spi_nor_otp_region_len(nor);
+ loff_t rstart, rofs;
+ unsigned int region;
+ size_t len;
+ int ret;
+
+ if (ofs < 0 || ofs >= spi_nor_otp_size(nor))
+ return 0;
+
+ /* don't access beyond the end */
+ total_len = min_t(size_t, total_len, spi_nor_otp_size(nor) - ofs);
+
+ if (!total_len)
+ return 0;
+
+ ret = spi_nor_prep_and_lock(nor);
+ if (ret)
+ return ret;
+
+ if (is_write) {
+ ret = spi_nor_mtd_otp_range_is_locked(nor, ofs, total_len);
+ if (ret < 0) {
+ goto out;
+ } else if (ret) {
+ ret = -EROFS;
+ goto out;
+ }
+ }
+
+ while (total_len) {
+ /*
+ * The OTP regions are mapped into a contiguous area starting
+ * at 0 as expected by the MTD layer. This will map the MTD
+ * file offsets to the address of an OTP region as used in the
+ * actual SPI commands.
+ */
+ region = spi_nor_otp_offset_to_region(nor, ofs);
+ rstart = spi_nor_otp_region_start(nor, region);
+
+ /*
+ * The size of an OTP region is expected to be a power of two,
+ * thus we can just mask the lower bits and get the offset into
+ * a region.
+ */
+ rofs = ofs & (rlen - 1);
+
+ /* don't access beyond one OTP region */
+ len = min_t(size_t, total_len, rlen - rofs);
+
+ if (is_write)
+ ret = ops->write(nor, rstart + rofs, len, buf);
+ else
+ ret = ops->read(nor, rstart + rofs, len, (u8 *)buf);
+ if (ret == 0)
+ ret = -EIO;
+ if (ret < 0)
+ goto out;
+
+ *retlen += ret;
+ ofs += ret;
+ buf += ret;
+ total_len -= ret;
+ }
+ ret = 0;
+
+out:
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+static int spi_nor_mtd_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u8 *buf)
+{
+ return spi_nor_mtd_otp_read_write(mtd, from, len, retlen, buf, false);
+}
+
+static int spi_nor_mtd_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u8 *buf)
+{
+ return spi_nor_mtd_otp_read_write(mtd, to, len, retlen, buf, true);
+}
+
+static int spi_nor_mtd_otp_erase(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ const struct spi_nor_otp_ops *ops = nor->params->otp.ops;
+ const size_t rlen = spi_nor_otp_region_len(nor);
+ unsigned int region;
+ loff_t rstart;
+ int ret;
+
+ /* OTP erase is optional */
+ if (!ops->erase)
+ return -EOPNOTSUPP;
+
+ if (!len)
+ return 0;
+
+ if (from < 0 || (from + len) > spi_nor_otp_size(nor))
+ return -EINVAL;
+
+ /* the user has to explicitly ask for whole regions */
+ if (!IS_ALIGNED(len, rlen) || !IS_ALIGNED(from, rlen))
+ return -EINVAL;
+
+ ret = spi_nor_prep_and_lock(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_mtd_otp_range_is_locked(nor, from, len);
+ if (ret < 0) {
+ goto out;
+ } else if (ret) {
+ ret = -EROFS;
+ goto out;
+ }
+
+ while (len) {
+ region = spi_nor_otp_offset_to_region(nor, from);
+ rstart = spi_nor_otp_region_start(nor, region);
+
+ ret = ops->erase(nor, rstart);
+ if (ret)
+ goto out;
+
+ len -= rlen;
+ from += rlen;
+ }
+
+out:
+ spi_nor_unlock_and_unprep(nor);
+
+ return ret;
+}
+
+static int spi_nor_mtd_otp_lock(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ const struct spi_nor_otp_ops *ops = nor->params->otp.ops;
+ const size_t rlen = spi_nor_otp_region_len(nor);
+ unsigned int region;
+ int ret;
+
+ if (from < 0 || (from + len) > spi_nor_otp_size(nor))
+ return -EINVAL;
+
+ /* the user has to explicitly ask for whole regions */
+ if (!IS_ALIGNED(len, rlen) || !IS_ALIGNED(from, rlen))
+ return -EINVAL;
+
+ ret = spi_nor_prep_and_lock(nor);
+ if (ret)
+ return ret;
+
+ while (len) {
+ region = spi_nor_otp_offset_to_region(nor, from);
+ ret = ops->lock(nor, region);
+ if (ret)
+ goto out;
+
+ len -= rlen;
+ from += rlen;
+ }
+
+out:
+ spi_nor_unlock_and_unprep(nor);
+
+ return ret;
+}
+
+void spi_nor_set_mtd_otp_ops(struct spi_nor *nor)
+{
+ struct mtd_info *mtd = &nor->mtd;
+
+ if (!nor->params->otp.ops)
+ return;
+
+ if (WARN_ON(!is_power_of_2(spi_nor_otp_region_len(nor))))
+ return;
+
+ /*
+ * For now, we only support the user_prot callbacks.
+ *
+ * Some SPI NOR flashes, like Macronix ones, can be ordered in two
+ * different variants: one with a factory-locked OTP area and one where
+ * it is left to the user to write to it. The factory-locked OTP is
+ * usually preprogrammed with an "electrical serial number". We don't
+ * support these for now.
+ */
+ mtd->_get_user_prot_info = spi_nor_mtd_otp_info;
+ mtd->_read_user_prot_reg = spi_nor_mtd_otp_read;
+ mtd->_write_user_prot_reg = spi_nor_mtd_otp_write;
+ mtd->_lock_user_prot_reg = spi_nor_mtd_otp_lock;
+ mtd->_erase_user_prot_reg = spi_nor_mtd_otp_erase;
+}
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
new file mode 100644
index 0000000000..b3b11dfed7
--- /dev/null
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -0,0 +1,1570 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+
+#include "core.h"
+
+#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
+#define SFDP_PARAM_HEADER_PTP(p) \
+ (((p)->parameter_table_pointer[2] << 16) | \
+ ((p)->parameter_table_pointer[1] << 8) | \
+ ((p)->parameter_table_pointer[0] << 0))
+#define SFDP_PARAM_HEADER_PARAM_LEN(p) ((p)->length * 4)
+
+#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
+#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
+#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */
+#define SFDP_PROFILE1_ID 0xff05 /* xSPI Profile 1.0 table. */
+#define SFDP_SCCR_MAP_ID 0xff87 /*
+ * Status, Control and Configuration
+ * Register Map.
+ */
+#define SFDP_SCCR_MAP_MC_ID 0xff88 /*
+ * Status, Control and Configuration
+ * Register Map Offsets for Multi-Chip
+ * SPI Memory Devices.
+ */
+
+#define SFDP_SIGNATURE 0x50444653U
+
+struct sfdp_header {
+ u32 signature; /* 0x50444653U <=> "SFDP" */
+ u8 minor;
+ u8 major;
+ u8 nph; /* 0-based number of parameter headers */
+ u8 unused;
+
+ /* Basic Flash Parameter Table. */
+ struct sfdp_parameter_header bfpt_header;
+};
+
+/* Fast Read settings. */
+struct sfdp_bfpt_read {
+ /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
+ u32 hwcaps;
+
+ /*
+ * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
+ * whether the Fast Read x-y-z command is supported.
+ */
+ u32 supported_dword;
+ u32 supported_bit;
+
+ /*
+ * The half-word at offset <settings_shift> in <settings_dword> BFPT DWORD
+ * encodes the op code, the number of mode clocks and the number of wait
+ * states to be used by Fast Read x-y-z command.
+ */
+ u32 settings_dword;
+ u32 settings_shift;
+
+ /* The SPI protocol for this Fast Read x-y-z command. */
+ enum spi_nor_protocol proto;
+};
+
+struct sfdp_bfpt_erase {
+ /*
+ * The half-word at offset <shift> in DWORD <dword> encodes the
+ * op code and erase sector size to be used by Sector Erase commands.
+ */
+ u32 dword;
+ u32 shift;
+};
+
+#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22)
+#define SMPT_CMD_ADDRESS_LEN_0 (0x0UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_3 (0x1UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_4 (0x2UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT (0x3UL << 22)
+
+#define SMPT_CMD_READ_DUMMY_MASK GENMASK(19, 16)
+#define SMPT_CMD_READ_DUMMY_SHIFT 16
+#define SMPT_CMD_READ_DUMMY(_cmd) \
+ (((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
+#define SMPT_CMD_READ_DUMMY_IS_VARIABLE 0xfUL
+
+#define SMPT_CMD_READ_DATA_MASK GENMASK(31, 24)
+#define SMPT_CMD_READ_DATA_SHIFT 24
+#define SMPT_CMD_READ_DATA(_cmd) \
+ (((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)
+
+#define SMPT_CMD_OPCODE_MASK GENMASK(15, 8)
+#define SMPT_CMD_OPCODE_SHIFT 8
+#define SMPT_CMD_OPCODE(_cmd) \
+ (((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)
+
+#define SMPT_MAP_REGION_COUNT_MASK GENMASK(23, 16)
+#define SMPT_MAP_REGION_COUNT_SHIFT 16
+#define SMPT_MAP_REGION_COUNT(_header) \
+ ((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
+ SMPT_MAP_REGION_COUNT_SHIFT) + 1)
+
+#define SMPT_MAP_ID_MASK GENMASK(15, 8)
+#define SMPT_MAP_ID_SHIFT 8
+#define SMPT_MAP_ID(_header) \
+ (((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)
+
+#define SMPT_MAP_REGION_SIZE_MASK GENMASK(31, 8)
+#define SMPT_MAP_REGION_SIZE_SHIFT 8
+#define SMPT_MAP_REGION_SIZE(_region) \
+ (((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
+ SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)
+
+#define SMPT_MAP_REGION_ERASE_TYPE_MASK GENMASK(3, 0)
+#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
+ ((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
+
+#define SMPT_DESC_TYPE_MAP BIT(1)
+#define SMPT_DESC_END BIT(0)
+
+#define SFDP_4BAIT_DWORD_MAX 2
+
+struct sfdp_4bait {
+ /* The hardware capability. */
+ u32 hwcaps;
+
+ /*
+ * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
+ * the associated 4-byte address op code is supported.
+ */
+ u32 supported_bit;
+};
+
+/**
+ * spi_nor_read_raw() - raw read of serial flash memory. The read_opcode,
+ * addr_nbytes and read_dummy members of the struct spi_nor
+ * must be set by the caller beforehand.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to read
+ * @buf: buffer where the data is copied into (dma-safe memory)
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
+{
+ ssize_t ret;
+
+ while (len) {
+ ret = spi_nor_read_data(nor, addr, len, buf);
+ if (ret < 0)
+ return ret;
+ if (!ret || ret > len)
+ return -EIO;
+
+ buf += ret;
+ addr += ret;
+ len -= ret;
+ }
+ return 0;
+}
+
+/**
+ * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the SFDP area to start reading data from
+ * @len: number of bytes to read
+ * @buf: buffer where the SFDP data are copied into (dma-safe memory)
+ *
+ * Whatever the actual numbers of bytes for address and dummy cycles are
+ * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
+ * followed by a 3-byte address and 8 dummy clock cycles.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
+ size_t len, void *buf)
+{
+ u8 addr_nbytes, read_opcode, read_dummy;
+ int ret;
+
+ read_opcode = nor->read_opcode;
+ addr_nbytes = nor->addr_nbytes;
+ read_dummy = nor->read_dummy;
+
+ nor->read_opcode = SPINOR_OP_RDSFDP;
+ nor->addr_nbytes = 3;
+ nor->read_dummy = 8;
+
+ ret = spi_nor_read_raw(nor, addr, len, buf);
+
+ nor->read_opcode = read_opcode;
+ nor->addr_nbytes = addr_nbytes;
+ nor->read_dummy = read_dummy;
+
+ return ret;
+}
+
+/**
+ * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the SFDP area to start reading data from
+ * @len: number of bytes to read
+ * @buf: buffer where the SFDP data are copied into
+ *
+ * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer, as @buf is not
+ * guaranteed to be dma-safe.
+ *
+ * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
+ * otherwise.
+ */
+static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
+ size_t len, void *buf)
+{
+ void *dma_safe_buf;
+ int ret;
+
+ dma_safe_buf = kmalloc(len, GFP_KERNEL);
+ if (!dma_safe_buf)
+ return -ENOMEM;
+
+ ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
+ memcpy(buf, dma_safe_buf, len);
+ kfree(dma_safe_buf);
+
+ return ret;
+}
+
+static void
+spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
+ u16 half,
+ enum spi_nor_protocol proto)
+{
+ read->num_mode_clocks = (half >> 5) & 0x07;
+ read->num_wait_states = (half >> 0) & 0x1f;
+ read->opcode = (half >> 8) & 0xff;
+ read->proto = proto;
+}
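+
+/*
+ * Worked example (hypothetical half-word): 0xeb44 decodes to opcode
+ * 0xeb, (0x44 >> 5) & 0x7 = 2 mode clocks and 0x44 & 0x1f = 4 wait
+ * states, a typical 1S-4S-4S fast read setting.
+ */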
+
+static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
+ /* Fast Read 1-1-2 */
+ {
+ SNOR_HWCAPS_READ_1_1_2,
+ SFDP_DWORD(1), BIT(16), /* Supported bit */
+ SFDP_DWORD(4), 0, /* Settings */
+ SNOR_PROTO_1_1_2,
+ },
+
+ /* Fast Read 1-2-2 */
+ {
+ SNOR_HWCAPS_READ_1_2_2,
+ SFDP_DWORD(1), BIT(20), /* Supported bit */
+ SFDP_DWORD(4), 16, /* Settings */
+ SNOR_PROTO_1_2_2,
+ },
+
+ /* Fast Read 2-2-2 */
+ {
+ SNOR_HWCAPS_READ_2_2_2,
+ SFDP_DWORD(5), BIT(0), /* Supported bit */
+ SFDP_DWORD(6), 16, /* Settings */
+ SNOR_PROTO_2_2_2,
+ },
+
+ /* Fast Read 1-1-4 */
+ {
+ SNOR_HWCAPS_READ_1_1_4,
+ SFDP_DWORD(1), BIT(22), /* Supported bit */
+ SFDP_DWORD(3), 16, /* Settings */
+ SNOR_PROTO_1_1_4,
+ },
+
+ /* Fast Read 1-4-4 */
+ {
+ SNOR_HWCAPS_READ_1_4_4,
+ SFDP_DWORD(1), BIT(21), /* Supported bit */
+ SFDP_DWORD(3), 0, /* Settings */
+ SNOR_PROTO_1_4_4,
+ },
+
+ /* Fast Read 4-4-4 */
+ {
+ SNOR_HWCAPS_READ_4_4_4,
+ SFDP_DWORD(5), BIT(4), /* Supported bit */
+ SFDP_DWORD(7), 16, /* Settings */
+ SNOR_PROTO_4_4_4,
+ },
+};
+
+static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
+ /* Erase Type 1 in DWORD8 bits[15:0] */
+ {SFDP_DWORD(8), 0},
+
+ /* Erase Type 2 in DWORD8 bits[31:16] */
+ {SFDP_DWORD(8), 16},
+
+ /* Erase Type 3 in DWORD9 bits[15:0] */
+ {SFDP_DWORD(9), 0},
+
+ /* Erase Type 4 in DWORD9 bits[31:16] */
+ {SFDP_DWORD(9), 16},
+};
+
+/**
+ * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type
+ * @opcode: the SPI command op code to erase the sector/block
+ * @i: erase type index as sorted in the Basic Flash Parameter Table
+ *
+ * The supported Erase Types will be sorted at init in ascending order, with
+ * the smallest Erase Type size being the first member in the erase_type array
+ * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
+ * the Basic Flash Parameter Table since it will be used later on to
+ * synchronize with the supported Erase Types defined in SFDP optional tables.
+ */
+static void
+spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
+ u32 size, u8 opcode, u8 i)
+{
+ erase->idx = i;
+ spi_nor_set_erase_type(erase, size, opcode);
+}
+
+/**
+ * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
+ * @l: member in the left half of the map's erase_type array
+ * @r: member in the right half of the map's erase_type array
+ *
+ * Comparison function used in the sort() call to sort the map's erase types
+ * in ascending order, the smallest erase type size being the first member
+ * in the sorted erase_type array.
+ *
+ * Return: the result of @l->size - @r->size
+ */
+static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
+{
+ const struct spi_nor_erase_type *left = l, *right = r;
+
+ return left->size - right->size;
+}
+
+/**
+ * spi_nor_sort_erase_mask() - sort erase mask
+ * @map: the erase map of the SPI NOR
+ * @erase_mask: the erase type mask to be sorted
+ *
+ * Replicate the sort done for the map's erase types in BFPT: sort the erase
+ * mask in ascending order with the smallest erase type size starting from
+ * BIT(0) in the sorted erase mask.
+ *
+ * Return: sorted erase mask.
+ */
+static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
+{
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ int i;
+ u8 sorted_erase_mask = 0;
+
+ if (!erase_mask)
+ return 0;
+
+ /* Replicate the sort done for the map's erase types. */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
+ sorted_erase_mask |= BIT(i);
+
+ return sorted_erase_mask;
+}
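+
+/*
+ * Worked example (hypothetical BFPT layout): if the BFPT defined erase
+ * type idx 0 as 64 KiB and idx 1 as 4 KiB, sorting by size places the
+ * 4 KiB type at erase_type[0] (idx 1) and the 64 KiB type at
+ * erase_type[1] (idx 0); a region mask of BIT(0) in BFPT numbering
+ * (64 KiB only) therefore becomes BIT(1) in the sorted mask.
+ */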
+
+/**
+ * spi_nor_regions_sort_erase_types() - sort erase types in each region
+ * @map: the erase map of the SPI NOR
+ *
+ * Function assumes that the erase types defined in the erase map are already
+ * sorted in ascending order, with the smallest erase type size being the first
+ * member in the erase_type array. It replicates the sort done for the map's
+ * erase types. Each region's erase bitmask will indicate which erase types are
+ * supported from the sorted erase types defined in the erase map.
+ * Sort all the regions' erase types at init in order to speed up the process of
+ * finding the best erase command at runtime.
+ */
+static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
+{
+ struct spi_nor_erase_region *region = map->regions;
+ u8 region_erase_mask, sorted_erase_mask;
+
+ while (region) {
+ region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
+
+ sorted_erase_mask = spi_nor_sort_erase_mask(map,
+ region_erase_mask);
+
+ /* Overwrite erase mask. */
+ region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
+ sorted_erase_mask;
+
+ region = spi_nor_region_next(region);
+ }
+}
+
+/**
+ * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
+ * @nor: pointer to a 'struct spi_nor'
+ * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the Basic Flash Parameter Table length and version
+ *
+ * The Basic Flash Parameter Table is the main and only mandatory table as
+ * defined by the SFDP (JESD216) specification.
+ * It provides us with the total size (memory density) of the data array and
+ * the number of address bytes for Fast Read, Page Program and Sector Erase
+ * commands.
+ * For Fast READ commands, it also gives the number of mode clock cycles and
+ * wait states (regrouped in the number of dummy clock cycles) for each
+ * supported instruction op code.
+ * For Page Program, the page size is now available since JESD216 rev A, however
+ * the supported instruction op codes are still not provided.
+ * For Sector Erase commands, this table stores the supported instruction op
+ * codes and the associated sector sizes.
+ * Finally, the Quad Enable Requirements (QER) are also available since JESD216
+ * rev A. The QER bits encode the manufacturer dependent procedure to be
+ * executed to set the Quad Enable (QE) bit in some internal register of the
+ * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
+ * sending any Quad SPI command to the memory. Actually, setting the QE bit
+ * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
+ * and IO3 hence enabling 4 (Quad) I/O lines.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_bfpt(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spi_nor_erase_map *map = &params->erase_map;
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ struct sfdp_bfpt bfpt;
+ size_t len;
+ int i, cmd, err;
+ u32 addr, val;
+ u32 dword;
+ u16 half;
+ u8 erase_mask;
+
+ /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
+ if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
+ return -EINVAL;
+
+ /* Read the Basic Flash Parameter Table. */
+ len = min_t(size_t, sizeof(bfpt),
+ bfpt_header->length * sizeof(u32));
+ addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
+ memset(&bfpt, 0, sizeof(bfpt));
+ err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
+ if (err < 0)
+ return err;
+
+ /* Fix endianness of the BFPT DWORDs. */
+ le32_to_cpu_array(bfpt.dwords, BFPT_DWORD_MAX);
+
+ /* Number of address bytes. */
+ switch (bfpt.dwords[SFDP_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
+ case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
+ case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4:
+ params->addr_nbytes = 3;
+ params->addr_mode_nbytes = 3;
+ break;
+
+ case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
+ params->addr_nbytes = 4;
+ params->addr_mode_nbytes = 4;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Flash Memory Density (in bits). */
+ val = bfpt.dwords[SFDP_DWORD(2)];
+ if (val & BIT(31)) {
+ val &= ~BIT(31);
+
+ /*
+ * Prevent overflows on params->size. Anyway, a NOR of 2^64
+ * bits is unlikely to exist so this error probably means
+ * the BFPT we are reading is corrupted/wrong.
+ */
+ if (val > 63)
+ return -EINVAL;
+
+ params->size = 1ULL << val;
+ } else {
+ params->size = val + 1;
+ }
+ params->size >>= 3; /* Convert to bytes. */
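+	/*
+	 * For example, a density DWORD of 0x00ffffff encodes (0xffffff + 1)
+	 * bits = 2 MiB, while 0x80000019 (BIT(31) set, N = 25) encodes
+	 * 2^25 bits = 4 MiB.
+	 */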
+
+ /* Fast Read settings. */
+ for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
+ const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
+ struct spi_nor_read_command *read;
+
+ if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
+ params->hwcaps.mask &= ~rd->hwcaps;
+ continue;
+ }
+
+ params->hwcaps.mask |= rd->hwcaps;
+ cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
+ read = &params->reads[cmd];
+ half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
+ spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
+ }
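+	/*
+	 * Each 16-bit read descriptor packs the instruction opcode in its
+	 * upper byte and the mode clocks and wait states in its lower byte.
+	 * For instance, a (hypothetical) half-word of 0xeb43 would describe
+	 * opcode 0xeb with 2 mode clocks and 3 wait states.
+	 */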
+
+ /*
+ * Sector Erase settings. Reinitialize the uniform erase map using the
+ * Erase Types defined in the bfpt table.
+ */
+ erase_mask = 0;
+ memset(&params->erase_map, 0, sizeof(params->erase_map));
+ for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
+ const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
+ u32 erasesize;
+ u8 opcode;
+
+ half = bfpt.dwords[er->dword] >> er->shift;
+ erasesize = half & 0xff;
+
+ /* erasesize == 0 means this Erase Type is not supported. */
+ if (!erasesize)
+ continue;
+
+ erasesize = 1U << erasesize;
+ opcode = (half >> 8) & 0xff;
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
+ opcode, i);
+ }
+ spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
+ /*
+ * Sort all the map's Erase Types in ascending order with the smallest
+ * erase size being the first member in the erase_type array.
+ */
+ sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
+ spi_nor_map_cmp_erase_type, NULL);
+ /*
+ * Sort the erase types in the uniform region in order to update the
+ * uniform_erase_type bitmask. The bitmask will be used later on when
+ * selecting the uniform erase.
+ */
+ spi_nor_regions_sort_erase_types(map);
+ map->uniform_erase_type = map->uniform_region.offset &
+ SNOR_ERASE_TYPE_MASK;
+
+ /* Stop here if not JESD216 rev A or later. */
+ if (bfpt_header->length == BFPT_DWORD_MAX_JESD216)
+ return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);
+
+ /* Page size: this field specifies 'N' so the page size = 2^N bytes. */
+ val = bfpt.dwords[SFDP_DWORD(11)];
+ val &= BFPT_DWORD11_PAGE_SIZE_MASK;
+ val >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
+ params->page_size = 1U << val;
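+	/* For example, N = 8 encodes the common 2^8 = 256-byte page size. */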
+
+ /* Quad Enable Requirements. */
+ switch (bfpt.dwords[SFDP_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
+ case BFPT_DWORD15_QER_NONE:
+ params->quad_enable = NULL;
+ break;
+
+ case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
+ /*
+ * Writing only one byte to the Status Register has the
+ * side-effect of clearing Status Register 2.
+ */
+ case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
+ /*
+ * Read Configuration Register (35h) instruction is not
+ * supported.
+ */
+ nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+ break;
+
+ case BFPT_DWORD15_QER_SR1_BIT6:
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+ break;
+
+ case BFPT_DWORD15_QER_SR2_BIT7:
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ params->quad_enable = spi_nor_sr2_bit7_quad_enable;
+ break;
+
+ case BFPT_DWORD15_QER_SR2_BIT1:
+ /*
+		 * JESD216 rev B or later does not specify whether writing only
+		 * one byte to the Status Register clears Status Register 2 or
+		 * not, so let's be cautious and keep the default
+ * assumption of a 16-bit Write Status (01h) command.
+ */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+ break;
+
+ default:
+ dev_dbg(nor->dev, "BFPT QER reserved value used\n");
+ break;
+ }
+
+ dword = bfpt.dwords[SFDP_DWORD(16)] & BFPT_DWORD16_4B_ADDR_MODE_MASK;
+ if (SFDP_MASK_CHECK(dword, BFPT_DWORD16_4B_ADDR_MODE_BRWR))
+ params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr;
+ else if (SFDP_MASK_CHECK(dword, BFPT_DWORD16_4B_ADDR_MODE_WREN_EN4B_EX4B))
+ params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_wren_en4b_ex4b;
+ else if (SFDP_MASK_CHECK(dword, BFPT_DWORD16_4B_ADDR_MODE_EN4B_EX4B))
+ params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_en4b_ex4b;
+ else
+ dev_dbg(nor->dev, "BFPT: 4-Byte Address Mode method is not recognized or not implemented\n");
+
+ /* Soft Reset support. */
+ if (bfpt.dwords[SFDP_DWORD(16)] & BFPT_DWORD16_SWRST_EN_RST)
+ nor->flags |= SNOR_F_SOFT_RESET;
+
+ /* Stop here if not JESD216 rev C or later. */
+ if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B)
+ return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);
+
+ /* 8D-8D-8D command extension. */
+ switch (bfpt.dwords[SFDP_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) {
+ case BFPT_DWORD18_CMD_EXT_REP:
+ nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
+ break;
+
+ case BFPT_DWORD18_CMD_EXT_INV:
+ nor->cmd_ext_type = SPI_NOR_EXT_INVERT;
+ break;
+
+ case BFPT_DWORD18_CMD_EXT_RES:
+ dev_dbg(nor->dev, "Reserved command extension used\n");
+ break;
+
+ case BFPT_DWORD18_CMD_EXT_16B:
+ dev_dbg(nor->dev, "16-bit opcodes not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);
+}
+
+/**
+ * spi_nor_smpt_addr_nbytes() - return the number of address bytes used in the
+ * configuration detection command.
+ * @nor: pointer to a 'struct spi_nor'
+ * @settings:	configuration detection command descriptor, dword1
+ *
+ * Return: the number of address bytes used by the configuration detection
+ * command.
+ */
+static u8 spi_nor_smpt_addr_nbytes(const struct spi_nor *nor, const u32 settings)
+{
+ switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
+ case SMPT_CMD_ADDRESS_LEN_0:
+ return 0;
+ case SMPT_CMD_ADDRESS_LEN_3:
+ return 3;
+ case SMPT_CMD_ADDRESS_LEN_4:
+ return 4;
+ case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
+ default:
+ return nor->params->addr_mode_nbytes;
+ }
+}
+
+/**
+ * spi_nor_smpt_read_dummy() - return the configuration detection command read
+ * latency, in clock cycles.
+ * @nor: pointer to a 'struct spi_nor'
+ * @settings: configuration detection command descriptor, dword1
+ *
+ * Return: the number of dummy cycles for an SMPT read
+ */
+static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
+{
+ u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
+
+ if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
+ return nor->read_dummy;
+ return read_dummy;
+}
+
+/**
+ * spi_nor_get_map_in_use() - get the configuration map in use
+ * @nor: pointer to a 'struct spi_nor'
+ * @smpt: pointer to the sector map parameter table
+ * @smpt_len: sector map parameter table length
+ *
+ * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
+ */
+static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
+ u8 smpt_len)
+{
+ const u32 *ret;
+ u8 *buf;
+ u32 addr;
+ int err;
+ u8 i;
+ u8 addr_nbytes, read_opcode, read_dummy;
+ u8 read_data_mask, map_id;
+
+ /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ addr_nbytes = nor->addr_nbytes;
+ read_dummy = nor->read_dummy;
+ read_opcode = nor->read_opcode;
+
+ map_id = 0;
+ /* Determine if there are any optional Detection Command Descriptors */
+ for (i = 0; i < smpt_len; i += 2) {
+ if (smpt[i] & SMPT_DESC_TYPE_MAP)
+ break;
+
+ read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
+ nor->addr_nbytes = spi_nor_smpt_addr_nbytes(nor, smpt[i]);
+ nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
+ nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
+ addr = smpt[i + 1];
+
+ err = spi_nor_read_raw(nor, addr, 1, buf);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto out;
+ }
+
+ /*
+ * Build an index value that is used to select the Sector Map
+ * Configuration that is currently in use.
+ */
+ map_id = map_id << 1 | !!(*buf & read_data_mask);
+ }
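+	/*
+	 * Example with two (hypothetical) command descriptors: a set bit from
+	 * the first read and a cleared bit from the second yield
+	 * map_id = (0 << 1 | 1) << 1 | 0 = 2, i.e. earlier descriptors land
+	 * in the more significant bits.
+	 */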
+
+ /*
+ * If command descriptors are provided, they always precede map
+	 * descriptors in the table, so there is no need to restart the
+	 * iteration over the smpt array from the beginning.
+ *
+ * Find the matching configuration map.
+ */
+ ret = ERR_PTR(-EINVAL);
+ while (i < smpt_len) {
+ if (SMPT_MAP_ID(smpt[i]) == map_id) {
+ ret = smpt + i;
+ break;
+ }
+
+ /*
+ * If there are no more configuration map descriptors and no
+ * configuration ID matched the configuration identifier, the
+ * sector address map is unknown.
+ */
+ if (smpt[i] & SMPT_DESC_END)
+ break;
+
+ /* increment the table index to the next map */
+ i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
+ }
+
+ /* fall through */
+out:
+ kfree(buf);
+ nor->addr_nbytes = addr_nbytes;
+ nor->read_dummy = read_dummy;
+ nor->read_opcode = read_opcode;
+ return ret;
+}
+
+static void spi_nor_region_mark_end(struct spi_nor_erase_region *region)
+{
+ region->offset |= SNOR_LAST_REGION;
+}
+
+static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
+{
+ region->offset |= SNOR_OVERLAID_REGION;
+}
+
+/**
+ * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @erase_type: erase type bitmask
+ */
+static void
+spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
+ const struct spi_nor_erase_type *erase,
+ const u8 erase_type)
+{
+ int i;
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ if (!(erase[i].size && erase_type & BIT(erase[i].idx)))
+ continue;
+ if (region->size & erase[i].size_mask) {
+ spi_nor_region_mark_overlay(region);
+ return;
+ }
+ }
+}
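+
+/*
+ * In other words, a region is marked as overlaid when its size is not a
+ * multiple of one of its supported erase sizes: a (hypothetical) 48 KiB
+ * region supporting a 64 KiB erase type would be flagged, since
+ * 48 KiB & (64 KiB - 1) is non-zero.
+ */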
+
+/**
+ * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
+ * @nor: pointer to a 'struct spi_nor'
+ * @smpt: pointer to the sector map parameter table
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
+ const u32 *smpt)
+{
+ struct spi_nor_erase_map *map = &nor->params->erase_map;
+ struct spi_nor_erase_type *erase = map->erase_type;
+ struct spi_nor_erase_region *region;
+ u64 offset;
+ u32 region_count;
+ int i, j;
+ u8 uniform_erase_type, save_uniform_erase_type;
+ u8 erase_type, regions_erase_type;
+
+ region_count = SMPT_MAP_REGION_COUNT(*smpt);
+ /*
+ * The regions will be freed when the driver detaches from the
+ * device.
+ */
+ region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+ map->regions = region;
+
+ uniform_erase_type = 0xff;
+ regions_erase_type = 0;
+ offset = 0;
+ /* Populate regions. */
+ for (i = 0; i < region_count; i++) {
+ j = i + 1; /* index for the region dword */
+ region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
+ erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
+ region[i].offset = offset | erase_type;
+
+ spi_nor_region_check_overlay(&region[i], erase, erase_type);
+
+ /*
+ * Save the erase types that are supported in all regions and
+ * can erase the entire flash memory.
+ */
+ uniform_erase_type &= erase_type;
+
+ /*
+ * regions_erase_type mask will indicate all the erase types
+ * supported in this configuration map.
+ */
+ regions_erase_type |= erase_type;
+
+ offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
+ region[i].size;
+ }
+ spi_nor_region_mark_end(&region[i - 1]);
+
+ save_uniform_erase_type = map->uniform_erase_type;
+ map->uniform_erase_type = spi_nor_sort_erase_mask(map,
+ uniform_erase_type);
+
+ if (!regions_erase_type) {
+ /*
+ * Roll back to the previous uniform_erase_type mask, SMPT is
+ * broken.
+ */
+ map->uniform_erase_type = save_uniform_erase_type;
+ return -EINVAL;
+ }
+
+ /*
+ * BFPT advertises all the erase types supported by all the possible
+ * map configurations. Mask out the erase types that are not supported
+ * by the current map configuration.
+ */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (!(regions_erase_type & BIT(erase[i].idx)))
+ spi_nor_mask_erase_type(&erase[i]);
+
+ return 0;
+}
+
+/**
+ * spi_nor_parse_smpt() - parse Sector Map Parameter Table
+ * @nor: pointer to a 'struct spi_nor'
+ * @smpt_header: sector map parameter table header
+ *
+ * This table is optional, but when available, we parse it to identify the
+ * location and size of sectors within the main data array of the flash memory
+ * device and to identify which Erase Types are supported by each sector.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_smpt(struct spi_nor *nor,
+ const struct sfdp_parameter_header *smpt_header)
+{
+ const u32 *sector_map;
+ u32 *smpt;
+ size_t len;
+ u32 addr;
+ int ret;
+
+ /* Read the Sector Map Parameter Table. */
+ len = smpt_header->length * sizeof(*smpt);
+ smpt = kmalloc(len, GFP_KERNEL);
+ if (!smpt)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(smpt_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, smpt);
+ if (ret)
+ goto out;
+
+ /* Fix endianness of the SMPT DWORDs. */
+ le32_to_cpu_array(smpt, smpt_header->length);
+
+ sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
+ if (IS_ERR(sector_map)) {
+ ret = PTR_ERR(sector_map);
+ goto out;
+ }
+
+ ret = spi_nor_init_non_uniform_erase_map(nor, sector_map);
+ if (ret)
+ goto out;
+
+ spi_nor_regions_sort_erase_types(&nor->params->erase_map);
+ /* fall through */
+out:
+ kfree(smpt);
+ return ret;
+}
+
+/**
+ * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
+ * @nor: pointer to a 'struct spi_nor'.
+ * @param_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the 4-Byte Address Instruction Table length and version.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_4bait(struct spi_nor *nor,
+ const struct sfdp_parameter_header *param_header)
+{
+ static const struct sfdp_4bait reads[] = {
+ { SNOR_HWCAPS_READ, BIT(0) },
+ { SNOR_HWCAPS_READ_FAST, BIT(1) },
+ { SNOR_HWCAPS_READ_1_1_2, BIT(2) },
+ { SNOR_HWCAPS_READ_1_2_2, BIT(3) },
+ { SNOR_HWCAPS_READ_1_1_4, BIT(4) },
+ { SNOR_HWCAPS_READ_1_4_4, BIT(5) },
+ { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
+ { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
+ { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
+ };
+ static const struct sfdp_4bait programs[] = {
+ { SNOR_HWCAPS_PP, BIT(6) },
+ { SNOR_HWCAPS_PP_1_1_4, BIT(7) },
+ { SNOR_HWCAPS_PP_1_4_4, BIT(8) },
+ };
+ static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
+ { 0u /* not used */, BIT(9) },
+ { 0u /* not used */, BIT(10) },
+ { 0u /* not used */, BIT(11) },
+ { 0u /* not used */, BIT(12) },
+ };
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spi_nor_pp_command *params_pp = params->page_programs;
+ struct spi_nor_erase_map *map = &params->erase_map;
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ u32 *dwords;
+ size_t len;
+ u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
+ int i, ret;
+
+ if (param_header->major != SFDP_JESD216_MAJOR ||
+ param_header->length < SFDP_4BAIT_DWORD_MAX)
+ return -EINVAL;
+
+ /* Read the 4-byte Address Instruction Table. */
+ len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
+
+ /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+ dwords = kmalloc(len, GFP_KERNEL);
+ if (!dwords)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(param_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, dwords);
+ if (ret)
+ goto out;
+
+ /* Fix endianness of the 4BAIT DWORDs. */
+ le32_to_cpu_array(dwords, SFDP_4BAIT_DWORD_MAX);
+
+ /*
+ * Compute the subset of (Fast) Read commands for which the 4-byte
+ * version is supported.
+ */
+ discard_hwcaps = 0;
+ read_hwcaps = 0;
+ for (i = 0; i < ARRAY_SIZE(reads); i++) {
+ const struct sfdp_4bait *read = &reads[i];
+
+ discard_hwcaps |= read->hwcaps;
+ if ((params->hwcaps.mask & read->hwcaps) &&
+ (dwords[SFDP_DWORD(1)] & read->supported_bit))
+ read_hwcaps |= read->hwcaps;
+ }
+
+ /*
+ * Compute the subset of Page Program commands for which the 4-byte
+ * version is supported.
+ */
+ pp_hwcaps = 0;
+ for (i = 0; i < ARRAY_SIZE(programs); i++) {
+ const struct sfdp_4bait *program = &programs[i];
+
+ /*
+ * The 4 Byte Address Instruction (Optional) Table is the only
+ * SFDP table that indicates support for Page Program Commands.
+ * Bypass the params->hwcaps.mask and consider 4BAIT the biggest
+ * authority for specifying Page Program support.
+ */
+ discard_hwcaps |= program->hwcaps;
+ if (dwords[SFDP_DWORD(1)] & program->supported_bit)
+ pp_hwcaps |= program->hwcaps;
+ }
+
+ /*
+ * Compute the subset of Sector Erase commands for which the 4-byte
+ * version is supported.
+ */
+ erase_mask = 0;
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ const struct sfdp_4bait *erase = &erases[i];
+
+ if (dwords[SFDP_DWORD(1)] & erase->supported_bit)
+ erase_mask |= BIT(i);
+ }
+
+ /* Replicate the sort done for the map's erase types in BFPT. */
+ erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
+
+ /*
+ * We need at least one 4-byte op code per read, program and erase
+ * operation; the .read(), .write() and .erase() hooks share the
+ * nor->addr_nbytes value.
+ */
+ if (!read_hwcaps || !pp_hwcaps || !erase_mask)
+ goto out;
+
+ /*
+ * Discard all operations from the 4-byte instruction set which are
+ * not supported by this memory.
+ */
+ params->hwcaps.mask &= ~discard_hwcaps;
+ params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
+
+ /* Use the 4-byte address instruction set. */
+ for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
+ struct spi_nor_read_command *read_cmd = &params->reads[i];
+
+ read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
+ }
+
+ /* 4BAIT is the only SFDP table that indicates page program support. */
+ if (pp_hwcaps & SNOR_HWCAPS_PP) {
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
+ SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
+ /*
+ * Since xSPI Page Program opcode is backward compatible with
+ * Legacy SPI, use Legacy SPI opcode there as well.
+ */
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_8_8_8_DTR],
+ SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
+ }
+ if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4_4B,
+ SNOR_PROTO_1_1_4);
+ if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
+ SPINOR_OP_PP_1_4_4_4B,
+ SNOR_PROTO_1_4_4);
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ if (erase_mask & BIT(i))
+ erase_type[i].opcode = (dwords[SFDP_DWORD(2)] >>
+ erase_type[i].idx * 8) & 0xFF;
+ else
+ spi_nor_mask_erase_type(&erase_type[i]);
+ }
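+	/*
+	 * Note that the second 4BAIT DWORD packs one 4-byte erase opcode per
+	 * byte, indexed by the BFPT ordering, hence the erase_type[i].idx * 8
+	 * shift above rather than i * 8.
+	 */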
+
+ /*
+ * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
+	 * later because we already did the conversion to 4-byte opcodes. Also,
+	 * the latter function implements a legacy quirk for the erase size of
+	 * Spansion memories. However, this quirk is no longer needed with new
+	 * SFDP-compliant memories.
+ */
+ params->addr_nbytes = 4;
+ nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
+
+ /* fall through */
+out:
+ kfree(dwords);
+ return ret;
+}
+
+#define PROFILE1_DWORD1_RDSR_ADDR_BYTES BIT(29)
+#define PROFILE1_DWORD1_RDSR_DUMMY BIT(28)
+#define PROFILE1_DWORD1_RD_FAST_CMD GENMASK(15, 8)
+#define PROFILE1_DWORD4_DUMMY_200MHZ GENMASK(11, 7)
+#define PROFILE1_DWORD5_DUMMY_166MHZ GENMASK(31, 27)
+#define PROFILE1_DWORD5_DUMMY_133MHZ GENMASK(21, 17)
+#define PROFILE1_DWORD5_DUMMY_100MHZ GENMASK(11, 7)
+
+/**
+ * spi_nor_parse_profile1() - parse the xSPI Profile 1.0 table
+ * @nor: pointer to a 'struct spi_nor'
+ * @profile1_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the Profile 1.0 Table length and version.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_profile1(struct spi_nor *nor,
+ const struct sfdp_parameter_header *profile1_header)
+{
+ u32 *dwords, addr;
+ size_t len;
+ int ret;
+ u8 dummy, opcode;
+
+ len = profile1_header->length * sizeof(*dwords);
+ dwords = kmalloc(len, GFP_KERNEL);
+ if (!dwords)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(profile1_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, dwords);
+ if (ret)
+ goto out;
+
+ le32_to_cpu_array(dwords, profile1_header->length);
+
+ /* Get 8D-8D-8D fast read opcode and dummy cycles. */
+ opcode = FIELD_GET(PROFILE1_DWORD1_RD_FAST_CMD, dwords[SFDP_DWORD(1)]);
+
+ /* Set the Read Status Register dummy cycles and dummy address bytes. */
+ if (dwords[SFDP_DWORD(1)] & PROFILE1_DWORD1_RDSR_DUMMY)
+ nor->params->rdsr_dummy = 8;
+ else
+ nor->params->rdsr_dummy = 4;
+
+ if (dwords[SFDP_DWORD(1)] & PROFILE1_DWORD1_RDSR_ADDR_BYTES)
+ nor->params->rdsr_addr_nbytes = 4;
+ else
+ nor->params->rdsr_addr_nbytes = 0;
+
+ /*
+ * We don't know what speed the controller is running at. Find the
+ * dummy cycles for the fastest frequency the flash can run at to be
+ * sure we are never short of dummy cycles. A value of 0 means the
+ * frequency is not supported.
+ *
+	 * If we don't find anything, just log it and let flashes set the
+	 * correct value if needed in their fixup hooks.
+ */
+ dummy = FIELD_GET(PROFILE1_DWORD4_DUMMY_200MHZ, dwords[SFDP_DWORD(4)]);
+ if (!dummy)
+ dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_166MHZ,
+ dwords[SFDP_DWORD(5)]);
+ if (!dummy)
+ dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_133MHZ,
+ dwords[SFDP_DWORD(5)]);
+ if (!dummy)
+ dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_100MHZ,
+ dwords[SFDP_DWORD(5)]);
+ if (!dummy)
+ dev_dbg(nor->dev,
+ "Can't find dummy cycles from Profile 1.0 table\n");
+
+ /* Round up to an even value to avoid tripping controllers up. */
+ dummy = round_up(dummy, 2);
+
+ /* Update the fast read settings. */
+ nor->params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
+ spi_nor_set_read_settings(&nor->params->reads[SNOR_CMD_READ_8_8_8_DTR],
+ 0, dummy, opcode,
+ SNOR_PROTO_8_8_8_DTR);
+
+ /*
+ * Page Program is "Required Command" in the xSPI Profile 1.0. Update
+ * the params->hwcaps.mask here.
+ */
+ nor->params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
+
+out:
+ kfree(dwords);
+ return ret;
+}
+
+#define SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE BIT(31)
+
+/**
+ * spi_nor_parse_sccr() - Parse the Status, Control and Configuration Register
+ * Map.
+ * @nor: pointer to a 'struct spi_nor'
+ * @sccr_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the SCCR Map table length and version.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_sccr(struct spi_nor *nor,
+ const struct sfdp_parameter_header *sccr_header)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ u32 *dwords, addr;
+ size_t len;
+ int ret;
+
+ len = sccr_header->length * sizeof(*dwords);
+ dwords = kmalloc(len, GFP_KERNEL);
+ if (!dwords)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(sccr_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, dwords);
+ if (ret)
+ goto out;
+
+ le32_to_cpu_array(dwords, sccr_header->length);
+
+ /* Address offset for volatile registers (die 0) */
+ if (!params->vreg_offset) {
+ params->vreg_offset = devm_kmalloc(nor->dev, sizeof(*dwords),
+ GFP_KERNEL);
+ if (!params->vreg_offset) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ params->vreg_offset[0] = dwords[SFDP_DWORD(1)];
+ params->n_dice = 1;
+
+ if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE,
+ dwords[SFDP_DWORD(22)]))
+ nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
+
+out:
+ kfree(dwords);
+ return ret;
+}
+
+/**
+ * spi_nor_parse_sccr_mc() - Parse the Status, Control and Configuration
+ * Register Map Offsets for Multi-Chip SPI Memory
+ * Devices.
+ * @nor: pointer to a 'struct spi_nor'
+ * @sccr_mc_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the SCCR Map offsets table length and version.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_sccr_mc(struct spi_nor *nor,
+ const struct sfdp_parameter_header *sccr_mc_header)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ u32 *dwords, addr;
+ u8 i, n_dice;
+ size_t len;
+ int ret;
+
+ len = sccr_mc_header->length * sizeof(*dwords);
+ dwords = kmalloc(len, GFP_KERNEL);
+ if (!dwords)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(sccr_mc_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, dwords);
+ if (ret)
+ goto out;
+
+ le32_to_cpu_array(dwords, sccr_mc_header->length);
+
+ /*
+	 * Pair of DWORDs (volatile and non-volatile register offsets) per
+ * additional die. Hence, length = 2 * (number of additional dice).
+ */
+ n_dice = 1 + sccr_mc_header->length / 2;
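+	/*
+	 * For example, a (hypothetical) table length of 4 DWORDs describes 2
+	 * additional dice, so n_dice = 3 and the volatile register offsets
+	 * sit at dwords[0] and dwords[2].
+	 */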
+
+ /* Address offset for volatile registers of additional dice */
+ params->vreg_offset =
+ devm_krealloc(nor->dev, params->vreg_offset,
+ n_dice * sizeof(*dwords),
+ GFP_KERNEL);
+ if (!params->vreg_offset) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 1; i < n_dice; i++)
+ params->vreg_offset[i] = dwords[SFDP_DWORD(i) * 2];
+
+ params->n_dice = n_dice;
+
+out:
+ kfree(dwords);
+ return ret;
+}
+
+/**
+ * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
+ * after SFDP has been parsed. Called only for flashes that define JESD216 SFDP
+ * tables.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Used to tweak various flash parameters when the information provided by the
+ * SFDP tables is wrong.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_post_sfdp_fixups(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->post_sfdp) {
+ ret = nor->manufacturer->fixups->post_sfdp(nor);
+ if (ret)
+ return ret;
+ }
+
+ if (nor->info->fixups && nor->info->fixups->post_sfdp)
+ return nor->info->fixups->post_sfdp(nor);
+
+ return 0;
+}
+
+/**
+ * spi_nor_check_sfdp_signature() - check for a valid SFDP signature
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Used to detect if the flash supports the RDSFDP command as well as the
+ * presence of a valid SFDP table.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_check_sfdp_signature(struct spi_nor *nor)
+{
+ u32 signature;
+ int err;
+
+ /* Get the SFDP header. */
+ err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(signature),
+ &signature);
+ if (err < 0)
+ return err;
+
+ /* Check the SFDP signature. */
+ if (le32_to_cpu(signature) != SFDP_SIGNATURE)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
+ * specification. This is a standard which tends to be supported by almost all
+ * (Q)SPI memory manufacturers. These tables allow us to learn at
+ * runtime the main parameters needed to perform basic SPI flash operations such
+ * as Fast Read, Page Program or Sector Erase commands.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_parse_sfdp(struct spi_nor *nor)
+{
+ const struct sfdp_parameter_header *param_header, *bfpt_header;
+ struct sfdp_parameter_header *param_headers = NULL;
+ struct sfdp_header header;
+ struct device *dev = nor->dev;
+ struct sfdp *sfdp;
+ size_t sfdp_size;
+ size_t psize;
+ int i, err;
+
+ /* Get the SFDP header. */
+ err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
+ if (err < 0)
+ return err;
+
+ /* Check the SFDP header version. */
+ if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
+ header.major != SFDP_JESD216_MAJOR)
+ return -EINVAL;
+
+ /*
+ * Verify that the first and only mandatory parameter header is a
+ * Basic Flash Parameter Table header as specified in JESD216.
+ */
+ bfpt_header = &header.bfpt_header;
+ if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
+ bfpt_header->major != SFDP_JESD216_MAJOR)
+ return -EINVAL;
+
+ sfdp_size = SFDP_PARAM_HEADER_PTP(bfpt_header) +
+ SFDP_PARAM_HEADER_PARAM_LEN(bfpt_header);
+
+ /*
+ * Allocate memory then read all parameter headers with a single
+ * Read SFDP command. These parameter headers will actually be parsed
+ * twice: a first time to get the latest revision of the basic flash
+ * parameter table, then a second time to handle the supported optional
+ * tables.
+ * Hence we read the parameter headers once for all to reduce the
+ * processing time. Also we use kmalloc() instead of devm_kmalloc()
+ * because we don't need to keep these parameter headers: the allocated
+ * memory is always released with kfree() before exiting this function.
+ */
+ if (header.nph) {
+ psize = header.nph * sizeof(*param_headers);
+
+ param_headers = kmalloc(psize, GFP_KERNEL);
+ if (!param_headers)
+ return -ENOMEM;
+
+ err = spi_nor_read_sfdp(nor, sizeof(header),
+ psize, param_headers);
+ if (err < 0) {
+ dev_dbg(dev, "failed to read SFDP parameter headers\n");
+ goto exit;
+ }
+ }
+
+ /*
+ * Cache the complete SFDP data. It is not (easily) possible to fetch
+ * SFDP after probe time and we need it for the sysfs access.
+ */
+ for (i = 0; i < header.nph; i++) {
+ param_header = &param_headers[i];
+ sfdp_size = max_t(size_t, sfdp_size,
+ SFDP_PARAM_HEADER_PTP(param_header) +
+ SFDP_PARAM_HEADER_PARAM_LEN(param_header));
+ }
+
+ /*
+ * Limit the total size to a reasonable value to avoid allocating too
+	 * much memory just because the flash returned some insane values.
+ */
+ if (sfdp_size > PAGE_SIZE) {
+ dev_dbg(dev, "SFDP data (%zu) too big, truncating\n",
+ sfdp_size);
+ sfdp_size = PAGE_SIZE;
+ }
+
+ sfdp = devm_kzalloc(dev, sizeof(*sfdp), GFP_KERNEL);
+ if (!sfdp) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /*
+ * The SFDP is organized in chunks of DWORDs. Thus, in theory, the
+	 * sfdp_size should be a multiple of the DWORD size. But in case a flash
+ * is not spec compliant, make sure that we have enough space to store
+ * the complete SFDP data.
+ */
+ sfdp->num_dwords = DIV_ROUND_UP(sfdp_size, sizeof(*sfdp->dwords));
+ sfdp->dwords = devm_kcalloc(dev, sfdp->num_dwords,
+ sizeof(*sfdp->dwords), GFP_KERNEL);
+ if (!sfdp->dwords) {
+ err = -ENOMEM;
+ devm_kfree(dev, sfdp);
+ goto exit;
+ }
+
+ err = spi_nor_read_sfdp(nor, 0, sfdp_size, sfdp->dwords);
+ if (err < 0) {
+ dev_dbg(dev, "failed to read SFDP data\n");
+ devm_kfree(dev, sfdp->dwords);
+ devm_kfree(dev, sfdp);
+ goto exit;
+ }
+
+ nor->sfdp = sfdp;
+
+ /*
+ * Check other parameter headers to get the latest revision of
+ * the basic flash parameter table.
+ */
+ for (i = 0; i < header.nph; i++) {
+ param_header = &param_headers[i];
+
+ if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
+ param_header->major == SFDP_JESD216_MAJOR &&
+ (param_header->minor > bfpt_header->minor ||
+ (param_header->minor == bfpt_header->minor &&
+ param_header->length > bfpt_header->length)))
+ bfpt_header = param_header;
+ }
+
+ err = spi_nor_parse_bfpt(nor, bfpt_header);
+ if (err)
+ goto exit;
+
+ /* Parse optional parameter tables. */
+ for (i = 0; i < header.nph; i++) {
+ param_header = &param_headers[i];
+
+ switch (SFDP_PARAM_HEADER_ID(param_header)) {
+ case SFDP_SECTOR_MAP_ID:
+ err = spi_nor_parse_smpt(nor, param_header);
+ break;
+
+ case SFDP_4BAIT_ID:
+ err = spi_nor_parse_4bait(nor, param_header);
+ break;
+
+ case SFDP_PROFILE1_ID:
+ err = spi_nor_parse_profile1(nor, param_header);
+ break;
+
+ case SFDP_SCCR_MAP_ID:
+ err = spi_nor_parse_sccr(nor, param_header);
+ break;
+
+ case SFDP_SCCR_MAP_MC_ID:
+ err = spi_nor_parse_sccr_mc(nor, param_header);
+ break;
+
+ default:
+ break;
+ }
+
+ if (err) {
+ dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
+ SFDP_PARAM_HEADER_ID(param_header));
+ /*
+ * Let's not drop all information we extracted so far
+			 * if optional table parsers fail. On failure, each
+			 * optional parser is responsible for rolling back to
+			 * the previously known spi_nor data.
+ */
+ err = 0;
+ }
+ }
+
+ err = spi_nor_post_sfdp_fixups(nor);
+exit:
+ kfree(param_headers);
+ return err;
+}
diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h
new file mode 100644
index 0000000000..6eb99e1cdd
--- /dev/null
+++ b/drivers/mtd/spi-nor/sfdp.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#ifndef __LINUX_MTD_SFDP_H
+#define __LINUX_MTD_SFDP_H
+
+/* SFDP revisions */
+#define SFDP_JESD216_MAJOR 1
+#define SFDP_JESD216_MINOR 0
+#define SFDP_JESD216A_MINOR 5
+#define SFDP_JESD216B_MINOR 6
+
+/* SFDP DWORDS are indexed from 1 but C arrays are indexed from 0. */
+#define SFDP_DWORD(i) ((i) - 1)
+#define SFDP_MASK_CHECK(dword, mask) (((dword) & (mask)) == (mask))
+
+/* Basic Flash Parameter Table */
+
+/* JESD216 rev D defines a Basic Flash Parameter Table of 20 DWORDs. */
+#define BFPT_DWORD_MAX 20
+
+struct sfdp_bfpt {
+ u32 dwords[BFPT_DWORD_MAX];
+};
+
+/* The first version of JESD216 defined only 9 DWORDs. */
+#define BFPT_DWORD_MAX_JESD216 9
+#define BFPT_DWORD_MAX_JESD216B 16
+
+/* 1st DWORD. */
+#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
+#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
+#define BFPT_DWORD1_DTR BIT(19)
+#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
+#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
+#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
+
+/* 5th DWORD. */
+#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
+#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
+
+/* 11th DWORD. */
+#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
+#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
+
+/* 15th DWORD. */
+
+/*
+ * (from JESD216 rev B)
+ * Quad Enable Requirements (QER):
+ * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
+ *         reads based on instruction. DQ3/HOLD# functions are held during
+ *         the instruction phase.
+ * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ * Writing only one byte to the status register has the side-effect of
+ * clearing status register 2, including the QE bit. The 100b code is
+ * used if writing one byte to the status register does not modify
+ * status register 2.
+ * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
+ * one data byte where bit 6 is one.
+ * [...]
+ * - 011b: QE is bit 7 of status register 2. It is set via Write status
+ * register 2 instruction 3Eh with one data byte where bit 7 is one.
+ * [...]
+ * The status register 2 is read using instruction 3Fh.
+ * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ * In contrast to the 001b code, writing one byte to the status
+ * register does not modify status register 2.
+ * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
+ * Read Status instruction 05h. Status register2 is read using
+ * instruction 35h. QE is set via Write Status instruction 01h with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ */
+#define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
+#define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
+#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
+#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
+#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
+
+#define BFPT_DWORD16_EN4B_MASK GENMASK(31, 24)
+#define BFPT_DWORD16_EN4B_ALWAYS_4B BIT(30)
+#define BFPT_DWORD16_EN4B_4B_OPCODES BIT(29)
+#define BFPT_DWORD16_EN4B_16BIT_NV_CR BIT(28)
+#define BFPT_DWORD16_EN4B_BRWR BIT(27)
+#define BFPT_DWORD16_EN4B_WREAR BIT(26)
+#define BFPT_DWORD16_EN4B_WREN_EN4B BIT(25)
+#define BFPT_DWORD16_EN4B_EN4B BIT(24)
+#define BFPT_DWORD16_EX4B_MASK GENMASK(18, 14)
+#define BFPT_DWORD16_EX4B_16BIT_NV_CR BIT(18)
+#define BFPT_DWORD16_EX4B_BRWR BIT(17)
+#define BFPT_DWORD16_EX4B_WREAR BIT(16)
+#define BFPT_DWORD16_EX4B_WREN_EX4B BIT(15)
+#define BFPT_DWORD16_EX4B_EX4B BIT(14)
+#define BFPT_DWORD16_4B_ADDR_MODE_MASK \
+ (BFPT_DWORD16_EN4B_MASK | BFPT_DWORD16_EX4B_MASK)
+#define BFPT_DWORD16_4B_ADDR_MODE_16BIT_NV_CR \
+ (BFPT_DWORD16_EN4B_16BIT_NV_CR | BFPT_DWORD16_EX4B_16BIT_NV_CR)
+#define BFPT_DWORD16_4B_ADDR_MODE_BRWR \
+ (BFPT_DWORD16_EN4B_BRWR | BFPT_DWORD16_EX4B_BRWR)
+#define BFPT_DWORD16_4B_ADDR_MODE_WREAR \
+ (BFPT_DWORD16_EN4B_WREAR | BFPT_DWORD16_EX4B_WREAR)
+#define BFPT_DWORD16_4B_ADDR_MODE_WREN_EN4B_EX4B \
+ (BFPT_DWORD16_EN4B_WREN_EN4B | BFPT_DWORD16_EX4B_WREN_EX4B)
+#define BFPT_DWORD16_4B_ADDR_MODE_EN4B_EX4B \
+ (BFPT_DWORD16_EN4B_EN4B | BFPT_DWORD16_EX4B_EX4B)
+#define BFPT_DWORD16_SWRST_EN_RST BIT(12)
+
+#define BFPT_DWORD18_CMD_EXT_MASK GENMASK(30, 29)
+#define BFPT_DWORD18_CMD_EXT_REP (0x0UL << 29) /* Repeat */
+#define BFPT_DWORD18_CMD_EXT_INV (0x1UL << 29) /* Invert */
+#define BFPT_DWORD18_CMD_EXT_RES (0x2UL << 29) /* Reserved */
+#define BFPT_DWORD18_CMD_EXT_16B (0x3UL << 29) /* 16-bit opcode */
+
+struct sfdp_parameter_header {
+ u8 id_lsb;
+ u8 minor;
+ u8 major;
+ u8 length; /* in double words */
+ u8 parameter_table_pointer[3]; /* byte address */
+ u8 id_msb;
+};
+
+#endif /* __LINUX_MTD_SFDP_H */
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
new file mode 100644
index 0000000000..709822fced
--- /dev/null
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -0,0 +1,989 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+/* flash_info mfr_flag. Used to clear sticky proprietary SR bits. */
+#define USE_CLSR BIT(0)
+#define USE_CLPEF BIT(1)
+
+#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
+#define SPINOR_OP_CLPEF 0x82 /* Clear program/erase failure flags */
+#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */
+#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */
+#define SPINOR_REG_CYPRESS_VREG 0x00800000
+#define SPINOR_REG_CYPRESS_STR1 0x0
+#define SPINOR_REG_CYPRESS_STR1V \
+ (SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_STR1)
+#define SPINOR_REG_CYPRESS_CFR1 0x2
+#define SPINOR_REG_CYPRESS_CFR1_QUAD_EN BIT(1) /* Quad Enable */
+#define SPINOR_REG_CYPRESS_CFR2 0x3
+#define SPINOR_REG_CYPRESS_CFR2V \
+ (SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR2)
+#define SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK GENMASK(3, 0)
+#define SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24 0xb
+#define SPINOR_REG_CYPRESS_CFR2_ADRBYT BIT(7)
+#define SPINOR_REG_CYPRESS_CFR3 0x4
+#define SPINOR_REG_CYPRESS_CFR3_PGSZ BIT(4) /* Page size. */
+#define SPINOR_REG_CYPRESS_CFR5 0x6
+#define SPINOR_REG_CYPRESS_CFR5_BIT6 BIT(6)
+#define SPINOR_REG_CYPRESS_CFR5_DDR BIT(1)
+#define SPINOR_REG_CYPRESS_CFR5_OPI BIT(0)
+#define SPINOR_REG_CYPRESS_CFR5_OCT_DTR_EN \
+ (SPINOR_REG_CYPRESS_CFR5_BIT6 | SPINOR_REG_CYPRESS_CFR5_DDR | \
+ SPINOR_REG_CYPRESS_CFR5_OPI)
+#define SPINOR_REG_CYPRESS_CFR5_OCT_DTR_DS SPINOR_REG_CYPRESS_CFR5_BIT6
+#define SPINOR_OP_CYPRESS_RD_FAST 0xee
+#define SPINOR_REG_CYPRESS_ARCFN 0x00000006
+
+/* Cypress SPI NOR flash operations. */
+#define CYPRESS_NOR_WR_ANY_REG_OP(naddr, addr, ndata, buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 0), \
+ SPI_MEM_OP_ADDR(naddr, addr, 0), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(ndata, buf, 0))
+
+#define CYPRESS_NOR_RD_ANY_REG_OP(naddr, addr, ndummy, buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RD_ANY_REG, 0), \
+ SPI_MEM_OP_ADDR(naddr, addr, 0), \
+ SPI_MEM_OP_DUMMY(ndummy, 0), \
+ SPI_MEM_OP_DATA_IN(1, buf, 0))
+
+#define SPANSION_OP(opcode) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+/**
+ * struct spansion_nor_params - Spansion private parameters.
+ * @clsr: Clear Status Register or Clear Program and Erase Failure Flag
+ * opcode.
+ */
+struct spansion_nor_params {
+ u8 clsr;
+};
+
+/**
+ * spansion_nor_clear_sr() - Clear the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void spansion_nor_clear_sr(struct spi_nor *nor)
+{
+ const struct spansion_nor_params *priv_params = nor->params->priv;
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = SPANSION_OP(priv_params->clsr);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing SR\n", ret);
+}
+
+static int cypress_nor_sr_ready_and_clear_reg(struct spi_nor *nor, u64 addr)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spi_mem_op op =
+ CYPRESS_NOR_RD_ANY_REG_OP(params->addr_mode_nbytes, addr,
+ 0, nor->bouncebuf);
+ int ret;
+
+ if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
+ op.dummy.nbytes = params->rdsr_dummy;
+ op.data.nbytes = 2;
+ }
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
+ if (nor->bouncebuf[0] & SR_E_ERR)
+ dev_err(nor->dev, "Erase Error occurred\n");
+ else
+ dev_err(nor->dev, "Programming Error occurred\n");
+
+ spansion_nor_clear_sr(nor);
+
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ return ret;
+
+ return -EIO;
+ }
+
+ return !(nor->bouncebuf[0] & SR_WIP);
+}
+
+/**
+ * cypress_nor_sr_ready_and_clear() - Query the Status Register of each die by
+ * using Read Any Register command to see if the whole flash is ready for new
+ * commands and clear it if there are any errors.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int cypress_nor_sr_ready_and_clear(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ u64 addr;
+ int ret;
+ u8 i;
+
+ for (i = 0; i < params->n_dice; i++) {
+ addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_STR1;
+ ret = cypress_nor_sr_ready_and_clear_reg(nor, addr);
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int cypress_nor_set_memlat(struct spi_nor *nor, u64 addr)
+{
+ struct spi_mem_op op;
+ u8 *buf = nor->bouncebuf;
+ int ret;
+ u8 addr_mode_nbytes = nor->params->addr_mode_nbytes;
+
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes, addr, 0, buf);
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ /* Use 24 dummy cycles for memory array reads. */
+ *buf &= ~SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK;
+ *buf |= FIELD_PREP(SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK,
+ SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24);
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes, addr, 1, buf);
+
+ ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ nor->read_dummy = 24;
+
+ return 0;
+}
+
+static int cypress_nor_set_octal_dtr_bits(struct spi_nor *nor, u64 addr)
+{
+ struct spi_mem_op op;
+ u8 *buf = nor->bouncebuf;
+
+ /* Set the octal and DTR enable bits. */
+ buf[0] = SPINOR_REG_CYPRESS_CFR5_OCT_DTR_EN;
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_WR_ANY_REG_OP(nor->params->addr_mode_nbytes,
+ addr, 1, buf);
+
+ return spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+}
+
+static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
+{
+ const struct spi_nor_flash_parameter *params = nor->params;
+ u8 *buf = nor->bouncebuf;
+ u64 addr;
+ int i, ret;
+
+ for (i = 0; i < params->n_dice; i++) {
+ addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR2;
+ ret = cypress_nor_set_memlat(nor, addr);
+ if (ret)
+ return ret;
+
+ addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR5;
+ ret = cypress_nor_set_octal_dtr_bits(nor, addr);
+ if (ret)
+ return ret;
+ }
+
+ /* Read flash ID to make sure the switch was successful. */
+ ret = spi_nor_read_id(nor, nor->addr_nbytes, 3, buf,
+ SNOR_PROTO_8_8_8_DTR);
+ if (ret) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID after enabling 8D-8D-8D mode\n", ret);
+ return ret;
+ }
+
+ if (memcmp(buf, nor->info->id, nor->info->id_len))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cypress_nor_set_single_spi_bits(struct spi_nor *nor, u64 addr)
+{
+ struct spi_mem_op op;
+ u8 *buf = nor->bouncebuf;
+
+ /*
+ * The register is 1-byte wide, but 1-byte transactions are not allowed
+ * in 8D-8D-8D mode. Since there is no register at the next location,
+ * just initialize the value to 0 and let the transaction go on.
+ */
+ buf[0] = SPINOR_REG_CYPRESS_CFR5_OCT_DTR_DS;
+ buf[1] = 0;
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_WR_ANY_REG_OP(nor->addr_nbytes, addr, 2, buf);
+ return spi_nor_write_any_volatile_reg(nor, &op, SNOR_PROTO_8_8_8_DTR);
+}
+
+static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
+{
+ const struct spi_nor_flash_parameter *params = nor->params;
+ u8 *buf = nor->bouncebuf;
+ u64 addr;
+ int i, ret;
+
+ for (i = 0; i < params->n_dice; i++) {
+ addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR5;
+ ret = cypress_nor_set_single_spi_bits(nor, addr);
+ if (ret)
+ return ret;
+ }
+
+ /* Read flash ID to make sure the switch was successful. */
+ ret = spi_nor_read_id(nor, 0, 0, buf, SNOR_PROTO_1_1_1);
+ if (ret) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID after disabling 8D-8D-8D mode\n", ret);
+ return ret;
+ }
+
+ if (memcmp(buf, nor->info->id, nor->info->id_len))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cypress_nor_quad_enable_volatile_reg(struct spi_nor *nor, u64 addr)
+{
+ struct spi_mem_op op;
+ u8 addr_mode_nbytes = nor->params->addr_mode_nbytes;
+ u8 cfr1v_written;
+ int ret;
+
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes, addr, 0,
+ nor->bouncebuf);
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR1_QUAD_EN)
+ return 0;
+
+ /* Update the Quad Enable bit. */
+ nor->bouncebuf[0] |= SPINOR_REG_CYPRESS_CFR1_QUAD_EN;
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes, addr, 1,
+ nor->bouncebuf);
+ ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ cfr1v_written = nor->bouncebuf[0];
+
+ /* Read back and check it. */
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes, addr, 0,
+ nor->bouncebuf);
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] != cfr1v_written) {
+ dev_err(nor->dev, "CFR1: Read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * cypress_nor_quad_enable_volatile() - enable Quad I/O mode in volatile
+ * register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Updating volatile registers is recommended in field applications because a
+ * power interruption risks corrupting the non-volatile registers. This
+ * function sets the Quad Enable bit in the volatile CFR1. If users set the
+ * Quad Enable bit in the non-volatile CFR1 in advance (typically with a Flash
+ * programmer before mounting the Flash on a PCB), the Quad Enable bit in the
+ * volatile CFR1 is also set during Flash power-up.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ u64 addr;
+ u8 i;
+ int ret;
+
+ for (i = 0; i < params->n_dice; i++) {
+ addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR1;
+ ret = cypress_nor_quad_enable_volatile_reg(nor, addr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * cypress_nor_determine_addr_mode_by_sr1() - Determine current address mode
+ * (3 or 4-byte) by querying status
+ * register 1 (SR1).
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr_mode:		pointer to a buffer where we return the determined
+ *			address mode.
+ *
+ * This function tries to determine the current address mode by comparing the
+ * SR1 values from RDSR1 (no address), RDAR (3-byte address), and RDAR (4-byte
+ * address).
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_determine_addr_mode_by_sr1(struct spi_nor *nor,
+ u8 *addr_mode)
+{
+ struct spi_mem_op op =
+ CYPRESS_NOR_RD_ANY_REG_OP(3, SPINOR_REG_CYPRESS_STR1V, 0,
+ nor->bouncebuf);
+ bool is3byte, is4byte;
+ int ret;
+
+ ret = spi_nor_read_sr(nor, &nor->bouncebuf[1]);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ is3byte = (nor->bouncebuf[0] == nor->bouncebuf[1]);
+
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(4, SPINOR_REG_CYPRESS_STR1V, 0,
+ nor->bouncebuf);
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ is4byte = (nor->bouncebuf[0] == nor->bouncebuf[1]);
+
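+	/*
+	 * Exactly one of the two RDAR reads should have matched RDSR1; if
+	 * both did, or neither did, the result is ambiguous, so give up
+	 * rather than guess.
+	 */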
+ if (is3byte == is4byte)
+ return -EIO;
+ if (is3byte)
+ *addr_mode = 3;
+ else
+ *addr_mode = 4;
+
+ return 0;
+}
+
+/**
+ * cypress_nor_set_addr_mode_nbytes() - Set the number of address bytes mode of
+ * current address mode.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Determine current address mode by reading SR1 with different methods, then
+ * query CFR2V[7] to confirm. If the determination fails, force the device
+ * into 4-byte address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_set_addr_mode_nbytes(struct spi_nor *nor)
+{
+ struct spi_mem_op op;
+ u8 addr_mode;
+ int ret;
+
+	/*
+	 * Read SR1 via RDSR1 and via RDAR with both 3-byte and 4-byte
+	 * addresses. Issue Write Enable first: it sets bit 1 (WEL) in SR1 and
+	 * thus gives the comparisons a known non-zero value.
+	 */
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+ ret = cypress_nor_determine_addr_mode_by_sr1(nor, &addr_mode);
+ if (ret) {
+ ret = spi_nor_set_4byte_addr_mode(nor, true);
+ if (ret)
+ return ret;
+ return spi_nor_write_disable(nor);
+ }
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ return ret;
+
+ /*
+ * Query CFR2V and make sure no contradiction between determined address
+ * mode and CFR2V[7].
+ */
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(addr_mode, SPINOR_REG_CYPRESS_CFR2V,
+ 0, nor->bouncebuf);
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR2_ADRBYT) {
+ if (addr_mode != 4)
+ return spi_nor_set_4byte_addr_mode(nor, true);
+ } else {
+ if (addr_mode != 3)
+ return spi_nor_set_4byte_addr_mode(nor, true);
+ }
+
+ nor->params->addr_nbytes = addr_mode;
+ nor->params->addr_mode_nbytes = addr_mode;
+
+ return 0;
+}
+
+/**
+ * cypress_nor_get_page_size() - Get flash page size configuration.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * The BFPT table advertises a 512B or 256B page size depending on part but the
+ * page size is actually configurable (with the default being 256B). Read from
+ * CFR3V[4] and set the correct size.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_get_page_size(struct spi_nor *nor)
+{
+ struct spi_mem_op op =
+ CYPRESS_NOR_RD_ANY_REG_OP(nor->params->addr_mode_nbytes,
+ 0, 0, nor->bouncebuf);
+ struct spi_nor_flash_parameter *params = nor->params;
+ int ret;
+ u8 i;
+
+ /*
+	 * Use the minimum common page size configuration. Programming 256
+	 * bytes under the 512-byte page size configuration is safe.
+ */
+ params->page_size = 256;
+ for (i = 0; i < params->n_dice; i++) {
+ op.addr.val = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR3;
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (!(nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3_PGSZ))
+ return 0;
+ }
+
+ params->page_size = 512;
+
+ return 0;
+}
+
+static void cypress_nor_ecc_init(struct spi_nor *nor)
+{
+ /*
+ * Programming is supported only in 16-byte ECC data unit granularity.
+ * Byte-programming, bit-walking, or multiple program operations to the
+ * same ECC data unit without an erase are not allowed.
+ */
+ nor->params->writesize = 16;
+ nor->flags |= SNOR_F_ECC;
+}
+
+static int
+s25fs256t_post_bfpt_fixup(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ struct spi_mem_op op;
+ int ret;
+
+ ret = cypress_nor_set_addr_mode_nbytes(nor);
+ if (ret)
+ return ret;
+
+ /* Read Architecture Configuration Register (ARCFN) */
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(nor->params->addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_ARCFN, 1,
+ nor->bouncebuf);
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ /* ARCFN value must be 0 if uniform sector is selected */
+ if (nor->bouncebuf[0])
+ return -ENODEV;
+
+ return 0;
+}
+
+static int s25fs256t_post_sfdp_fixup(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ /*
+ * S25FS256T does not define the SCCR map, but we would like to use the
+ * same code base for both single and multi chip package devices, thus
+ * set the vreg_offset and n_dice to be able to do so.
+ */
+ params->vreg_offset = devm_kmalloc(nor->dev, sizeof(u32), GFP_KERNEL);
+ if (!params->vreg_offset)
+ return -ENOMEM;
+
+ params->vreg_offset[0] = SPINOR_REG_CYPRESS_VREG;
+ params->n_dice = 1;
+
+ /* PP_1_1_4_4B is supported but missing in 4BAIT. */
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4_4B,
+ SNOR_PROTO_1_1_4);
+
+ return cypress_nor_get_page_size(nor);
+}
+
+static int s25fs256t_late_init(struct spi_nor *nor)
+{
+ cypress_nor_ecc_init(nor);
+
+ return 0;
+}
+
+static struct spi_nor_fixups s25fs256t_fixups = {
+ .post_bfpt = s25fs256t_post_bfpt_fixup,
+ .post_sfdp = s25fs256t_post_sfdp_fixup,
+ .late_init = s25fs256t_late_init,
+};
+
+static int
+s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ int ret;
+
+ ret = cypress_nor_set_addr_mode_nbytes(nor);
+ if (ret)
+ return ret;
+
+ /* Replace Quad Enable with volatile version */
+ nor->params->quad_enable = cypress_nor_quad_enable_volatile;
+
+ return 0;
+}
+
+static int s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spi_nor_erase_type *erase_type = params->erase_map.erase_type;
+ unsigned int i;
+
+ if (!params->n_dice || !params->vreg_offset) {
+ dev_err(nor->dev, "%s failed. The volatile register offset could not be retrieved from SFDP.\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ /* The 2 Gb parts duplicate info and advertise 4 dice instead of 2. */
+ if (params->size == SZ_256M)
+ params->n_dice = 2;
+
+ /*
+	 * In some parts, 3-byte erase opcodes are advertised by 4BAIT.
+	 * Convert them to 4-byte erase opcodes.
+ */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ switch (erase_type[i].opcode) {
+ case SPINOR_OP_SE:
+ erase_type[i].opcode = SPINOR_OP_SE_4B;
+ break;
+ case SPINOR_OP_BE_4K:
+ erase_type[i].opcode = SPINOR_OP_BE_4K_4B;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return cypress_nor_get_page_size(nor);
+}
+
+static int s25hx_t_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ /* Fast Read 4B requires mode cycles */
+ params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
+ params->ready = cypress_nor_sr_ready_and_clear;
+ cypress_nor_ecc_init(nor);
+
+ return 0;
+}
+
+static struct spi_nor_fixups s25hx_t_fixups = {
+ .post_bfpt = s25hx_t_post_bfpt_fixup,
+ .post_sfdp = s25hx_t_post_sfdp_fixup,
+ .late_init = s25hx_t_late_init,
+};
+
+/**
+ * cypress_nor_set_octal_dtr() - Enable or disable octal DTR on Cypress flashes.
+ * @nor: pointer to a 'struct spi_nor'
+ * @enable: whether to enable or disable Octal DTR
+ *
+ * This also sets the memory access latency cycles to 24 to allow the flash to
+ * run at up to 200 MHz.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
+{
+ return enable ? cypress_nor_octal_dtr_en(nor) :
+ cypress_nor_octal_dtr_dis(nor);
+}
+
+static int s28hx_t_post_sfdp_fixup(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ if (!params->n_dice || !params->vreg_offset) {
+ dev_err(nor->dev, "%s failed. The volatile register offset could not be retrieved from SFDP.\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ /* The 2 Gb parts duplicate info and advertise 4 dice instead of 2. */
+ if (params->size == SZ_256M)
+ params->n_dice = 2;
+
+ /*
+ * On older versions of the flash the xSPI Profile 1.0 table has the
+ * 8D-8D-8D Fast Read opcode as 0x00. But it actually should be 0xEE.
+ */
+ if (params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode == 0)
+ params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode =
+ SPINOR_OP_CYPRESS_RD_FAST;
+
+ /* This flash is also missing the 4-byte Page Program opcode bit. */
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
+ /*
+ * Since the xSPI Page Program opcode is backward compatible with
+ * Legacy SPI, use the Legacy SPI opcode there as well.
+ */
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
+ SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
+
+ /*
+ * The xSPI Profile 1.0 table advertises the number of additional
+ * address bytes needed for Read Status Register command as 0 but the
+ * actual value for that is 4.
+ */
+ params->rdsr_addr_nbytes = 4;
+
+ return cypress_nor_get_page_size(nor);
+}
+
+static int s28hx_t_post_bfpt_fixup(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ return cypress_nor_set_addr_mode_nbytes(nor);
+}
+
+static int s28hx_t_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ params->set_octal_dtr = cypress_nor_set_octal_dtr;
+ params->ready = cypress_nor_sr_ready_and_clear;
+ cypress_nor_ecc_init(nor);
+
+ return 0;
+}
+
+static const struct spi_nor_fixups s28hx_t_fixups = {
+ .post_sfdp = s28hx_t_post_sfdp_fixup,
+ .post_bfpt = s28hx_t_post_bfpt_fixup,
+ .late_init = s28hx_t_late_init,
+};
+
+static int
+s25fs_s_nor_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ /*
+ * The S25FS-S chip family reports 512-byte pages in BFPT but
+ * in reality the write buffer still wraps at the safe default
+ * of 256 bytes. Overwrite the page size advertised by BFPT
+ * to get the writes working.
+ */
+ nor->params->page_size = 256;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups s25fs_s_nor_fixups = {
+ .post_bfpt = s25fs_s_nor_post_bfpt_fixups,
+};
+
+static const struct flash_info spansion_nor_parts[] = {
+ /* Spansion/Cypress -- single (large) sector size only, at least
+ * for the chips listed here (without boot sectors).
+ */
+ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128)
+ NO_SFDP_FLAGS(SPI_NOR_SKIP_SFDP | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25fs_s_nor_fixups, },
+ { "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25fs_s_nor_fixups, },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8) },
+ { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16) },
+ { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32) },
+ { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64) },
+ { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128) },
+ { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "s25fs256t", INFO6(0x342b19, 0x0f0890, 0, 0)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
+ .fixups = &s25fs256t_fixups },
+ { "s25hl512t", INFO6(0x342a1a, 0x0f0390, 0, 0)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
+ .fixups = &s25hx_t_fixups },
+ { "s25hl01gt", INFO6(0x342a1b, 0x0f0390, 0, 0)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
+ .fixups = &s25hx_t_fixups },
+ { "s25hl02gt", INFO6(0x342a1c, 0x0f0090, 0, 0)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
+ FLAGS(NO_CHIP_ERASE)
+ .fixups = &s25hx_t_fixups },
+ { "s25hs512t", INFO6(0x342b1a, 0x0f0390, 0, 0)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
+ .fixups = &s25hx_t_fixups },
+ { "s25hs01gt", INFO6(0x342b1b, 0x0f0390, 0, 0)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
+ .fixups = &s25hx_t_fixups },
+ { "s25hs02gt", INFO6(0x342b1c, 0x0f0090, 0, 0)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
+ FLAGS(NO_CHIP_ERASE)
+ .fixups = &s25hx_t_fixups },
+ { "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
+ FLAGS(SPI_NOR_NO_ERASE) },
+ { "s28hl512t", INFO(0x345a1a, 0, 0, 0)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
+ .fixups = &s28hx_t_fixups,
+ },
+ { "s28hl01gt", INFO(0x345a1b, 0, 0, 0)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
+ .fixups = &s28hx_t_fixups,
+ },
+ { "s28hs512t", INFO(0x345b1a, 0, 0, 0)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
+ .fixups = &s28hx_t_fixups,
+ },
+ { "s28hs01gt", INFO(0x345b1b, 0, 0, 0)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
+ .fixups = &s28hx_t_fixups,
+ },
+ { "s28hs02gt", INFO(0x345b1c, 0, 0, 0)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
+ .fixups = &s28hx_t_fixups,
+ },
+};
+
+/**
+ * spansion_nor_sr_ready_and_clear() - Query the Status Register to see if the
+ * flash is ready for new commands and clear it if there are any errors.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int spansion_nor_sr_ready_and_clear(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
+ if (nor->bouncebuf[0] & SR_E_ERR)
+ dev_err(nor->dev, "Erase Error occurred\n");
+ else
+ dev_err(nor->dev, "Programming Error occurred\n");
+
+ spansion_nor_clear_sr(nor);
+
+ /*
+ * The WEL bit remains set when an erase or page program error
+ * occurs. Issue a Write Disable command to protect against
+ * inadvertent writes that could corrupt the contents of the
+ * memory.
+ */
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ return ret;
+
+ return -EIO;
+ }
+
+ return !(nor->bouncebuf[0] & SR_WIP);
+}
+
+static int spansion_nor_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spansion_nor_params *priv_params;
+ u8 mfr_flags = nor->info->mfr_flags;
+
+ if (params->size > SZ_16M) {
+ nor->flags |= SNOR_F_4B_OPCODES;
+ /* No small sector erase for 4-byte command set */
+ nor->erase_opcode = SPINOR_OP_SE;
+ nor->mtd.erasesize = nor->info->sector_size;
+ }
+
+ if (mfr_flags & (USE_CLSR | USE_CLPEF)) {
+ priv_params = devm_kmalloc(nor->dev, sizeof(*priv_params),
+ GFP_KERNEL);
+ if (!priv_params)
+ return -ENOMEM;
+
+ if (mfr_flags & USE_CLSR)
+ priv_params->clsr = SPINOR_OP_CLSR;
+ else if (mfr_flags & USE_CLPEF)
+ priv_params->clsr = SPINOR_OP_CLPEF;
+
+ params->priv = priv_params;
+ params->ready = spansion_nor_sr_ready_and_clear;
+ }
+
+ return 0;
+}
+
+static const struct spi_nor_fixups spansion_nor_fixups = {
+ .late_init = spansion_nor_late_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_spansion = {
+ .name = "spansion",
+ .parts = spansion_nor_parts,
+ .nparts = ARRAY_SIZE(spansion_nor_parts),
+ .fixups = &spansion_nor_fixups,
+};
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
new file mode 100644
index 0000000000..197d2c1101
--- /dev/null
+++ b/drivers/mtd/spi-nor/sst.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+/* SST flash_info mfr_flag. Used to specify SST byte programming. */
+#define SST_WRITE BIT(0)
+
+#define SST26VF_CR_BPNV BIT(3)
+
+static int sst26vf_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ return -EOPNOTSUPP;
+}
+
+static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ /* We only support unlocking the entire flash array. */
+ if (ofs != 0 || len != nor->params->size)
+ return -EINVAL;
+
+ ret = spi_nor_read_cr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (!(nor->bouncebuf[0] & SST26VF_CR_BPNV)) {
+ dev_dbg(nor->dev, "Some blocks have been permanently locked\n");
+ return -EINVAL;
+ }
+
+ return spi_nor_global_block_unlock(nor);
+}
+
+static int sst26vf_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct spi_nor_locking_ops sst26vf_nor_locking_ops = {
+ .lock = sst26vf_nor_lock,
+ .unlock = sst26vf_nor_unlock,
+ .is_locked = sst26vf_nor_is_locked,
+};
+
+static int sst26vf_nor_late_init(struct spi_nor *nor)
+{
+ nor->params->locking_ops = &sst26vf_nor_locking_ops;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups sst26vf_nor_fixups = {
+ .late_init = sst26vf_nor_late_init,
+};
+
+static const struct flash_info sst_nor_parts[] = {
+ /* SST -- large erase sizes are "overlays", "sectors" are 4K */
+ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP |
+ SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "sst26vf032b", INFO(0xbf2642, 0, 0, 0)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ PARSE_SFDP
+ .fixups = &sst26vf_nor_fixups },
+ { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ .fixups = &sst26vf_nor_fixups },
+};
+
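+/*
+ * SST byte programming: after a single Write Enable, the first AAI Word
+ * Program command (SPINOR_OP_AAI_WP) carries the start address and two data
+ * bytes; each subsequent AAI command sends only two more bytes while the
+ * device auto-increments the address. A Write Disable ends the sequence.
+ * Odd head and tail bytes are written with plain Byte Program
+ * (SPINOR_OP_BP).
+ */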
+static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ size_t actual = 0;
+ int ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_prep_and_lock(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
+
+ nor->sst_write_second = false;
+
+ /* Start write from odd address. */
+ if (to % 2) {
+ nor->program_opcode = SPINOR_OP_BP;
+
+ /* write one byte. */
+ ret = spi_nor_write_data(nor, to, 1, buf);
+ if (ret < 0)
+ goto out;
+ WARN(ret != 1, "While writing 1 byte, wrote %i bytes\n", ret);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+
+ to++;
+ actual++;
+ }
+
+ /* Write out most of the data here. */
+ for (; actual < len - 1; actual += 2) {
+ nor->program_opcode = SPINOR_OP_AAI_WP;
+
+ /* write two bytes. */
+ ret = spi_nor_write_data(nor, to, 2, buf + actual);
+ if (ret < 0)
+ goto out;
+ WARN(ret != 2, "While writing 2 bytes, wrote %i bytes\n", ret);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+ to += 2;
+ nor->sst_write_second = true;
+ }
+ nor->sst_write_second = false;
+
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ goto out;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+
+ /* Write out trailing byte if it exists. */
+ if (actual != len) {
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
+
+ nor->program_opcode = SPINOR_OP_BP;
+ ret = spi_nor_write_data(nor, to, 1, buf + actual);
+ if (ret < 0)
+ goto out;
+ WARN(ret != 1, "While writing 1 byte, wrote %i bytes\n", ret);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+
+ actual += 1;
+
+ ret = spi_nor_write_disable(nor);
+ }
+out:
+ *retlen += actual;
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+static int sst_nor_late_init(struct spi_nor *nor)
+{
+ if (nor->info->mfr_flags & SST_WRITE)
+ nor->mtd._write = sst_nor_write;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups sst_nor_fixups = {
+ .late_init = sst_nor_late_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_sst = {
+ .name = "sst",
+ .parts = sst_nor_parts,
+ .nparts = ARRAY_SIZE(sst_nor_parts),
+ .fixups = &sst_nor_fixups,
+};
diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
new file mode 100644
index 0000000000..5ab9d53248
--- /dev/null
+++ b/drivers/mtd/spi-nor/swp.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SPI NOR Software Write Protection logic.
+ *
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static u8 spi_nor_get_sr_bp_mask(struct spi_nor *nor)
+{
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6)
+ return mask | SR_BP3_BIT6;
+
+ if (nor->flags & SNOR_F_HAS_4BIT_BP)
+ return mask | SR_BP3;
+
+ return mask;
+}
+
+static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor)
+{
+ if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
+ return SR_TB_BIT6;
+ else
+ return SR_TB_BIT5;
+}
+
+static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
+{
+ unsigned int bp_slots, bp_slots_needed;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+
+ /* Reserve one slot for "protect none" and one for "protect all". */
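+ /*
+ * e.g. a 16 MiB part with 256 x 64 KiB sectors and three BP bits:
+ * bp_slots = 2^3 - 2 = 6, bp_slots_needed = ilog2(256) = 8, so the
+ * minimum protected length is 64 KiB << (8 - 6) = 256 KiB.
+ */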
+ bp_slots = (1 << hweight8(mask)) - 2;
+ bp_slots_needed = ilog2(nor->info->n_sectors);
+
+ if (bp_slots_needed > bp_slots)
+ return nor->info->sector_size <<
+ (bp_slots_needed - bp_slots);
+ else
+ return nor->info->sector_size;
+}
+
+static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
+ uint64_t *len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u64 min_prot_len;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+ u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+ u8 bp, val = sr & mask;
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3_BIT6)
+ val = (val & ~SR_BP3_BIT6) | SR_BP3;
+
+ bp = val >> SR_BP_SHIFT;
+
+ if (!bp) {
+ /* No protection */
+ *ofs = 0;
+ *len = 0;
+ return;
+ }
+
+ min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+ *len = min_prot_len << (bp - 1);
+
+ if (*len > mtd->size)
+ *len = mtd->size;
+
+ if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
+ *ofs = 0;
+ else
+ *ofs = mtd->size - *len;
+}
+
+/*
+ * Return true if the entire region is locked (if @locked is true) or unlocked
+ * (if @locked is false); false otherwise.
+ */
+static bool spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
+ uint64_t len, u8 sr, bool locked)
+{
+ loff_t lock_offs, lock_offs_max, offs_max;
+ uint64_t lock_len;
+
+ if (!len)
+ return true;
+
+ spi_nor_get_locked_range_sr(nor, sr, &lock_offs, &lock_len);
+
+ lock_offs_max = lock_offs + lock_len;
+ offs_max = ofs + len;
+
+ if (locked)
+ /* Requested range is a sub-range of locked range */
+ return (offs_max <= lock_offs_max) && (ofs >= lock_offs);
+ else
+ /* Requested range does not overlap with locked range */
+ return (ofs >= lock_offs_max) || (offs_max <= lock_offs);
+}
+
+static bool spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true);
+}
+
+static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs,
+ uint64_t len, u8 sr)
+{
+ return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false);
+}
+
+/*
+ * Lock a region of the flash. Compatible with ST Micro and similar flash.
+ * Supports the block protection bits BP{0,1,2}/BP{0,1,2,3} in the status
+ * register (SR). Does not support these features found in newer SR bitfields:
+ * - SEC: sector/block protect - only handle SEC=0 (block protect)
+ * - CMP: complement protect - only support CMP=0 (range is not complemented)
+ *
+ * Support for the following is provided conditionally for some flash:
+ * - TB: top/bottom protect
+ *
+ * Sample table portion for 8MB flash (Winbond w25q64fw):
+ *
+ * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
+ * --------------------------------------------------------------------------
+ * X | X | 0 | 0 | 0 | NONE | NONE
+ * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
+ * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
+ * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
+ * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
+ * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
+ * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
+ * X | X | 1 | 1 | 1 | 8 MB | ALL
+ * ------|-------|-------|-------|-------|---------------|-------------------
+ * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
+ * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
+ * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
+ * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
+ * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
+ * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
+ *
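+ * e.g. locking the upper 1 MiB of the 8 MiB flash above: lock_len = 1 MiB,
+ * min_prot_len = 128 KiB, pow = ilog2(1 MiB) - ilog2(128 KiB) + 1 = 4, so
+ * BP2,BP1,BP0 = 1,0,0 -- the "1 MB / Upper 1/8" row of the table.
+ *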
+ * Returns negative on errors, 0 on success.
+ */
+static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u64 min_prot_len;
+ int ret, status_old, status_new;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+ u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+ u8 pow, val;
+ loff_t lock_len;
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+ bool use_top;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ status_old = nor->bouncebuf[0];
+
+ /* If nothing in our range is unlocked, we don't need to do anything */
+ if (spi_nor_is_locked_sr(nor, ofs, len, status_old))
+ return 0;
+
+ /* If anything below us is unlocked, we can't use 'bottom' protection */
+ if (!spi_nor_is_locked_sr(nor, 0, ofs, status_old))
+ can_be_bottom = false;
+
+ /* If anything above us is unlocked, we can't use 'top' protection */
+ if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ status_old))
+ can_be_top = false;
+
+ if (!can_be_bottom && !can_be_top)
+ return -EINVAL;
+
+ /* Prefer top, if both are valid */
+ use_top = can_be_top;
+
+ /* lock_len: length of region that should end up locked */
+ if (use_top)
+ lock_len = mtd->size - ofs;
+ else
+ lock_len = ofs + len;
+
+ if (lock_len == mtd->size) {
+ val = mask;
+ } else {
+ min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+ pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
+ val = pow << SR_BP_SHIFT;
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
+ val = (val & ~SR_BP3) | SR_BP3_BIT6;
+
+ if (val & ~mask)
+ return -EINVAL;
+
+ /* Don't "lock" with no region! */
+ if (!(val & mask))
+ return -EINVAL;
+ }
+
+ status_new = (status_old & ~mask & ~tb_mask) | val;
+
+ /*
+ * Disallow further writes if WP# pin is neither left floating nor
+ * wrongly tied to GND (that includes internal pull-downs).
+ * WP# pin hard strapped to GND can be a valid use case.
+ */
+ if (!(nor->flags & SNOR_F_NO_WP))
+ status_new |= SR_SRWD;
+
+ if (!use_top)
+ status_new |= tb_mask;
+
+ /* Don't bother if they're the same */
+ if (status_new == status_old)
+ return 0;
+
+ /* Only modify protection if it will not unlock other areas */
+ if ((status_new & mask) < (status_old & mask))
+ return -EINVAL;
+
+ return spi_nor_write_sr_and_check(nor, status_new);
+}
+
+/*
+ * Unlock a region of the flash. See spi_nor_sr_lock() for more info
+ *
+ * Returns negative on errors, 0 on success.
+ */
+static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u64 min_prot_len;
+ int ret, status_old, status_new;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+ u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+ u8 pow, val;
+ loff_t lock_len;
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+ bool use_top;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ status_old = nor->bouncebuf[0];
+
+ /* If nothing in our range is locked, we don't need to do anything */
+ if (spi_nor_is_unlocked_sr(nor, ofs, len, status_old))
+ return 0;
+
+ /* If anything below us is locked, we can't use 'top' protection */
+ if (!spi_nor_is_unlocked_sr(nor, 0, ofs, status_old))
+ can_be_top = false;
+
+ /* If anything above us is locked, we can't use 'bottom' protection */
+ if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ status_old))
+ can_be_bottom = false;
+
+ if (!can_be_bottom && !can_be_top)
+ return -EINVAL;
+
+ /* Prefer top, if both are valid */
+ use_top = can_be_top;
+
+ /* lock_len: length of region that should remain locked */
+ if (use_top)
+ lock_len = mtd->size - (ofs + len);
+ else
+ lock_len = ofs;
+
+ if (lock_len == 0) {
+ val = 0; /* fully unlocked */
+ } else {
+ min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+ pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
+ val = pow << SR_BP_SHIFT;
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
+ val = (val & ~SR_BP3) | SR_BP3_BIT6;
+
+ /* Some power-of-two sizes are not supported */
+ if (val & ~mask)
+ return -EINVAL;
+ }
+
+ status_new = (status_old & ~mask & ~tb_mask) | val;
+
+ /* Don't protect status register if we're fully unlocked */
+ if (lock_len == 0)
+ status_new &= ~SR_SRWD;
+
+ if (!use_top)
+ status_new |= tb_mask;
+
+ /* Don't bother if they're the same */
+ if (status_new == status_old)
+ return 0;
+
+ /* Only modify protection if it will not lock other areas */
+ if ((status_new & mask) > (status_old & mask))
+ return -EINVAL;
+
+ return spi_nor_write_sr_and_check(nor, status_new);
+}
+
+/*
+ * Check if a region of the flash is (completely) locked. See spi_nor_sr_lock()
+ * for more info.
+ *
+ * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
+ * negative on errors.
+ */
+static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ return spi_nor_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
+}
+
+static const struct spi_nor_locking_ops spi_nor_sr_locking_ops = {
+ .lock = spi_nor_sr_lock,
+ .unlock = spi_nor_sr_unlock,
+ .is_locked = spi_nor_sr_is_locked,
+};
+
+void spi_nor_init_default_locking_ops(struct spi_nor *nor)
+{
+ nor->params->locking_ops = &spi_nor_sr_locking_ops;
+}
+
+static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_prep_and_lock(nor);
+ if (ret)
+ return ret;
+
+ ret = nor->params->locking_ops->lock(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_prep_and_lock(nor);
+ if (ret)
+ return ret;
+
+ ret = nor->params->locking_ops->unlock(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_prep_and_lock(nor);
+ if (ret)
+ return ret;
+
+ ret = nor->params->locking_ops->is_locked(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+/**
+ * spi_nor_try_unlock_all() - Tries to unlock the entire flash memory array.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * Some SPI NOR flashes are write protected by default after a power-on reset
+ * cycle, in order to avoid inadvertent writes during power-up. For backward
+ * compatibility, the entire flash memory array is unlocked at power-up by
+ * default.
+ *
+ * Unprotecting the entire flash array will fail for boards which are hardware
+ * write-protected. Thus any errors are ignored.
+ */
+void spi_nor_try_unlock_all(struct spi_nor *nor)
+{
+ int ret;
+
+ if (!(nor->flags & SNOR_F_HAS_LOCK))
+ return;
+
+ dev_dbg(nor->dev, "Unprotecting entire flash array\n");
+
+ ret = spi_nor_unlock(&nor->mtd, 0, nor->params->size);
+ if (ret)
+ dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
+}
+
+void spi_nor_set_mtd_locking_ops(struct spi_nor *nor)
+{
+ struct mtd_info *mtd = &nor->mtd;
+
+ if (!nor->params->locking_ops)
+ return;
+
+ mtd->_lock = spi_nor_lock;
+ mtd->_unlock = spi_nor_unlock;
+ mtd->_is_locked = spi_nor_is_locked;
+}
diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c
new file mode 100644
index 0000000000..c09bb832b3
--- /dev/null
+++ b/drivers/mtd/spi-nor/sysfs.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mtd/spi-nor.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/sysfs.h>
+
+#include "core.h"
+
+static ssize_t manufacturer_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_mem *spimem = spi_get_drvdata(spi);
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ return sysfs_emit(buf, "%s\n", nor->manufacturer->name);
+}
+static DEVICE_ATTR_RO(manufacturer);
+
+static ssize_t partname_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_mem *spimem = spi_get_drvdata(spi);
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ return sysfs_emit(buf, "%s\n", nor->info->name);
+}
+static DEVICE_ATTR_RO(partname);
+
+static ssize_t jedec_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_mem *spimem = spi_get_drvdata(spi);
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+ const u8 *id = nor->info->id_len ? nor->info->id : nor->id;
+ u8 id_len = nor->info->id_len ?: SPI_NOR_MAX_ID_LEN;
+
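+ /* "%*phN" prints id_len bytes of @id as a contiguous hex string. */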
+ return sysfs_emit(buf, "%*phN\n", id_len, id);
+}
+static DEVICE_ATTR_RO(jedec_id);
+
+static struct attribute *spi_nor_sysfs_entries[] = {
+ &dev_attr_manufacturer.attr,
+ &dev_attr_partname.attr,
+ &dev_attr_jedec_id.attr,
+ NULL
+};
+
+static ssize_t sfdp_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct spi_device *spi = to_spi_device(kobj_to_dev(kobj));
+ struct spi_mem *spimem = spi_get_drvdata(spi);
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+ struct sfdp *sfdp = nor->sfdp;
+ size_t sfdp_size = sfdp->num_dwords * sizeof(*sfdp->dwords);
+
+ return memory_read_from_buffer(buf, count, &off, nor->sfdp->dwords,
+ sfdp_size);
+}
+static BIN_ATTR_RO(sfdp, 0);
+
+static struct bin_attribute *spi_nor_sysfs_bin_entries[] = {
+ &bin_attr_sfdp,
+ NULL
+};
+
+static umode_t spi_nor_sysfs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct spi_device *spi = to_spi_device(kobj_to_dev(kobj));
+ struct spi_mem *spimem = spi_get_drvdata(spi);
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ if (attr == &dev_attr_manufacturer.attr && !nor->manufacturer)
+ return 0;
+ if (attr == &dev_attr_jedec_id.attr && !nor->info->id_len && !nor->id)
+ return 0;
+
+ return 0444;
+}
+
+static umode_t spi_nor_sysfs_is_bin_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int n)
+{
+ struct spi_device *spi = to_spi_device(kobj_to_dev(kobj));
+ struct spi_mem *spimem = spi_get_drvdata(spi);
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ if (attr == &bin_attr_sfdp && nor->sfdp)
+ return 0444;
+
+ return 0;
+}
+
+static const struct attribute_group spi_nor_sysfs_group = {
+ .name = "spi-nor",
+ .is_visible = spi_nor_sysfs_is_visible,
+ .is_bin_visible = spi_nor_sysfs_is_bin_visible,
+ .attrs = spi_nor_sysfs_entries,
+ .bin_attrs = spi_nor_sysfs_bin_entries,
+};
+
+const struct attribute_group *spi_nor_sysfs_groups[] = {
+ &spi_nor_sysfs_group,
+ NULL
+};
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
new file mode 100644
index 0000000000..cd99c9a1c5
--- /dev/null
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+#define WINBOND_NOR_OP_RDEAR 0xc8 /* Read Extended Address Register */
+#define WINBOND_NOR_OP_WREAR 0xc5 /* Write Extended Address Register */
+
+#define WINBOND_NOR_WREAR_OP(buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(WINBOND_NOR_OP_WREAR, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(1, buf, 0))
+
+static int
+w25q256_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ /*
+ * W25Q256JV supports 4B opcodes but W25Q256FV does not.
+ * Unfortunately, Winbond has re-used the same JEDEC ID for both
+ * variants which prevents us from defining a new entry in the parts
+ * table.
+ * To differentiate between W25Q256JV and W25Q256FV check SFDP header
+ * version: only JV has JESD216A compliant structure (version 5).
+ */
+ if (bfpt_header->major == SFDP_JESD216_MAJOR &&
+ bfpt_header->minor == SFDP_JESD216A_MINOR)
+ nor->flags |= SNOR_F_4B_OPCODES;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups w25q256_fixups = {
+ .post_bfpt = w25q256_post_bfpt_fixups,
+};
+
+static const struct flash_info winbond_nor_parts[] = {
+ /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
+ { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ OTP_INFO(256, 3, 0x1000, 0x1000) },
+ { "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ OTP_INFO(256, 3, 0x1000, 0x1000) },
+ { "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 0, 0)
+ PARSE_SFDP
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ .fixups = &w25q256_fixups },
+ { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512)
+ PARSE_SFDP },
+ { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ |
+ SPI_NOR_DUAL_READ) },
+ { "w25q512nwq", INFO(0xef6020, 0, 0, 0)
+ PARSE_SFDP
+ OTP_INFO(256, 3, 0x1000, 0x1000) },
+ { "w25q512nwm", INFO(0xef8020, 0, 64 * 1024, 1024)
+ PARSE_SFDP
+ OTP_INFO(256, 3, 0x1000, 0x1000) },
+ { "w25q512jvq", INFO(0xef4020, 0, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+};
+
+/**
+ * winbond_nor_write_ear() - Write Extended Address Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @ear: value to write to the Extended Address Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int winbond_nor_write_ear(struct spi_nor *nor, u8 ear)
+{
+ int ret;
+
+ nor->bouncebuf[0] = ear;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = WINBOND_NOR_WREAR_OP(nor->bouncebuf);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor,
+ WINBOND_NOR_OP_WREAR,
+ nor->bouncebuf, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d writing EAR\n", ret);
+
+ return ret;
+}
+
+/**
+ * winbond_nor_set_4byte_addr_mode() - Set 4-byte address mode for Winbond
+ * flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int winbond_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ ret = spi_nor_set_4byte_addr_mode_en4b_ex4b(nor, enable);
+ if (ret || enable)
+ return ret;
+
+ /*
+ * On Winbond W25Q256FV, leaving 4-byte mode causes the Extended Address
+ * Register to be set to 1, so all 3-byte-address reads come from the
+ * second 16M. We must clear the register to restore normal behavior.
+ */
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ ret = winbond_nor_write_ear(nor, 0);
+ if (ret)
+ return ret;
+
+ return spi_nor_write_disable(nor);
+}
+
+static const struct spi_nor_otp_ops winbond_nor_otp_ops = {
+ .read = spi_nor_otp_read_secr,
+ .write = spi_nor_otp_write_secr,
+ .erase = spi_nor_otp_erase_secr,
+ .lock = spi_nor_otp_lock_sr2,
+ .is_locked = spi_nor_otp_is_locked_sr2,
+};
+
+static int winbond_nor_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ if (params->otp.org->n_regions)
+ params->otp.ops = &winbond_nor_otp_ops;
+
+ /*
+ * Winbond seems to require the Extended Address Register to be set to
+ * zero when exiting the 4-Byte Address Mode, at least for W25Q256FV.
+ * This requirement is not described in the JESD216 SFDP standard, thus
+ * it is Winbond specific. Since we do not know if other Winbond flashes
+ * have the same requirement, play it safe and overwrite the method
+ * parsed from BFPT, if any.
+ */
+ params->set_4byte_addr_mode = winbond_nor_set_4byte_addr_mode;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups winbond_nor_fixups = {
+ .late_init = winbond_nor_late_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_winbond = {
+ .name = "winbond",
+ .parts = winbond_nor_parts,
+ .nparts = ARRAY_SIZE(winbond_nor_parts),
+ .fixups = &winbond_nor_fixups,
+};
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
new file mode 100644
index 0000000000..00d53eae5e
--- /dev/null
+++ b/drivers/mtd/spi-nor/xilinx.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+#define XILINX_OP_SE 0x50 /* Sector erase */
+#define XILINX_OP_PP 0x82 /* Page program */
+#define XILINX_OP_RDSR 0xd7 /* Read status register */
+
+#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */
+#define XSR_RDY BIT(7) /* Ready */
+
+#define XILINX_RDSR_OP(buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(XILINX_OP_RDSR, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_IN(1, buf, 0))
+
+#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff \
+ }, \
+ .id_len = 3, \
+ .sector_size = (8 * (_page_size)), \
+ .n_sectors = (_n_sectors), \
+ .page_size = (_page_size), \
+ .n_banks = 1, \
+ .addr_nbytes = 3, \
+ .flags = SPI_NOR_NO_FR
+
+/* Xilinx S3AN parts share their MFR ID with Atmel SPI NOR */
+static const struct flash_info xilinx_nor_parts[] = {
+ /* Xilinx S3AN Internal Flash */
+ { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
+ { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
+ { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
+ { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
+ { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
+};
+
+/*
+ * This code converts an address to the Default Address Mode, which has
+ * non-power-of-two page sizes. We must support this mode because it is the
+ * default mode supported by Xilinx tools, it can access the whole flash area,
+ * and changing over to the Power-of-two mode is irreversible and corrupts the
+ * original data.
+ * Addr can safely be unsigned int, the biggest S3AN device is smaller than
+ * 4 MiB.
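+ *
+ * e.g. with 264-byte pages, addr 1000 becomes page 3, offset 208, so the
+ * converted address is (3 << 9) | 208 = 0x6d0.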
+ */
+static u32 s3an_nor_convert_addr(struct spi_nor *nor, u32 addr)
+{
+ u32 page_size = nor->params->page_size;
+ u32 offset, page;
+
+ offset = addr % page_size;
+ page = addr / page_size;
+ page <<= (page_size > 512) ? 10 : 9;
+
+ return page | offset;
+}
+
+/**
+ * xilinx_nor_read_sr() - Read the Status Register on S3AN flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to a DMA-able buffer where the value of the
+ * Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int xilinx_nor_read_sr(struct spi_nor *nor, u8 *sr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = XILINX_RDSR_OP(sr);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_read_reg(nor, XILINX_OP_RDSR, sr,
+ 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR\n", ret);
+
+ return ret;
+}
+
+/**
+ * xilinx_nor_sr_ready() - Query the Status Register of the S3AN flash to see
+ * if the flash is ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int xilinx_nor_sr_ready(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = xilinx_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ return !!(nor->bouncebuf[0] & XSR_RDY);
+}
+
+static int xilinx_nor_setup(struct spi_nor *nor,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ u32 page_size;
+ int ret;
+
+ ret = xilinx_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ nor->erase_opcode = XILINX_OP_SE;
+ nor->program_opcode = XILINX_OP_PP;
+ nor->read_opcode = SPINOR_OP_READ;
+ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+
+ /*
+ * These flashes have a page size of 264 or 528 bytes (known as
+ * Default addressing mode). It can be changed to a more standard
+ * Power-of-two mode where the page size is 256/512 bytes. This comes
+ * at a price: there is 3% less space, the existing data is corrupted,
+ * and the page size cannot be changed back to Default addressing
+ * mode.
+ *
+ * The current addressing mode can be read from the XRDSR register
+ * and should not be changed, because changing it is a destructive
+ * operation.
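+ *
+ * e.g. a 264-byte-page part switched to Power-of-two mode exposes
+ * 256-byte pages and 8 * 256 = 2 KiB erase blocks.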
+ */
+ if (nor->bouncebuf[0] & XSR_PAGESIZE) {
+ /* Flash in Power of 2 mode */
+ page_size = (nor->params->page_size == 264) ? 256 : 512;
+ nor->params->page_size = page_size;
+ nor->mtd.writebufsize = page_size;
+ nor->params->size = 8 * page_size * nor->info->n_sectors;
+ nor->mtd.erasesize = 8 * page_size;
+ } else {
+ /* Flash in Default addressing mode */
+ nor->params->convert_addr = s3an_nor_convert_addr;
+ nor->mtd.erasesize = nor->info->sector_size;
+ }
+
+ return 0;
+}
+
+static int xilinx_nor_late_init(struct spi_nor *nor)
+{
+ nor->params->setup = xilinx_nor_setup;
+ nor->params->ready = xilinx_nor_sr_ready;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups xilinx_nor_fixups = {
+ .late_init = xilinx_nor_late_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_xilinx = {
+ .name = "xilinx",
+ .parts = xilinx_nor_parts,
+ .nparts = ARRAY_SIZE(xilinx_nor_parts),
+ .fixups = &xilinx_nor_fixups,
+};
diff --git a/drivers/mtd/spi-nor/xmc.c b/drivers/mtd/spi-nor/xmc.c
new file mode 100644
index 0000000000..051411e863
--- /dev/null
+++ b/drivers/mtd/spi-nor/xmc.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info xmc_nor_parts[] = {
+ /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
+ { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+};
+
+const struct spi_nor_manufacturer spi_nor_xmc = {
+ .name = "xmc",
+ .parts = xmc_nor_parts,
+ .nparts = ARRAY_SIZE(xmc_nor_parts),
+};
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
new file mode 100644
index 0000000000..04da685c36
--- /dev/null
+++ b/drivers/mtd/ssfdc.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux driver for SSFDC Flash Translation Layer (Read only)
+ * © 2005 Eptar srl
+ * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
+ *
+ * Based on NTFL and MTDBLOCK_RO drivers
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/blktrans.h>
+
+struct ssfdcr_record {
+ struct mtd_blktrans_dev mbd;
+ int usecount;
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned short cylinders;
+ int cis_block; /* number of the block containing CIS/IDI */
+ int erase_size; /* phys_block_size */
+ unsigned short *logic_block_map; /* all zones (max 8192 phys blocks on
+ a 128 MiB card) */
+ int map_len; /* number of phys blocks on the card */
+};
+
+#define SSFDCR_MAJOR 257
+#define SSFDCR_PARTN_BITS 3
+
+#define SECTOR_SIZE 512
+#define SECTOR_SHIFT 9
+#define OOB_SIZE 16
+
+#define MAX_LOGIC_BLK_PER_ZONE 1000
+#define MAX_PHYS_BLK_PER_ZONE 1024
+
+#define KiB(x) ( (x) * 1024L )
+#define MiB(x) ( KiB(x) * 1024L )
+
+/** CHS Table
+ 1MiB 2MiB 4MiB 8MiB 16MiB 32MiB 64MiB 128MiB
+NCylinder 125 125 250 250 500 500 500 500
+NHead 4 4 4 4 4 8 8 16
+NSector 4 8 8 16 16 16 32 32
+SumSector 2,000 4,000 8,000 16,000 32,000 64,000 128,000 256,000
+SectorSize 512 512 512 512 512 512 512 512
+**/
+
+typedef struct {
+ unsigned long size;
+ unsigned short cyl;
+ unsigned char head;
+ unsigned char sec;
+} chs_entry_t;
+
+/* Must be ordered by size */
+static const chs_entry_t chs_table[] = {
+ { MiB( 1), 125, 4, 4 },
+ { MiB( 2), 125, 4, 8 },
+ { MiB( 4), 250, 4, 8 },
+ { MiB( 8), 250, 4, 16 },
+ { MiB( 16), 500, 4, 16 },
+ { MiB( 32), 500, 8, 16 },
+ { MiB( 64), 500, 8, 32 },
+ { MiB(128), 500, 16, 32 },
+ { 0 },
+};
+
+static int get_chs(unsigned long size, unsigned short *cyl, unsigned char *head,
+ unsigned char *sec)
+{
+ int k;
+ int found = 0;
+
+ k = 0;
+ while (chs_table[k].size > 0 && size > chs_table[k].size)
+ k++;
+
+ if (chs_table[k].size > 0) {
+ if (cyl)
+ *cyl = chs_table[k].cyl;
+ if (head)
+ *head = chs_table[k].head;
+ if (sec)
+ *sec = chs_table[k].sec;
+ found = 1;
+ }
+
+ return found;
+}
+
+/* These bytes are the signature for the CIS/IDI sector */
+static const uint8_t cis_numbers[] = {
+ 0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
+};
+
+/* Read and check for a valid CIS sector */
+static int get_valid_cis_sector(struct mtd_info *mtd)
+{
+ int ret, k, cis_sector;
+ size_t retlen;
+ loff_t offset;
+ uint8_t *sect_buf;
+
+ cis_sector = -1;
+
+ sect_buf = kmalloc(SECTOR_SIZE, GFP_KERNEL);
+ if (!sect_buf)
+ goto out;
+
+ /*
+ * Look for the CIS/IDI sector on the first GOOD block (give up after 4
+ * bad blocks). If the first good block doesn't contain the CIS numbers,
+ * the flash is not SSFDC formatted.
+ */
+ for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) {
+ if (!mtd_block_isbad(mtd, offset)) {
+ ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen,
+ sect_buf);
+
+ /* CIS pattern match on the sector buffer */
+ if (ret < 0 || retlen != SECTOR_SIZE) {
+ printk(KERN_WARNING
+ "SSFDC_RO: can't read CIS/IDI sector\n");
+ } else if (!memcmp(sect_buf, cis_numbers,
+ sizeof(cis_numbers))) {
+ /* Found */
+ cis_sector = (int)(offset >> SECTOR_SHIFT);
+ } else {
+ pr_debug("SSFDC_RO: CIS/IDI sector not found"
+ " on %s (mtd%d)\n", mtd->name,
+ mtd->index);
+ }
+ break;
+ }
+ }
+
+ kfree(sect_buf);
+ out:
+ return cis_sector;
+}
+
+/* Read physical sector (wrapper to MTD_READ) */
+static int read_physical_sector(struct mtd_info *mtd, uint8_t *sect_buf,
+ int sect_no)
+{
+ int ret;
+ size_t retlen;
+ loff_t offset = (loff_t)sect_no << SECTOR_SHIFT;
+
+ ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
+ if (ret < 0 || retlen != SECTOR_SIZE)
+ return -1;
+
+ return 0;
+}
+
+/* Read redundancy area (wrapper to MTD_READ_OOB) */
+static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf)
+{
+ struct mtd_oob_ops ops = { };
+ int ret;
+
+ ops.mode = MTD_OPS_RAW;
+ ops.ooboffs = 0;
+ ops.ooblen = OOB_SIZE;
+ ops.oobbuf = buf;
+ ops.datbuf = NULL;
+
+ ret = mtd_read_oob(mtd, offs, &ops);
+ if (ret < 0 || ops.oobretlen != OOB_SIZE)
+ return -1;
+
+ return 0;
+}
+
+/* Parity calculator on an n-bit word */
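+/*
+ * Returns (1 + hweight(number)) & 1 over the low @size bits, i.e. the value
+ * a stored parity bit must have so that bit plus field has odd parity.
+ * e.g. get_parity(682, 10): 682 = 0b1010101010 has five set bits, so the
+ * result is (1 + 5) & 1 = 0.
+ */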
+static int get_parity(int number, int size)
+{
+ int k;
+ int parity;
+
+ parity = 1;
+ for (k = 0; k < size; k++) {
+ parity += (number >> k);
+ parity &= 1;
+ }
+ return parity;
+}
+
+/* Read and validate the logical block address field stored in the OOB */
+static int get_logical_address(uint8_t *oob_buf)
+{
+ int block_address, parity;
+ int offset[2] = {6, 11}; /* offset of the 2 address fields within OOB */
+ int j;
+ int ok = 0;
+
+ /*
+ * Look for the first valid logical address
+ * A valid address has a fixed pattern in its most significant bits
+ * and passes a parity check.
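+ *
+ * e.g. field 0x1554: the signature matches (0x1554 & ~0x7FF == 0x1000),
+ * the parity bit is 0, the block address is (0x1554 & 0x7FF) >> 1 = 682,
+ * and get_parity(682, 10) = 0, so 682 is accepted.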
+ */
+ for (j = 0; j < ARRAY_SIZE(offset); j++) {
+ block_address = ((int)oob_buf[offset[j]] << 8) |
+ oob_buf[offset[j]+1];
+
+ /* Check for the signature bits in the address field (MSBits) */
+ if ((block_address & ~0x7FF) == 0x1000) {
+ parity = block_address & 0x01;
+ block_address &= 0x7FF;
+ block_address >>= 1;
+
+ if (get_parity(block_address, 10) != parity) {
+ pr_debug("SSFDC_RO: logical address field%d"
+ "parity error(0x%04X)\n", j+1,
+ block_address);
+ } else {
+ ok = 1;
+ break;
+ }
+ }
+ }
+
+ if (!ok)
+ block_address = -2;
+
+ pr_debug("SSFDC_RO: get_logical_address() %d\n",
+ block_address);
+
+ return block_address;
+}
+
+/* Build the logic block map */
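+/*
+ * Physical blocks are grouped in zones of MAX_PHYS_BLK_PER_ZONE blocks, each
+ * providing MAX_LOGIC_BLK_PER_ZONE logical blocks: e.g. physical block 2050
+ * lies in zone 2, so a logical address of 5 read from its OOB maps to entry
+ * 2 * 1000 + 5 = 2005 of the map.
+ */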
+static int build_logical_block_map(struct ssfdcr_record *ssfdc)
+{
+ unsigned long offset;
+ uint8_t oob_buf[OOB_SIZE];
+ int ret, block_address, phys_block;
+ struct mtd_info *mtd = ssfdc->mbd.mtd;
+
+ pr_debug("SSFDC_RO: build_block_map() nblks=%d (%luK)\n",
+ ssfdc->map_len,
+ (unsigned long)ssfdc->map_len * ssfdc->erase_size / 1024);
+
+ /* Scan every physical block, skip CIS block */
+ for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len;
+ phys_block++) {
+ offset = (unsigned long)phys_block * ssfdc->erase_size;
+ if (mtd_block_isbad(mtd, offset))
+ continue; /* skip bad blocks */
+
+ ret = read_raw_oob(mtd, offset, oob_buf);
+ if (ret < 0) {
+ pr_debug("SSFDC_RO: mtd read_oob() failed at %lu\n",
+ offset);
+ return -1;
+ }
+ block_address = get_logical_address(oob_buf);
+
+ /* Skip invalid addresses */
+ if (block_address >= 0 &&
+ block_address < MAX_LOGIC_BLK_PER_ZONE) {
+ int zone_index;
+
+ zone_index = phys_block / MAX_PHYS_BLK_PER_ZONE;
+ block_address += zone_index * MAX_LOGIC_BLK_PER_ZONE;
+ ssfdc->logic_block_map[block_address] =
+ (unsigned short)phys_block;
+
+ pr_debug("SSFDC_RO: build_block_map() phys_block=%d,"
+ "logic_block_addr=%d, zone=%d\n",
+ phys_block, block_address, zone_index);
+ }
+ }
+ return 0;
+}
+
+static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct ssfdcr_record *ssfdc;
+ int cis_sector;
+
+ /* Check for small page NAND flash */
+ if (!mtd_type_is_nand(mtd) || mtd->oobsize != OOB_SIZE ||
+ mtd->size > UINT_MAX)
+ return;
+
+ /* Check for SSFDC format by reading the CIS/IDI sector */
+ cis_sector = get_valid_cis_sector(mtd);
+ if (cis_sector == -1)
+ return;
+
+ ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL);
+ if (!ssfdc)
+ return;
+
+ ssfdc->mbd.mtd = mtd;
+ ssfdc->mbd.devnum = -1;
+ ssfdc->mbd.tr = tr;
+ ssfdc->mbd.readonly = 1;
+
+ ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT);
+ ssfdc->erase_size = mtd->erasesize;
+ ssfdc->map_len = (u32)mtd->size / mtd->erasesize;
+
+ pr_debug("SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
+ ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len,
+ DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE));
+
+ /* Set geometry */
+ ssfdc->heads = 16;
+ ssfdc->sectors = 32;
+ get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors);
+ ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) /
+ ((long)ssfdc->sectors * (long)ssfdc->heads));
+
+ pr_debug("SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
+ ssfdc->cylinders, ssfdc->heads, ssfdc->sectors,
+ (long)ssfdc->cylinders * (long)ssfdc->heads *
+ (long)ssfdc->sectors);
+
+ ssfdc->mbd.size = (long)ssfdc->heads * (long)ssfdc->cylinders *
+ (long)ssfdc->sectors;
+
+ /* Allocate logical block map */
+ ssfdc->logic_block_map =
+ kmalloc_array(ssfdc->map_len,
+ sizeof(ssfdc->logic_block_map[0]), GFP_KERNEL);
+ if (!ssfdc->logic_block_map)
+ goto out_err;
+ memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) *
+ ssfdc->map_len);
+
+ /* Build logical block map */
+ if (build_logical_block_map(ssfdc) < 0)
+ goto out_err;
+
+ /* Register device + partitions */
+ if (add_mtd_blktrans_dev(&ssfdc->mbd))
+ goto out_err;
+
+ printk(KERN_INFO "SSFDC_RO: Found ssfdc%c on mtd%d (%s)\n",
+ ssfdc->mbd.devnum + 'a', mtd->index, mtd->name);
+ return;
+
+out_err:
+ kfree(ssfdc->logic_block_map);
+ kfree(ssfdc);
+}
+
+static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
+
+ pr_debug("SSFDC_RO: remove_dev (i=%d)\n", dev->devnum);
+
+ del_mtd_blktrans_dev(dev);
+ kfree(ssfdc->logic_block_map);
+}
+
+static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
+ unsigned long logic_sect_no, char *buf)
+{
+ struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
+ int sectors_per_block, offset, block_address;
+
+ sectors_per_block = ssfdc->erase_size >> SECTOR_SHIFT;
+ offset = (int)(logic_sect_no % sectors_per_block);
+ block_address = (int)(logic_sect_no / sectors_per_block);
+
+ pr_debug("SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d,"
+ " block_addr=%d\n", logic_sect_no, sectors_per_block, offset,
+ block_address);
+
+ BUG_ON(block_address >= ssfdc->map_len);
+
+ block_address = ssfdc->logic_block_map[block_address];
+
+ pr_debug("SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n",
+ block_address);
+
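+	/* 0xffff marks an unmapped logical block; report it as erased */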
+ if (block_address < 0xffff) {
+ unsigned long sect_no;
+
+ sect_no = (unsigned long)block_address * sectors_per_block +
+ offset;
+
+ pr_debug("SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n",
+ sect_no);
+
+ if (read_physical_sector(ssfdc->mbd.mtd, buf, sect_no) < 0)
+ return -EIO;
+ } else {
+ memset(buf, 0xff, SECTOR_SIZE);
+ }
+
+ return 0;
+}
+
+static int ssfdcr_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
+
+ pr_debug("SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n",
+ ssfdc->cylinders, ssfdc->heads, ssfdc->sectors);
+
+ geo->heads = ssfdc->heads;
+ geo->sectors = ssfdc->sectors;
+ geo->cylinders = ssfdc->cylinders;
+
+ return 0;
+}
+
+/****************************************************************************
+ *
+ * Module stuff
+ *
+ ****************************************************************************/
+
+static struct mtd_blktrans_ops ssfdcr_tr = {
+ .name = "ssfdc",
+ .major = SSFDCR_MAJOR,
+ .part_bits = SSFDCR_PARTN_BITS,
+ .blksize = SECTOR_SIZE,
+ .getgeo = ssfdcr_getgeo,
+ .readsect = ssfdcr_readsect,
+ .add_mtd = ssfdcr_add_mtd,
+ .remove_dev = ssfdcr_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_ssfdcr(void)
+{
+ printk(KERN_INFO "SSFDC read-only Flash Translation layer\n");
+
+ return register_mtd_blktrans(&ssfdcr_tr);
+}
+
+static void __exit cleanup_ssfdcr(void)
+{
+ deregister_mtd_blktrans(&ssfdcr_tr);
+}
+
+module_init(init_ssfdcr);
+module_exit(cleanup_ssfdcr);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
+MODULE_DESCRIPTION("Flash Translation Layer for read-only SSFDC SmartMedia card");
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
new file mode 100644
index 0000000000..5de0378f90
--- /dev/null
+++ b/drivers/mtd/tests/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o
+
+mtd_oobtest-objs := oobtest.o mtd_test.o
+mtd_pagetest-objs := pagetest.o mtd_test.o
+mtd_readtest-objs := readtest.o mtd_test.o
+mtd_speedtest-objs := speedtest.o mtd_test.o
+mtd_stresstest-objs := stresstest.o mtd_test.o
+mtd_subpagetest-objs := subpagetest.o mtd_test.o
+mtd_torturetest-objs := torturetest.o mtd_test.o
+mtd_nandbiterrs-objs := nandbiterrs.o mtd_test.o
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
new file mode 100644
index 0000000000..824cc1c03b
--- /dev/null
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
+
+#include "mtd_test.h"
+
+/*
+ * Test the implementation for software ECC
+ *
+ * No actual MTD device is needed, so we don't need to worry about losing
+ * important data through human error.
+ *
+ * This covers possible patterns of corruption which can be reliably corrected
+ * or detected.
+ */
+
+#if IS_ENABLED(CONFIG_MTD_RAW_NAND)
+
+struct nand_ecc_test {
+ const char *name;
+ void (*prepare)(void *, void *, void *, void *, const size_t);
+ int (*verify)(void *, void *, void *, const size_t);
+};
+
+/*
+ * The reason for this __change_bit_le() instead of __change_bit() is to inject
+ * bit error properly within the region which is not a multiple of
+ * sizeof(unsigned long) on big-endian systems
+ */
+#ifdef __LITTLE_ENDIAN
+#define __change_bit_le(nr, addr) __change_bit(nr, addr)
+#elif defined(__BIG_ENDIAN)
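+/* XOR-ing the byte-index bits maps a little-endian bit number onto the
+ * matching bit of the in-memory long on big-endian machines. */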
+#define __change_bit_le(nr, addr) \
+ __change_bit((nr) ^ ((BITS_PER_LONG - 1) & ~0x7), addr)
+#else
+#error "Unknown byte order"
+#endif
+
+static void single_bit_error_data(void *error_data, void *correct_data,
+ size_t size)
+{
+ unsigned int offset = get_random_u32_below(size * BITS_PER_BYTE);
+
+ memcpy(error_data, correct_data, size);
+ __change_bit_le(offset, error_data);
+}
+
+static void double_bit_error_data(void *error_data, void *correct_data,
+ size_t size)
+{
+ unsigned int offset[2];
+
+ offset[0] = get_random_u32_below(size * BITS_PER_BYTE);
+ do {
+ offset[1] = get_random_u32_below(size * BITS_PER_BYTE);
+ } while (offset[0] == offset[1]);
+
+ memcpy(error_data, correct_data, size);
+
+ __change_bit_le(offset[0], error_data);
+ __change_bit_le(offset[1], error_data);
+}
+
+static unsigned int random_ecc_bit(size_t size)
+{
+ unsigned int offset = get_random_u32_below(3 * BITS_PER_BYTE);
+
+ if (size == 256) {
+ /*
+ * Don't inject a bit error into the insignificant bits (16th
+ * and 17th bit) in ECC code for 256 byte data block
+ */
+ while (offset == 16 || offset == 17)
+ offset = get_random_u32_below(3 * BITS_PER_BYTE);
+ }
+
+ return offset;
+}
+
+static void single_bit_error_ecc(void *error_ecc, void *correct_ecc,
+ size_t size)
+{
+ unsigned int offset = random_ecc_bit(size);
+
+ memcpy(error_ecc, correct_ecc, 3);
+ __change_bit_le(offset, error_ecc);
+}
+
+static void double_bit_error_ecc(void *error_ecc, void *correct_ecc,
+ size_t size)
+{
+ unsigned int offset[2];
+
+ offset[0] = random_ecc_bit(size);
+ do {
+ offset[1] = random_ecc_bit(size);
+ } while (offset[0] == offset[1]);
+
+ memcpy(error_ecc, correct_ecc, 3);
+ __change_bit_le(offset[0], error_ecc);
+ __change_bit_le(offset[1], error_ecc);
+}
+
+static void no_bit_error(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ memcpy(error_data, correct_data, size);
+ memcpy(error_ecc, correct_ecc, 3);
+}
+
+static int no_bit_error_verify(void *error_data, void *error_ecc,
+ void *correct_data, const size_t size)
+{
+ bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
+ unsigned char calc_ecc[3];
+ int ret;
+
+ ecc_sw_hamming_calculate(error_data, size, calc_ecc, sm_order);
+ ret = ecc_sw_hamming_correct(error_data, error_ecc, calc_ecc, size,
+ sm_order);
+ if (ret == 0 && !memcmp(correct_data, error_data, size))
+ return 0;
+
+ return -EINVAL;
+}
+
+static void single_bit_error_in_data(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ single_bit_error_data(error_data, correct_data, size);
+ memcpy(error_ecc, correct_ecc, 3);
+}
+
+static void single_bit_error_in_ecc(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ memcpy(error_data, correct_data, size);
+ single_bit_error_ecc(error_ecc, correct_ecc, size);
+}
+
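+/* A single flipped bit must be repaired: the Hamming code reports one
+ * corrected bit (return value 1) and the data must match the original. */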
+static int single_bit_error_correct(void *error_data, void *error_ecc,
+ void *correct_data, const size_t size)
+{
+ bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
+ unsigned char calc_ecc[3];
+ int ret;
+
+ ecc_sw_hamming_calculate(error_data, size, calc_ecc, sm_order);
+ ret = ecc_sw_hamming_correct(error_data, error_ecc, calc_ecc, size,
+ sm_order);
+ if (ret == 1 && !memcmp(correct_data, error_data, size))
+ return 0;
+
+ return -EINVAL;
+}
+
+static void double_bit_error_in_data(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ double_bit_error_data(error_data, correct_data, size);
+ memcpy(error_ecc, correct_ecc, 3);
+}
+
+static void single_bit_error_in_data_and_ecc(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ single_bit_error_data(error_data, correct_data, size);
+ single_bit_error_ecc(error_ecc, correct_ecc, size);
+}
+
+static void double_bit_error_in_ecc(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ memcpy(error_data, correct_data, size);
+ double_bit_error_ecc(error_ecc, correct_ecc, size);
+}
+
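+/* Two flipped bits exceed the Hamming code's correction capability; it
+ * must still detect them and return -EBADMSG. */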
+static int double_bit_error_detect(void *error_data, void *error_ecc,
+ void *correct_data, const size_t size)
+{
+ bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
+ unsigned char calc_ecc[3];
+ int ret;
+
+ ecc_sw_hamming_calculate(error_data, size, calc_ecc, sm_order);
+ ret = ecc_sw_hamming_correct(error_data, error_ecc, calc_ecc, size,
+ sm_order);
+
+ return (ret == -EBADMSG) ? 0 : -EINVAL;
+}
+
+static const struct nand_ecc_test nand_ecc_test[] = {
+ {
+ .name = "no-bit-error",
+ .prepare = no_bit_error,
+ .verify = no_bit_error_verify,
+ },
+ {
+ .name = "single-bit-error-in-data-correct",
+ .prepare = single_bit_error_in_data,
+ .verify = single_bit_error_correct,
+ },
+ {
+ .name = "single-bit-error-in-ecc-correct",
+ .prepare = single_bit_error_in_ecc,
+ .verify = single_bit_error_correct,
+ },
+ {
+ .name = "double-bit-error-in-data-detect",
+ .prepare = double_bit_error_in_data,
+ .verify = double_bit_error_detect,
+ },
+ {
+ .name = "single-bit-error-in-data-and-ecc-detect",
+ .prepare = single_bit_error_in_data_and_ecc,
+ .verify = double_bit_error_detect,
+ },
+ {
+ .name = "double-bit-error-in-ecc-detect",
+ .prepare = double_bit_error_in_ecc,
+ .verify = double_bit_error_detect,
+ },
+};
+
+static void dump_data_ecc(void *error_data, void *error_ecc, void *correct_data,
+ void *correct_ecc, const size_t size)
+{
+ pr_info("hexdump of error data:\n");
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+ error_data, size, false);
+ print_hex_dump(KERN_INFO, "hexdump of error ecc: ",
+ DUMP_PREFIX_NONE, 16, 1, error_ecc, 3, false);
+
+ pr_info("hexdump of correct data:\n");
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+ correct_data, size, false);
+ print_hex_dump(KERN_INFO, "hexdump of correct ecc: ",
+ DUMP_PREFIX_NONE, 16, 1, correct_ecc, 3, false);
+}
+
+static int nand_ecc_test_run(const size_t size)
+{
+ bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
+ int i;
+ int err = 0;
+ void *error_data;
+ void *error_ecc;
+ void *correct_data;
+ void *correct_ecc;
+
+ error_data = kmalloc(size, GFP_KERNEL);
+ error_ecc = kmalloc(3, GFP_KERNEL);
+ correct_data = kmalloc(size, GFP_KERNEL);
+ correct_ecc = kmalloc(3, GFP_KERNEL);
+
+ if (!error_data || !error_ecc || !correct_data || !correct_ecc) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ get_random_bytes(correct_data, size);
+ ecc_sw_hamming_calculate(correct_data, size, correct_ecc, sm_order);
+ for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
+ nand_ecc_test[i].prepare(error_data, error_ecc,
+ correct_data, correct_ecc, size);
+ err = nand_ecc_test[i].verify(error_data, error_ecc,
+ correct_data, size);
+
+ if (err) {
+ pr_err("not ok - %s-%zd\n",
+ nand_ecc_test[i].name, size);
+ dump_data_ecc(error_data, error_ecc,
+ correct_data, correct_ecc, size);
+ break;
+ }
+ pr_info("ok - %s-%zd\n",
+ nand_ecc_test[i].name, size);
+
+ err = mtdtest_relax();
+ if (err)
+ break;
+ }
+error:
+ kfree(error_data);
+ kfree(error_ecc);
+ kfree(correct_data);
+ kfree(correct_ecc);
+
+ return err;
+}
+
+#else
+
+static int nand_ecc_test_run(const size_t size)
+{
+ return 0;
+}
+
+#endif
+
+static int __init ecc_test_init(void)
+{
+ int err;
+
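+	/* Exercise both block sizes handled by the software Hamming code */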
+ err = nand_ecc_test_run(256);
+ if (err)
+ return err;
+
+ return nand_ecc_test_run(512);
+}
+
+static void __exit ecc_test_exit(void)
+{
+}
+
+module_init(ecc_test_init);
+module_exit(ecc_test_exit);
+
+MODULE_DESCRIPTION("NAND ECC function test module");
+MODULE_AUTHOR("Akinobu Mita");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_test.c b/drivers/mtd/tests/mtd_test.c
new file mode 100644
index 0000000000..c84250beff
--- /dev/null
+++ b/drivers/mtd/tests/mtd_test.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "mtd_test: " fmt
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/printk.h>
+
+#include "mtd_test.h"
+
+int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum)
+{
+ int err;
+ struct erase_info ei;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ memset(&ei, 0, sizeof(struct erase_info));
+ ei.addr = addr;
+ ei.len = mtd->erasesize;
+
+ err = mtd_erase(mtd, &ei);
+ if (err) {
+ pr_info("error %d while erasing EB %d\n", err, ebnum);
+ return err;
+ }
+
+ return 0;
+}
+
+static int is_block_bad(struct mtd_info *mtd, unsigned int ebnum)
+{
+ int ret;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ ret = mtd_block_isbad(mtd, addr);
+ if (ret)
+ pr_info("block %d is bad\n", ebnum);
+
+ return ret;
+}
+
+int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt)
+{
+ int i, bad = 0;
+
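+	/* Devices without bad-block support: leave the table all good */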
+ if (!mtd_can_have_bb(mtd))
+ return 0;
+
+ pr_info("scanning for bad eraseblocks\n");
+ for (i = 0; i < ebcnt; ++i) {
+ bbt[i] = is_block_bad(mtd, eb + i) ? 1 : 0;
+ if (bbt[i])
+ bad += 1;
+ cond_resched();
+ }
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
+
+ return 0;
+}
+
+int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt)
+{
+ int err;
+ unsigned int i;
+
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = mtdtest_erase_eraseblock(mtd, eb + i);
+ if (err)
+ return err;
+ cond_resched();
+ }
+
+ return 0;
+}
+
+int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
+{
+ size_t read;
+ int err;
+
+ err = mtd_read(mtd, addr, size, &read, buf);
+ /* Ignore corrected ECC errors */
+ if (mtd_is_bitflip(err))
+ err = 0;
+ if (!err && read != size)
+ err = -EIO;
+ if (err)
+ pr_err("error: read failed at %#llx\n", addr);
+
+ return err;
+}
+
+int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
+ const void *buf)
+{
+ size_t written;
+ int err;
+
+ err = mtd_write(mtd, addr, size, &written, buf);
+ if (!err && written != size)
+ err = -EIO;
+ if (err)
+ pr_err("error: write failed at %#llx\n", addr);
+
+ return err;
+}
diff --git a/drivers/mtd/tests/mtd_test.h b/drivers/mtd/tests/mtd_test.h
new file mode 100644
index 0000000000..5a6e3bbe04
--- /dev/null
+++ b/drivers/mtd/tests/mtd_test.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/mtd/mtd.h>
+#include <linux/sched/signal.h>
+
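+/* Yield the CPU between operations and honour a pending signal so a
+ * long-running test can be aborted from user space. */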
+static inline int mtdtest_relax(void)
+{
+ cond_resched();
+ if (signal_pending(current)) {
+ pr_info("aborting test due to pending signal!\n");
+ return -EINTR;
+ }
+
+ return 0;
+}
+
+int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum);
+int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt);
+int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt);
+
+int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf);
+int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
+ const void *buf);
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
new file mode 100644
index 0000000000..98d7508f95
--- /dev/null
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2012 NetCommWireless
+ * Iwo Mergler <Iwo.Mergler@netcommwireless.com.au>
+ *
+ * Test for multi-bit error recovery on a NAND page. This mostly tests the
+ * ECC controller / driver.
+ *
+ * There are two test modes:
+ *
+ * 0 - artificially inserting bit errors until the ECC fails
+ * This is the default method and fairly quick. It should
+ * be independent of the quality of the FLASH.
+ *
+ * 1 - re-writing the same pattern repeatedly until the ECC fails.
+ * This method relies on the physics of NAND FLASH to eventually
+ *     generate '0' bits if '1' has been written enough times.
+ * Depending on the NAND, the first bit errors will appear after
+ * 1000 or more writes and then will usually snowball, reaching the
+ * limits of the ECC quickly.
+ *
+ * The test stops after 10000 cycles, should your FLASH be
+ * exceptionally good and not generate bit errors before that. Try
+ * a different page in that case.
+ *
+ * Please note that neither of these tests will significantly 'use up' any
+ * FLASH endurance. Only a maximum of two erase operations will be performed.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mtd/mtd.h>
+#include <linux/err.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/slab.h>
+#include "mtd_test.h"
+
+static int dev;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static unsigned page_offset;
+module_param(page_offset, uint, S_IRUGO);
+MODULE_PARM_DESC(page_offset, "Page number relative to dev start");
+
+static unsigned seed;
+module_param(seed, uint, S_IRUGO);
+MODULE_PARM_DESC(seed, "Random seed");
+
+static int mode;
+module_param(mode, int, S_IRUGO);
+MODULE_PARM_DESC(mode, "0=incremental errors, 1=overwrite test");
+
+static unsigned max_overwrite = 10000;
+
+static loff_t offset; /* Offset of the page we're using. */
+static unsigned eraseblock; /* Eraseblock number for our page. */
+
+/* We assume that the ECC can correct up to a certain number
+ * of biterrors per subpage. */
+static unsigned subsize; /* Size of subpages */
+static unsigned subcount; /* Number of subpages per page */
+
+static struct mtd_info *mtd; /* MTD device */
+
+static uint8_t *wbuffer; /* One page write / compare buffer */
+static uint8_t *rbuffer; /* One page read buffer */
+
+/* 'random' bytes from known offsets */
+static uint8_t hash(unsigned offset)
+{
+ unsigned v = offset;
+ unsigned char c;
+ v ^= 0x7f7edfd3;
+ v = v ^ (v >> 3);
+ v = v ^ (v >> 5);
+ v = v ^ (v >> 13);
+ c = v & 0xFF;
+ /* Reverse bits of result. */
+ c = (c & 0x0F) << 4 | (c & 0xF0) >> 4;
+ c = (c & 0x33) << 2 | (c & 0xCC) >> 2;
+ c = (c & 0x55) << 1 | (c & 0xAA) >> 1;
+ return c;
+}
+
+/* Writes wbuffer to page */
+static int write_page(int log)
+{
+ if (log)
+ pr_info("write_page\n");
+
+ return mtdtest_write(mtd, offset, mtd->writesize, wbuffer);
+}
+
+/* Re-writes the data area while leaving the OOB alone. */
+static int rewrite_page(int log)
+{
+ int err = 0;
+ struct mtd_oob_ops ops = { };
+
+ if (log)
+ pr_info("rewrite page\n");
+
+ ops.mode = MTD_OPS_RAW; /* No ECC */
+ ops.len = mtd->writesize;
+ ops.retlen = 0;
+ ops.ooblen = 0;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = wbuffer;
+ ops.oobbuf = NULL;
+
+ err = mtd_write_oob(mtd, offset, &ops);
+ if (err || ops.retlen != mtd->writesize) {
+ pr_err("error: write_oob failed (%d)\n", err);
+ if (!err)
+ err = -EIO;
+ }
+
+ return err;
+}
+
+/* Reads page into rbuffer. Returns number of corrected bit errors (>=0)
+ * or error (<0) */
+static int read_page(int log)
+{
+ int err = 0;
+ size_t read;
+ struct mtd_ecc_stats oldstats;
+
+ if (log)
+ pr_info("read_page\n");
+
+ /* Saving last mtd stats */
+ memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
+
+ err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer);
+ if (!err || err == -EUCLEAN)
+ err = mtd->ecc_stats.corrected - oldstats.corrected;
+
+ if (err < 0 || read != mtd->writesize) {
+ pr_err("error: read failed at %#llx\n", (long long)offset);
+ if (err >= 0)
+ err = -EIO;
+ }
+
+ return err;
+}
+
+/* Verifies rbuffer against random sequence */
+static int verify_page(int log)
+{
+ unsigned i, errs = 0;
+
+ if (log)
+ pr_info("verify_page\n");
+
+ for (i = 0; i < mtd->writesize; i++) {
+ if (rbuffer[i] != hash(i+seed)) {
+ pr_err("Error: page offset %u, expected %02x, got %02x\n",
+ i, hash(i+seed), rbuffer[i]);
+ errs++;
+ }
+ }
+
+ if (errs)
+ return -EIO;
+ else
+ return 0;
+}
+
+#define CBIT(v, n) ((v) & (1 << (n)))
+#define BCLR(v, n) ((v) = (v) & ~(1 << (n)))
+
+/* Finds the first '1' bit in wbuffer starting at offset 'byte'
+ * and sets it to '0'. */
+static int insert_biterror(unsigned byte)
+{
+ int bit;
+
+ while (byte < mtd->writesize) {
+ for (bit = 7; bit >= 0; bit--) {
+ if (CBIT(wbuffer[byte], bit)) {
+ BCLR(wbuffer[byte], bit);
+ pr_info("Inserted biterror @ %u/%u\n", byte, bit);
+ return 0;
+ }
+ }
+ byte++;
+ }
+ pr_err("biterror: Failed to find a '1' bit\n");
+ return -EIO;
+}
+
+/* Writes 'random' data to page and then introduces deliberate bit
+ * errors into the page, while verifying each step. */
+static int incremental_errors_test(void)
+{
+ int err = 0;
+ unsigned i;
+ unsigned errs_per_subpage = 0;
+
+ pr_info("incremental biterrors test\n");
+
+ for (i = 0; i < mtd->writesize; i++)
+ wbuffer[i] = hash(i+seed);
+
+ err = write_page(1);
+ if (err)
+ goto exit;
+
+ while (1) {
+
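+		/* Re-program the data in raw mode so the ECC bytes written
+		 * by the initial write_page() stay untouched on flash. */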
+ err = rewrite_page(1);
+ if (err)
+ goto exit;
+
+ err = read_page(1);
+ if (err > 0)
+ pr_info("Read reported %d corrected bit errors\n", err);
+ if (err < 0) {
+ pr_err("After %d biterrors per subpage, read reported error %d\n",
+ errs_per_subpage, err);
+ err = 0;
+ goto exit;
+ }
+
+ err = verify_page(1);
+ if (err) {
+ pr_err("ECC failure, read data is incorrect despite read success\n");
+ goto exit;
+ }
+
+ pr_info("Successfully corrected %d bit errors per subpage\n",
+ errs_per_subpage);
+
+ for (i = 0; i < subcount; i++) {
+ err = insert_biterror(i * subsize);
+ if (err < 0)
+ goto exit;
+ }
+ errs_per_subpage++;
+ }
+
+exit:
+ return err;
+}
+
+
+/* Writes 'random' data to page and then re-writes that same data repeatedly.
+ This eventually develops bit errors (bits written as '1' will slowly become
+ '0'), which are corrected as far as the ECC is capable of. */
+static int overwrite_test(void)
+{
+ int err = 0;
+ unsigned i;
+ unsigned max_corrected = 0;
+ unsigned opno = 0;
+ /* We don't expect more than this many correctable bit errors per
+ * page. */
+ #define MAXBITS 512
+ static unsigned bitstats[MAXBITS]; /* bit error histogram. */
+
+ memset(bitstats, 0, sizeof(bitstats));
+
+ pr_info("overwrite biterrors test\n");
+
+ for (i = 0; i < mtd->writesize; i++)
+ wbuffer[i] = hash(i+seed);
+
+ err = write_page(1);
+ if (err)
+ goto exit;
+
+ while (opno < max_overwrite) {
+
+ err = write_page(0);
+ if (err)
+ break;
+
+ err = read_page(0);
+ if (err >= 0) {
+ if (err >= MAXBITS) {
+ pr_info("Implausible number of bit errors corrected\n");
+ err = -EIO;
+ break;
+ }
+ bitstats[err]++;
+ if (err > max_corrected) {
+ max_corrected = err;
+ pr_info("Read reported %d corrected bit errors\n",
+ err);
+ }
+ } else { /* err < 0 */
+ pr_info("Read reported error %d\n", err);
+ err = 0;
+ break;
+ }
+
+ err = verify_page(0);
+ if (err) {
+ bitstats[max_corrected] = opno;
+ pr_info("ECC failure, read data is incorrect despite read success\n");
+ break;
+ }
+
+ err = mtdtest_relax();
+ if (err)
+ break;
+
+ opno++;
+ }
+
+ /* At this point bitstats[0] contains the number of ops with no bit
+ * errors, bitstats[1] the number of ops with 1 bit error, etc. */
+ pr_info("Bit error histogram (%d operations total):\n", opno);
+ for (i = 0; i < max_corrected; i++)
+ pr_info("Page reads with %3d corrected bit errors: %d\n",
+ i, bitstats[i]);
+
+exit:
+ return err;
+}
+
+static int __init mtd_nandbiterrs_init(void)
+{
+ int err = 0;
+
+ printk("\n");
+ printk(KERN_INFO "==================================================\n");
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ goto exit_mtddev;
+ }
+
+ if (!mtd_type_is_nand(mtd)) {
+ pr_info("this test requires NAND flash\n");
+ err = -ENODEV;
+ goto exit_nand;
+ }
+
+ pr_info("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ mtd->writesize, mtd->oobsize);
+
+ subsize = mtd->writesize >> mtd->subpage_sft;
+ subcount = mtd->writesize / subsize;
+
+ pr_info("Device uses %d subpages of %d bytes\n", subcount, subsize);
+
+ offset = (loff_t)page_offset * mtd->writesize;
+ eraseblock = mtd_div_by_eb(offset, mtd);
+
+ pr_info("Using page=%u, offset=%llu, eraseblock=%u\n",
+ page_offset, offset, eraseblock);
+
+ wbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!wbuffer) {
+ err = -ENOMEM;
+ goto exit_wbuffer;
+ }
+
+ rbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!rbuffer) {
+ err = -ENOMEM;
+ goto exit_rbuffer;
+ }
+
+ err = mtdtest_erase_eraseblock(mtd, eraseblock);
+ if (err)
+ goto exit_error;
+
+ if (mode == 0)
+ err = incremental_errors_test();
+ else
+ err = overwrite_test();
+
+ if (err)
+ goto exit_error;
+
+ /* We leave the block un-erased in case of test failure. */
+ err = mtdtest_erase_eraseblock(mtd, eraseblock);
+ if (err)
+ goto exit_error;
+
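+	/* Returning nonzero from init makes the module load fail, so a
+	 * successful run does not leave the test module loaded. */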
+ err = -EIO;
+ pr_info("finished successfully.\n");
+ printk(KERN_INFO "==================================================\n");
+
+exit_error:
+ kfree(rbuffer);
+exit_rbuffer:
+ kfree(wbuffer);
+exit_wbuffer:
+ /* Nothing */
+exit_nand:
+ put_mtd_device(mtd);
+exit_mtddev:
+ return err;
+}
+
+static void __exit mtd_nandbiterrs_exit(void)
+{
+ return;
+}
+
+module_init(mtd_nandbiterrs_init);
+module_exit(mtd_nandbiterrs_exit);
+
+MODULE_DESCRIPTION("NAND bit error recovery test");
+MODULE_AUTHOR("Iwo Mergler");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
new file mode 100644
index 0000000000..13fed39893
--- /dev/null
+++ b/drivers/mtd/tests/oobtest.c
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * Test OOB read and write on MTD device.
+ *
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/div64.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#include "mtd_test.h"
+
+static int dev = -EINVAL;
+static int bitflip_limit;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+module_param(bitflip_limit, int, S_IRUGO);
+MODULE_PARM_DESC(bitflip_limit, "Max. allowed bitflips per page");
+
+static struct mtd_info *mtd;
+static unsigned char *readbuf;
+static unsigned char *writebuf;
+static unsigned char *bbt;
+
+static int ebcnt;
+static int pgcnt;
+static int errcnt;
+static int use_offset;
+static int use_len;
+static int use_len_max;
+static int vary_offset;
+static struct rnd_state rnd_state;
+
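+/* Cycle through every (offset, length) combination of the writable OOB:
+ * shrink the length by one each call and, when it is exhausted, advance
+ * the start offset and reset the length. */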
+static void do_vary_offset(void)
+{
+ use_len -= 1;
+ if (use_len < 1) {
+ use_offset += 1;
+ if (use_offset >= use_len_max)
+ use_offset = 0;
+ use_len = use_len_max - use_offset;
+ }
+}
+
+static int write_eraseblock(int ebnum)
+{
+ int i;
+ struct mtd_oob_ops ops = { };
+ int err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
+ for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = use_len;
+ ops.oobretlen = 0;
+ ops.ooboffs = use_offset;
+ ops.datbuf = NULL;
+ ops.oobbuf = writebuf + (use_len_max * i) + use_offset;
+ err = mtd_write_oob(mtd, addr, &ops);
+ if (err || ops.oobretlen != use_len) {
+ pr_err("error: writeoob failed at %#llx\n",
+ (long long)addr);
+ pr_err("error: use_len %d, use_offset %d\n",
+ use_len, use_offset);
+ errcnt += 1;
+ return err ? err : -1;
+ }
+ if (vary_offset)
+ do_vary_offset();
+ }
+
+ return err;
+}
+
+static int write_whole_device(void)
+{
+ int err;
+ unsigned int i;
+
+ pr_info("writing OOBs of whole device\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = write_eraseblock(i);
+ if (err)
+ return err;
+ if (i % 256 == 0)
+ pr_info("written up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ return err;
+ }
+ pr_info("written %u eraseblocks\n", i);
+ return 0;
+}
+
+/*
+ * Display the address, offset and data bytes at comparison failure.
+ * Return number of bitflips encountered.
+ */
+static size_t memcmpshowoffset(loff_t addr, loff_t offset, const void *cs,
+ const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res;
+ size_t i = 0;
+ size_t bitflips = 0;
+
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--, i++) {
+ res = *su1 ^ *su2;
+ if (res) {
+ pr_info("error @addr[0x%lx:0x%lx] 0x%x -> 0x%x diff 0x%x\n",
+ (unsigned long)addr, (unsigned long)offset + i,
+ *su1, *su2, res);
+ bitflips += hweight8(res);
+ }
+ }
+
+ return bitflips;
+}
+
+#define memcmpshow(addr, cs, ct, count) memcmpshowoffset((addr), 0, (cs), (ct),\
+ (count))
+
+/*
+ * Compare with 0xff and show the address, offset and data bytes at
+ * comparison failure. Return number of bitflips encountered.
+ */
+static size_t memffshow(loff_t addr, loff_t offset, const void *cs,
+ size_t count)
+{
+ const unsigned char *su1;
+ int res;
+ size_t i = 0;
+ size_t bitflips = 0;
+
+ for (su1 = cs; 0 < count; ++su1, count--, i++) {
+ res = *su1 ^ 0xff;
+ if (res) {
+ pr_info("error @addr[0x%lx:0x%lx] 0x%x -> 0xff diff 0x%x\n",
+ (unsigned long)addr, (unsigned long)offset + i,
+ *su1, res);
+ bitflips += hweight8(res);
+ }
+ }
+
+ return bitflips;
+}
+
+static int verify_eraseblock(int ebnum)
+{
+ int i;
+ struct mtd_oob_ops ops = { };
+ int err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ size_t bitflips;
+
+ prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
+ for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = use_len;
+ ops.oobretlen = 0;
+ ops.ooboffs = use_offset;
+ ops.datbuf = NULL;
+ ops.oobbuf = readbuf;
+ err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
+ if (err || ops.oobretlen != use_len) {
+ pr_err("error: readoob failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ return err ? err : -1;
+ }
+
+ bitflips = memcmpshow(addr, readbuf,
+ writebuf + (use_len_max * i) + use_offset,
+ use_len);
+ if (bitflips > bitflip_limit) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ if (errcnt > 1000) {
+ pr_err("error: too many errors\n");
+ return -1;
+ }
+ } else if (bitflips) {
+ pr_info("ignoring error as within bitflip_limit\n");
+ }
+
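+		/* A partial OOB write: re-read the whole available OOB and
+		 * check that bytes outside the written region are still 0xff. */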
+ if (use_offset != 0 || use_len < mtd->oobavail) {
+ int k;
+
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = mtd->oobavail;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.oobbuf = readbuf;
+ err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
+ if (err || ops.oobretlen != mtd->oobavail) {
+ pr_err("error: readoob failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ return err ? err : -1;
+ }
+ bitflips = memcmpshowoffset(addr, use_offset,
+ readbuf + use_offset,
+ writebuf + (use_len_max * i) + use_offset,
+ use_len);
+
+ /* verify pre-offset area for 0xff */
+ bitflips += memffshow(addr, 0, readbuf, use_offset);
+
+ /* verify post-(use_offset + use_len) area for 0xff */
+ k = use_offset + use_len;
+ bitflips += memffshow(addr, k, readbuf + k,
+ mtd->oobavail - k);
+
+ if (bitflips > bitflip_limit) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ if (errcnt > 1000) {
+ pr_err("error: too many errors\n");
+ return -1;
+ }
+ } else if (bitflips) {
+ pr_info("ignoring errors as within bitflip limit\n");
+ }
+ }
+ if (vary_offset)
+ do_vary_offset();
+ }
+ return err;
+}
+
+static int verify_eraseblock_in_one_go(int ebnum)
+{
+ struct mtd_oob_ops ops = { };
+ int err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ size_t len = mtd->oobavail * pgcnt;
+ size_t oobavail = mtd->oobavail;
+ size_t bitflips;
+ int i;
+
+ prandom_bytes_state(&rnd_state, writebuf, len);
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = len;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.oobbuf = readbuf;
+
+ /* read entire block's OOB at one go */
+ err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
+ if (err || ops.oobretlen != len) {
+ pr_err("error: readoob failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ return err ? err : -1;
+ }
+
+ /* verify one page OOB at a time for bitflip per page limit check */
+ for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
+ bitflips = memcmpshow(addr, readbuf + (i * oobavail),
+ writebuf + (i * oobavail), oobavail);
+ if (bitflips > bitflip_limit) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ if (errcnt > 1000) {
+ pr_err("error: too many errors\n");
+ return -1;
+ }
+ } else if (bitflips) {
+ pr_info("ignoring error as within bitflip_limit\n");
+ }
+ }
+
+ return err;
+}
+
+static int verify_all_eraseblocks(void)
+{
+ int err;
+ unsigned int i;
+
+ pr_info("verifying all eraseblocks\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = verify_eraseblock(i);
+ if (err)
+ return err;
+ if (i % 256 == 0)
+ pr_info("verified up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ return err;
+ }
+ pr_info("verified %u eraseblocks\n", i);
+ return 0;
+}
+
+static int __init mtd_oobtest_init(void)
+{
+ int err = 0;
+ unsigned int i;
+ uint64_t tmp;
+ struct mtd_oob_ops ops = { };
+ loff_t addr = 0, addr0;
+
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ return err;
+ }
+
+ if (!mtd_type_is_nand(mtd)) {
+ pr_info("this test requires NAND flash\n");
+ goto out;
+ }
+
+ tmp = mtd->size;
+ do_div(tmp, mtd->erasesize);
+ ebcnt = tmp;
+ pgcnt = mtd->erasesize / mtd->writesize;
+
+ pr_info("MTD device size %llu, eraseblock size %u, "
+ "page size %u, count of eraseblocks %u, pages per "
+ "eraseblock %u, OOB size %u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ mtd->writesize, ebcnt, pgcnt, mtd->oobsize);
+
+ err = -ENOMEM;
+ readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!readbuf)
+ goto out;
+ writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!writebuf)
+ goto out;
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ use_offset = 0;
+ use_len = mtd->oobavail;
+ use_len_max = mtd->oobavail;
+ vary_offset = 0;
+
+ /* First test: write all OOB, read it back and verify */
+ pr_info("test 1 of 5\n");
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ prandom_seed_state(&rnd_state, 1);
+ err = write_whole_device();
+ if (err)
+ goto out;
+
+ prandom_seed_state(&rnd_state, 1);
+ err = verify_all_eraseblocks();
+ if (err)
+ goto out;
+
+ /*
+ * Second test: write all OOB, a block at a time, read it back and
+ * verify.
+ */
+ pr_info("test 2 of 5\n");
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ prandom_seed_state(&rnd_state, 3);
+ err = write_whole_device();
+ if (err)
+ goto out;
+
+ /* Check all eraseblocks */
+ prandom_seed_state(&rnd_state, 3);
+ pr_info("verifying all eraseblocks\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = verify_eraseblock_in_one_go(i);
+ if (err)
+ goto out;
+ if (i % 256 == 0)
+ pr_info("verified up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("verified %u eraseblocks\n", i);
+
+ /*
+ * Third test: write OOB at varying offsets and lengths, read it back
+ * and verify.
+ */
+ pr_info("test 3 of 5\n");
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Write all eraseblocks */
+ use_offset = 0;
+ use_len = mtd->oobavail;
+ use_len_max = mtd->oobavail;
+ vary_offset = 1;
+ prandom_seed_state(&rnd_state, 5);
+
+ err = write_whole_device();
+ if (err)
+ goto out;
+
+ /* Check all eraseblocks */
+ use_offset = 0;
+ use_len = mtd->oobavail;
+ use_len_max = mtd->oobavail;
+ vary_offset = 1;
+ prandom_seed_state(&rnd_state, 5);
+ err = verify_all_eraseblocks();
+ if (err)
+ goto out;
+
+ use_offset = 0;
+ use_len = mtd->oobavail;
+ use_len_max = mtd->oobavail;
+ vary_offset = 0;
+
+ /* Fourth test: try to write off end of device */
+ pr_info("test 4 of 5\n");
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ addr0 = 0;
+ for (i = 0; i < ebcnt && bbt[i]; ++i)
+ addr0 += mtd->erasesize;
+
+ /* Attempt to write off end of OOB */
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = 1;
+ ops.oobretlen = 0;
+ ops.ooboffs = mtd->oobavail;
+ ops.datbuf = NULL;
+ ops.oobbuf = writebuf;
+ pr_info("attempting to start write past end of OOB\n");
+ pr_info("an error is expected...\n");
+ err = mtd_write_oob(mtd, addr0, &ops);
+ if (err) {
+ pr_info("error occurred as expected\n");
+ } else {
+ pr_err("error: can write past end of OOB\n");
+ errcnt += 1;
+ }
+
+ /* Attempt to read off end of OOB */
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = 1;
+ ops.oobretlen = 0;
+ ops.ooboffs = mtd->oobavail;
+ ops.datbuf = NULL;
+ ops.oobbuf = readbuf;
+ pr_info("attempting to start read past end of OOB\n");
+ pr_info("an error is expected...\n");
+ err = mtd_read_oob(mtd, addr0, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
+ if (err) {
+ pr_info("error occurred as expected\n");
+ } else {
+ pr_err("error: can read past end of OOB\n");
+ errcnt += 1;
+ }
+
+	if (bbt[ebcnt - 1]) {
+		pr_info("skipping end of device tests because last "
+				"block is bad\n");
+	} else {
+ /* Attempt to write off end of device */
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = mtd->oobavail + 1;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.oobbuf = writebuf;
+ pr_info("attempting to write past end of device\n");
+ pr_info("an error is expected...\n");
+ err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (err) {
+ pr_info("error occurred as expected\n");
+ } else {
+ pr_err("error: wrote past end of device\n");
+ errcnt += 1;
+ }
+
+ /* Attempt to read off end of device */
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = mtd->oobavail + 1;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.oobbuf = readbuf;
+ pr_info("attempting to read past end of device\n");
+ pr_info("an error is expected...\n");
+ err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
+ if (err) {
+ pr_info("error occurred as expected\n");
+ } else {
+ pr_err("error: read past end of device\n");
+ errcnt += 1;
+ }
+
+ err = mtdtest_erase_eraseblock(mtd, ebcnt - 1);
+ if (err)
+ goto out;
+
+ /* Attempt to write off end of device */
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = mtd->oobavail;
+ ops.oobretlen = 0;
+ ops.ooboffs = 1;
+ ops.datbuf = NULL;
+ ops.oobbuf = writebuf;
+ pr_info("attempting to write past end of device\n");
+ pr_info("an error is expected...\n");
+ err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (err) {
+ pr_info("error occurred as expected\n");
+ } else {
+ pr_err("error: wrote past end of device\n");
+ errcnt += 1;
+ }
+
+ /* Attempt to read off end of device */
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = mtd->oobavail;
+ ops.oobretlen = 0;
+ ops.ooboffs = 1;
+ ops.datbuf = NULL;
+ ops.oobbuf = readbuf;
+ pr_info("attempting to read past end of device\n");
+ pr_info("an error is expected...\n");
+ err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
+ if (err) {
+ pr_info("error occurred as expected\n");
+ } else {
+ pr_err("error: read past end of device\n");
+ errcnt += 1;
+ }
+ }
+
+ /* Fifth test: write / read across block boundaries */
+ pr_info("test 5 of 5\n");
+
+ /* Erase all eraseblocks */
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Write all eraseblocks */
+ prandom_seed_state(&rnd_state, 11);
+ pr_info("writing OOBs of whole device\n");
+ for (i = 0; i < ebcnt - 1; ++i) {
+ int cnt = 2;
+ int pg;
+ size_t sz = mtd->oobavail;
+ if (bbt[i] || bbt[i + 1])
+ continue;
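+		/* Start at the last page of block i so the two pages written
+		 * straddle the boundary into block i + 1. */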
+ addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
+ prandom_bytes_state(&rnd_state, writebuf, sz * cnt);
+ for (pg = 0; pg < cnt; ++pg) {
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = sz;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.oobbuf = writebuf + pg * sz;
+ err = mtd_write_oob(mtd, addr, &ops);
+ if (err)
+ goto out;
+ if (i % 256 == 0)
+ pr_info("written up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+
+ addr += mtd->writesize;
+ }
+ }
+ pr_info("written %u eraseblocks\n", i);
+
+ /* Check all eraseblocks */
+ prandom_seed_state(&rnd_state, 11);
+ pr_info("verifying all eraseblocks\n");
+ for (i = 0; i < ebcnt - 1; ++i) {
+ if (bbt[i] || bbt[i + 1])
+ continue;
+ prandom_bytes_state(&rnd_state, writebuf, mtd->oobavail * 2);
+ addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = mtd->oobavail * 2;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.oobbuf = readbuf;
+ err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
+ if (err)
+ goto out;
+ if (memcmpshow(addr, readbuf, writebuf,
+ mtd->oobavail * 2)) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ if (errcnt > 1000) {
+ err = -EINVAL;
+ pr_err("error: too many errors\n");
+ goto out;
+ }
+ }
+ if (i % 256 == 0)
+ pr_info("verified up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("verified %u eraseblocks\n", i);
+
+ pr_info("finished with %d errors\n", errcnt);
+out:
+ kfree(bbt);
+ kfree(writebuf);
+ kfree(readbuf);
+ put_mtd_device(mtd);
+ if (err)
+ pr_info("error %d occurred\n", err);
+ printk(KERN_INFO "=================================================\n");
+ return err;
+}
+module_init(mtd_oobtest_init);
+
+static void __exit mtd_oobtest_exit(void)
+{
+ return;
+}
+module_exit(mtd_oobtest_exit);
+
+MODULE_DESCRIPTION("Out-of-band test module");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/pagetest.c b/drivers/mtd/tests/pagetest.c
new file mode 100644
index 0000000000..8eb40b6e6d
--- /dev/null
+++ b/drivers/mtd/tests/pagetest.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * Test page read and write on MTD device.
+ *
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/div64.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#include "mtd_test.h"
+
+static int dev = -EINVAL;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static struct mtd_info *mtd;
+static unsigned char *twopages;
+static unsigned char *writebuf;
+static unsigned char *boundary;
+static unsigned char *bbt;
+
+static int pgsize;
+static int bufsize;
+static int ebcnt;
+static int pgcnt;
+static int errcnt;
+static struct rnd_state rnd_state;
+
+static int write_eraseblock(int ebnum)
+{
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
+ cond_resched();
+ return mtdtest_write(mtd, addr, mtd->erasesize, writebuf);
+}
+
+static int verify_eraseblock(int ebnum)
+{
+ uint32_t j;
+ int err = 0, i;
+ loff_t addr0, addrn;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ addr0 = 0;
+ for (i = 0; i < ebcnt && bbt[i]; ++i)
+ addr0 += mtd->erasesize;
+
+ addrn = mtd->size;
+ for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
+ addrn -= mtd->erasesize;
+
+ prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
+ for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
+ /* Do a read to set the internal dataRAMs to different data */
+ err = mtdtest_read(mtd, addr0, bufsize, twopages);
+ if (err)
+ return err;
+ err = mtdtest_read(mtd, addrn - bufsize, bufsize, twopages);
+ if (err)
+ return err;
+ memset(twopages, 0, bufsize);
+ err = mtdtest_read(mtd, addr, bufsize, twopages);
+ if (err)
+ break;
+ if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ }
+ }
+ /* Check boundary between eraseblocks */
+ if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
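+		/* Peek across the boundary: save the PRNG state so generating
+		 * the next block's first page leaves the verify sequence intact. */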
+ struct rnd_state old_state = rnd_state;
+
+ /* Do a read to set the internal dataRAMs to different data */
+ err = mtdtest_read(mtd, addr0, bufsize, twopages);
+ if (err)
+ return err;
+ err = mtdtest_read(mtd, addrn - bufsize, bufsize, twopages);
+ if (err)
+ return err;
+ memset(twopages, 0, bufsize);
+ err = mtdtest_read(mtd, addr, bufsize, twopages);
+ if (err)
+ return err;
+ memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
+ prandom_bytes_state(&rnd_state, boundary + pgsize, pgsize);
+ if (memcmp(twopages, boundary, bufsize)) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ }
+ rnd_state = old_state;
+ }
+ return err;
+}
+
+static int crosstest(void)
+{
+ int err = 0, i;
+ loff_t addr, addr0, addrn;
+ unsigned char *pp1, *pp2, *pp3, *pp4;
+
+ pr_info("crosstest\n");
+ pp1 = kcalloc(pgsize, 4, GFP_KERNEL);
+ if (!pp1)
+ return -ENOMEM;
+ pp2 = pp1 + pgsize;
+ pp3 = pp2 + pgsize;
+ pp4 = pp3 + pgsize;
+
+ addr0 = 0;
+ for (i = 0; i < ebcnt && bbt[i]; ++i)
+ addr0 += mtd->erasesize;
+
+ addrn = mtd->size;
+ for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
+ addrn -= mtd->erasesize;
+
+ /* Read 2nd-to-last page to pp1 */
+ addr = addrn - pgsize - pgsize;
+ err = mtdtest_read(mtd, addr, pgsize, pp1);
+ if (err) {
+ kfree(pp1);
+ return err;
+ }
+
+ /* Read 3rd-to-last page to pp1 */
+ addr = addrn - pgsize - pgsize - pgsize;
+ err = mtdtest_read(mtd, addr, pgsize, pp1);
+ if (err) {
+ kfree(pp1);
+ return err;
+ }
+
+ /* Read first page to pp2 */
+ addr = addr0;
+ pr_info("reading page at %#llx\n", (long long)addr);
+ err = mtdtest_read(mtd, addr, pgsize, pp2);
+ if (err) {
+ kfree(pp1);
+ return err;
+ }
+
+ /* Read last page to pp3 */
+ addr = addrn - pgsize;
+ pr_info("reading page at %#llx\n", (long long)addr);
+ err = mtdtest_read(mtd, addr, pgsize, pp3);
+ if (err) {
+ kfree(pp1);
+ return err;
+ }
+
+ /* Read first page again to pp4 */
+ addr = addr0;
+ pr_info("reading page at %#llx\n", (long long)addr);
+ err = mtdtest_read(mtd, addr, pgsize, pp4);
+ if (err) {
+ kfree(pp1);
+ return err;
+ }
+
+ /* pp2 and pp4 should be the same */
+ pr_info("verifying pages read at %#llx match\n",
+ (long long)addr0);
+ if (memcmp(pp2, pp4, pgsize)) {
+ pr_err("verify failed!\n");
+ errcnt += 1;
+ } else if (!err)
+ pr_info("crosstest ok\n");
+ kfree(pp1);
+ return err;
+}
+
+static int erasecrosstest(void)
+{
+ int err = 0, i, ebnum, ebnum2;
+ loff_t addr0;
+ char *readbuf = twopages;
+
+ pr_info("erasecrosstest\n");
+
+ ebnum = 0;
+ addr0 = 0;
+ for (i = 0; i < ebcnt && bbt[i]; ++i) {
+ addr0 += mtd->erasesize;
+ ebnum += 1;
+ }
+
+ ebnum2 = ebcnt - 1;
+ while (ebnum2 && bbt[ebnum2])
+ ebnum2 -= 1;
+
+ pr_info("erasing block %d\n", ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
+ if (err)
+ return err;
+
+ pr_info("writing 1st page of block %d\n", ebnum);
+ prandom_bytes_state(&rnd_state, writebuf, pgsize);
+ strcpy(writebuf, "There is no data like this!");
+ err = mtdtest_write(mtd, addr0, pgsize, writebuf);
+ if (err)
+ return err;
+
+ pr_info("reading 1st page of block %d\n", ebnum);
+ memset(readbuf, 0, pgsize);
+ err = mtdtest_read(mtd, addr0, pgsize, readbuf);
+ if (err)
+ return err;
+
+ pr_info("verifying 1st page of block %d\n", ebnum);
+ if (memcmp(writebuf, readbuf, pgsize)) {
+ pr_err("verify failed!\n");
+ errcnt += 1;
+ return -1;
+ }
+
+ pr_info("erasing block %d\n", ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
+ if (err)
+ return err;
+
+ pr_info("writing 1st page of block %d\n", ebnum);
+ prandom_bytes_state(&rnd_state, writebuf, pgsize);
+ strcpy(writebuf, "There is no data like this!");
+ err = mtdtest_write(mtd, addr0, pgsize, writebuf);
+ if (err)
+ return err;
+
+ pr_info("erasing block %d\n", ebnum2);
+ err = mtdtest_erase_eraseblock(mtd, ebnum2);
+ if (err)
+ return err;
+
+ pr_info("reading 1st page of block %d\n", ebnum);
+ memset(readbuf, 0, pgsize);
+ err = mtdtest_read(mtd, addr0, pgsize, readbuf);
+ if (err)
+ return err;
+
+ pr_info("verifying 1st page of block %d\n", ebnum);
+ if (memcmp(writebuf, readbuf, pgsize)) {
+ pr_err("verify failed!\n");
+ errcnt += 1;
+ return -1;
+ }
+
+ if (!err)
+ pr_info("erasecrosstest ok\n");
+ return err;
+}
+
+static int erasetest(void)
+{
+ int err = 0, i, ebnum, ok = 1;
+ loff_t addr0;
+
+ pr_info("erasetest\n");
+
+ ebnum = 0;
+ addr0 = 0;
+ for (i = 0; i < ebcnt && bbt[i]; ++i) {
+ addr0 += mtd->erasesize;
+ ebnum += 1;
+ }
+
+ pr_info("erasing block %d\n", ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
+ if (err)
+ return err;
+
+ pr_info("writing 1st page of block %d\n", ebnum);
+ prandom_bytes_state(&rnd_state, writebuf, pgsize);
+ err = mtdtest_write(mtd, addr0, pgsize, writebuf);
+ if (err)
+ return err;
+
+ pr_info("erasing block %d\n", ebnum);
+ err = mtdtest_erase_eraseblock(mtd, ebnum);
+ if (err)
+ return err;
+
+ pr_info("reading 1st page of block %d\n", ebnum);
+ err = mtdtest_read(mtd, addr0, pgsize, twopages);
+ if (err)
+ return err;
+
+ pr_info("verifying 1st page of block %d is all 0xff\n",
+ ebnum);
+ for (i = 0; i < pgsize; ++i)
+ if (twopages[i] != 0xff) {
+ pr_err("verifying all 0xff failed at %d\n",
+ i);
+ errcnt += 1;
+ ok = 0;
+ break;
+ }
+
+ if (ok && !err)
+ pr_info("erasetest ok\n");
+
+ return err;
+}
+
+static int __init mtd_pagetest_init(void)
+{
+ int err = 0;
+ uint64_t tmp;
+ uint32_t i;
+
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ return err;
+ }
+
+ if (!mtd_type_is_nand(mtd)) {
+ pr_info("this test requires NAND flash\n");
+ goto out;
+ }
+
+ tmp = mtd->size;
+ do_div(tmp, mtd->erasesize);
+ ebcnt = tmp;
+ pgcnt = mtd->erasesize / mtd->writesize;
+ pgsize = mtd->writesize;
+
+ pr_info("MTD device size %llu, eraseblock size %u, "
+ "page size %u, count of eraseblocks %u, pages per "
+ "eraseblock %u, OOB size %u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ pgsize, ebcnt, pgcnt, mtd->oobsize);
+
+ err = -ENOMEM;
+ bufsize = pgsize * 2;
+ writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!writebuf)
+ goto out;
+ twopages = kmalloc(bufsize, GFP_KERNEL);
+ if (!twopages)
+ goto out;
+ boundary = kmalloc(bufsize, GFP_KERNEL);
+ if (!boundary)
+ goto out;
+
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Erase all eraseblocks */
+ pr_info("erasing whole device\n");
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+ pr_info("erased %u eraseblocks\n", ebcnt);
+
+ /* Write all eraseblocks */
+ prandom_seed_state(&rnd_state, 1);
+ pr_info("writing whole device\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = write_eraseblock(i);
+ if (err)
+ goto out;
+ if (i % 256 == 0)
+ pr_info("written up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("written %u eraseblocks\n", i);
+
+ /* Check all eraseblocks */
+ prandom_seed_state(&rnd_state, 1);
+ pr_info("verifying all eraseblocks\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = verify_eraseblock(i);
+ if (err)
+ goto out;
+ if (i % 256 == 0)
+ pr_info("verified up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("verified %u eraseblocks\n", i);
+
+ err = crosstest();
+ if (err)
+ goto out;
+
+ if (ebcnt > 1) {
+ err = erasecrosstest();
+ if (err)
+ goto out;
+ } else {
+ pr_info("skipping erasecrosstest, 2 erase blocks needed\n");
+ }
+
+ err = erasetest();
+ if (err)
+ goto out;
+
+ pr_info("finished with %d errors\n", errcnt);
+out:
+
+ kfree(bbt);
+ kfree(boundary);
+ kfree(twopages);
+ kfree(writebuf);
+ put_mtd_device(mtd);
+ if (err)
+ pr_info("error %d occurred\n", err);
+ printk(KERN_INFO "=================================================\n");
+ return err;
+}
+module_init(mtd_pagetest_init);
+
+static void __exit mtd_pagetest_exit(void)
+{
+ return;
+}
+module_exit(mtd_pagetest_exit);
+
+MODULE_DESCRIPTION("NAND page test");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/readtest.c b/drivers/mtd/tests/readtest.c
new file mode 100644
index 0000000000..99670ef91f
--- /dev/null
+++ b/drivers/mtd/tests/readtest.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * Check MTD device read.
+ *
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "mtd_test.h"
+
+static int dev = -EINVAL;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static struct mtd_info *mtd;
+static unsigned char *iobuf;
+static unsigned char *iobuf1;
+static unsigned char *bbt;
+
+static int pgsize;
+static int ebcnt;
+static int pgcnt;
+
+static int read_eraseblock_by_page(int ebnum)
+{
+ int i, ret, err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ void *buf = iobuf;
+ void *oobbuf = iobuf1;
+
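+	/* Keep going after a failed page; remember only the first error */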
+ for (i = 0; i < pgcnt; i++) {
+		memset(buf, 0, pgsize);
+ ret = mtdtest_read(mtd, addr, pgsize, buf);
+ if (ret) {
+ if (!err)
+ err = ret;
+ }
+ if (mtd->oobsize) {
+ struct mtd_oob_ops ops = { };
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.len = 0;
+ ops.retlen = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.oobbuf = oobbuf;
+ ret = mtd_read_oob(mtd, addr, &ops);
+ if ((ret && !mtd_is_bitflip(ret)) ||
+ ops.oobretlen != mtd->oobsize) {
+ pr_err("error: read oob failed at "
+ "%#llx\n", (long long)addr);
+ if (!err)
+ err = ret;
+ if (!err)
+ err = -EINVAL;
+ }
+ oobbuf += mtd->oobsize;
+ }
+ addr += pgsize;
+ buf += pgsize;
+ }
+
+ return err;
+}
+
+static void dump_eraseblock(int ebnum)
+{
+ int i, j, n;
+ char line[128];
+ int pg, oob;
+
+ pr_info("dumping eraseblock %d\n", ebnum);
+ n = mtd->erasesize;
+ for (i = 0; i < n;) {
+ char *p = line;
+
+ p += sprintf(p, "%05x: ", i);
+ for (j = 0; j < 32 && i < n; j++, i++)
+ p += sprintf(p, "%02x", (unsigned int)iobuf[i]);
+ printk(KERN_CRIT "%s\n", line);
+ cond_resched();
+ }
+ if (!mtd->oobsize)
+ return;
+ pr_info("dumping oob from eraseblock %d\n", ebnum);
+ n = mtd->oobsize;
+ for (pg = 0, i = 0; pg < pgcnt; pg++)
+ for (oob = 0; oob < n;) {
+ char *p = line;
+
+ p += sprintf(p, "%05x: ", i);
+ for (j = 0; j < 32 && oob < n; j++, oob++, i++)
+ p += sprintf(p, "%02x",
+ (unsigned int)iobuf1[i]);
+ printk(KERN_CRIT "%s\n", line);
+ cond_resched();
+ }
+}
+
+static int __init mtd_readtest_init(void)
+{
+ uint64_t tmp;
+ int err, i;
+
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: Cannot get MTD device\n");
+ return err;
+ }
+
+ if (mtd->writesize == 1) {
+ pr_info("not NAND flash, assume page size is 512 "
+ "bytes.\n");
+ pgsize = 512;
+ } else
+ pgsize = mtd->writesize;
+
+ tmp = mtd->size;
+ do_div(tmp, mtd->erasesize);
+ ebcnt = tmp;
+ pgcnt = mtd->erasesize / pgsize;
+
+ pr_info("MTD device size %llu, eraseblock size %u, "
+ "page size %u, count of eraseblocks %u, pages per "
+ "eraseblock %u, OOB size %u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ pgsize, ebcnt, pgcnt, mtd->oobsize);
+
+ err = -ENOMEM;
+ iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!iobuf)
+ goto out;
+ iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!iobuf1)
+ goto out;
+
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Read all eraseblocks 1 page at a time */
+ pr_info("testing page read\n");
+ for (i = 0; i < ebcnt; ++i) {
+ int ret;
+
+ if (bbt[i])
+ continue;
+ ret = read_eraseblock_by_page(i);
+ if (ret) {
+ dump_eraseblock(i);
+ if (!err)
+ err = ret;
+ }
+
+ ret = mtdtest_relax();
+ if (ret) {
+ err = ret;
+ goto out;
+ }
+ }
+
+ if (err)
+ pr_info("finished with errors\n");
+ else
+ pr_info("finished\n");
+
+out:
+
+ kfree(iobuf);
+ kfree(iobuf1);
+ kfree(bbt);
+ put_mtd_device(mtd);
+ if (err)
+ pr_info("error %d occurred\n", err);
+ printk(KERN_INFO "=================================================\n");
+ return err;
+}
+module_init(mtd_readtest_init);
+
+static void __exit mtd_readtest_exit(void)
+{
+ return;
+}
+module_exit(mtd_readtest_exit);
+
+MODULE_DESCRIPTION("Read test module");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/speedtest.c b/drivers/mtd/tests/speedtest.c
new file mode 100644
index 0000000000..075bce32ca
--- /dev/null
+++ b/drivers/mtd/tests/speedtest.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2007 Nokia Corporation
+ *
+ * Test read and write speed of an MTD device.
+ *
+ * Author: Adrian Hunter <adrian.hunter@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#include "mtd_test.h"
+
+static int dev = -EINVAL;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static int count;
+module_param(count, int, S_IRUGO);
+MODULE_PARM_DESC(count, "Maximum number of eraseblocks to use "
+ "(0 means use all)");
+
+static struct mtd_info *mtd;
+static unsigned char *iobuf;
+static unsigned char *bbt;
+
+static int pgsize;
+static int ebcnt;
+static int pgcnt;
+static int goodebcnt;
+static ktime_t start, finish;
+
+static int multiblock_erase(int ebnum, int blocks)
+{
+ int err;
+ struct erase_info ei;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ memset(&ei, 0, sizeof(struct erase_info));
+ ei.addr = addr;
+ ei.len = mtd->erasesize * blocks;
+
+ err = mtd_erase(mtd, &ei);
+ if (err) {
+ pr_err("error %d while erasing EB %d, blocks %d\n",
+ err, ebnum, blocks);
+ return err;
+ }
+
+ return 0;
+}
+
+static int write_eraseblock(int ebnum)
+{
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ return mtdtest_write(mtd, addr, mtd->erasesize, iobuf);
+}
+
+static int write_eraseblock_by_page(int ebnum)
+{
+ int i, err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ void *buf = iobuf;
+
+ for (i = 0; i < pgcnt; i++) {
+ err = mtdtest_write(mtd, addr, pgsize, buf);
+ if (err)
+ break;
+ addr += pgsize;
+ buf += pgsize;
+ }
+
+ return err;
+}
+
+static int write_eraseblock_by_2pages(int ebnum)
+{
+ size_t sz = pgsize * 2;
+ int i, n = pgcnt / 2, err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ void *buf = iobuf;
+
+ for (i = 0; i < n; i++) {
+ err = mtdtest_write(mtd, addr, sz, buf);
+ if (err)
+ return err;
+ addr += sz;
+ buf += sz;
+ }
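+ /* an odd page count leaves one final page to be written on its own */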
+ if (pgcnt % 2)
+ err = mtdtest_write(mtd, addr, pgsize, buf);
+
+ return err;
+}
+
+static int read_eraseblock(int ebnum)
+{
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ return mtdtest_read(mtd, addr, mtd->erasesize, iobuf);
+}
+
+static int read_eraseblock_by_page(int ebnum)
+{
+ int i, err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ void *buf = iobuf;
+
+ for (i = 0; i < pgcnt; i++) {
+ err = mtdtest_read(mtd, addr, pgsize, buf);
+ if (err)
+ break;
+ addr += pgsize;
+ buf += pgsize;
+ }
+
+ return err;
+}
+
+static int read_eraseblock_by_2pages(int ebnum)
+{
+ size_t sz = pgsize * 2;
+ int i, n = pgcnt / 2, err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ void *buf = iobuf;
+
+ for (i = 0; i < n; i++) {
+ err = mtdtest_read(mtd, addr, sz, buf);
+ if (err)
+ return err;
+ addr += sz;
+ buf += sz;
+ }
+ if (pgcnt % 2)
+ err = mtdtest_read(mtd, addr, pgsize, buf);
+
+ return err;
+}
+
+static inline void start_timing(void)
+{
+ start = ktime_get();
+}
+
+static inline void stop_timing(void)
+{
+ finish = ktime_get();
+}
+
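+/*
+ * Compute throughput in KiB/s over the good eraseblocks. Worked example
+ * (hypothetical numbers): 100 good eraseblocks of 128 KiB in 2 s gives
+ * (100 * 128 * 1000000) / 2000000 = 6400 KiB/s.
+ */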
+static long calc_speed(void)
+{
+ uint64_t k, us;
+
+ us = ktime_us_delta(finish, start);
+ if (us == 0)
+ return 0;
+ k = (uint64_t)goodebcnt * (mtd->erasesize / 1024) * 1000000;
+ do_div(k, us);
+ return k;
+}
+
+static int __init mtd_speedtest_init(void)
+{
+ int err, i, blocks, j, k;
+ long speed;
+ uint64_t tmp;
+
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ if (count)
+ pr_info("MTD device: %d count: %d\n", dev, count);
+ else
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ return err;
+ }
+
+ if (mtd->writesize == 1) {
+ pr_info("not NAND flash, assume page size is 512 "
+ "bytes.\n");
+ pgsize = 512;
+ } else
+ pgsize = mtd->writesize;
+
+ tmp = mtd->size;
+ do_div(tmp, mtd->erasesize);
+ ebcnt = tmp;
+ pgcnt = mtd->erasesize / pgsize;
+
+ pr_info("MTD device size %llu, eraseblock size %u, "
+ "page size %u, count of eraseblocks %u, pages per "
+ "eraseblock %u, OOB size %u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ pgsize, ebcnt, pgcnt, mtd->oobsize);
+
+ if (count > 0 && count < ebcnt)
+ ebcnt = count;
+
+ err = -ENOMEM;
+ iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!iobuf)
+ goto out;
+
+ get_random_bytes(iobuf, mtd->erasesize);
+
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+ for (i = 0; i < ebcnt; i++) {
+ if (!bbt[i])
+ goodebcnt++;
+ }
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Write all eraseblocks, 1 eraseblock at a time */
+ pr_info("testing eraseblock write speed\n");
+ start_timing();
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = write_eraseblock(i);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ stop_timing();
+ speed = calc_speed();
+ pr_info("eraseblock write speed is %ld KiB/s\n", speed);
+
+ /* Read all eraseblocks, 1 eraseblock at a time */
+ pr_info("testing eraseblock read speed\n");
+ start_timing();
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = read_eraseblock(i);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ stop_timing();
+ speed = calc_speed();
+ pr_info("eraseblock read speed is %ld KiB/s\n", speed);
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Write all eraseblocks, 1 page at a time */
+ pr_info("testing page write speed\n");
+ start_timing();
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = write_eraseblock_by_page(i);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ stop_timing();
+ speed = calc_speed();
+ pr_info("page write speed is %ld KiB/s\n", speed);
+
+ /* Read all eraseblocks, 1 page at a time */
+ pr_info("testing page read speed\n");
+ start_timing();
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = read_eraseblock_by_page(i);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ stop_timing();
+ speed = calc_speed();
+ pr_info("page read speed is %ld KiB/s\n", speed);
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Write all eraseblocks, 2 pages at a time */
+ pr_info("testing 2 page write speed\n");
+ start_timing();
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = write_eraseblock_by_2pages(i);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ stop_timing();
+ speed = calc_speed();
+ pr_info("2 page write speed is %ld KiB/s\n", speed);
+
+ /* Read all eraseblocks, 2 pages at a time */
+ pr_info("testing 2 page read speed\n");
+ start_timing();
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = read_eraseblock_by_2pages(i);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ stop_timing();
+ speed = calc_speed();
+ pr_info("2 page read speed is %ld KiB/s\n", speed);
+
+ /* Erase all eraseblocks */
+ pr_info("Testing erase speed\n");
+ start_timing();
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+ stop_timing();
+ speed = calc_speed();
+ pr_info("erase speed is %ld KiB/s\n", speed);
+
+ /* Multi-block erase all eraseblocks */
+ for (k = 1; k < 7; k++) {
+ blocks = 1 << k;
+ pr_info("Testing %dx multi-block erase speed\n",
+ blocks);
+ start_timing();
+ for (i = 0; i < ebcnt; ) {
+ for (j = 0; j < blocks && (i + j) < ebcnt; j++)
+ if (bbt[i + j])
+ break;
+ if (j < 1) {
+ i++;
+ continue;
+ }
+ err = multiblock_erase(i, j);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+
+ i += j;
+ }
+ stop_timing();
+ speed = calc_speed();
+ pr_info("%dx multi-block erase speed is %ld KiB/s\n",
+ blocks, speed);
+ }
+ pr_info("finished\n");
+out:
+ kfree(iobuf);
+ kfree(bbt);
+ put_mtd_device(mtd);
+ if (err)
+ pr_info("error %d occurred\n", err);
+ printk(KERN_INFO "=================================================\n");
+ return err;
+}
+module_init(mtd_speedtest_init);
+
+static void __exit mtd_speedtest_exit(void)
+{
+ return;
+}
+module_exit(mtd_speedtest_exit);
+
+MODULE_DESCRIPTION("Speed test module");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/stresstest.c b/drivers/mtd/tests/stresstest.c
new file mode 100644
index 0000000000..8062098930
--- /dev/null
+++ b/drivers/mtd/tests/stresstest.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * Test random reads, writes and erases on an MTD device.
+ *
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include "mtd_test.h"
+
+static int dev = -EINVAL;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static int count = 10000;
+module_param(count, int, S_IRUGO);
+MODULE_PARM_DESC(count, "Number of operations to do (default is 10000)");
+
+static struct mtd_info *mtd;
+static unsigned char *writebuf;
+static unsigned char *readbuf;
+static unsigned char *bbt;
+static int *offsets;
+
+static int pgsize;
+static int bufsize;
+static int ebcnt;
+static int pgcnt;
+
+static int rand_eb(void)
+{
+ unsigned int eb;
+
+again:
+ /* Read or write up to 2 eraseblocks at a time - hence 'ebcnt - 1' */
+ eb = get_random_u32_below(ebcnt - 1);
+ if (bbt[eb])
+ goto again;
+ return eb;
+}
+
+static int rand_offs(void)
+{
+ return get_random_u32_below(bufsize);
+}
+
+static int rand_len(int offs)
+{
+ return get_random_u32_below(bufsize - offs);
+}
+
+static int do_read(void)
+{
+ int eb = rand_eb();
+ int offs = rand_offs();
+ int len = rand_len(offs);
+ loff_t addr;
+
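+ /* eb + 1 may be bad: clamp the read so it does not spill into the bad eraseblock */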
+ if (bbt[eb + 1]) {
+ if (offs >= mtd->erasesize)
+ offs -= mtd->erasesize;
+ if (offs + len > mtd->erasesize)
+ len = mtd->erasesize - offs;
+ }
+ addr = (loff_t)eb * mtd->erasesize + offs;
+ return mtdtest_read(mtd, addr, len, readbuf);
+}
+
+static int do_write(void)
+{
+ int eb = rand_eb(), offs, err, len;
+ loff_t addr;
+
+ offs = offsets[eb];
+ if (offs >= mtd->erasesize) {
+ err = mtdtest_erase_eraseblock(mtd, eb);
+ if (err)
+ return err;
+ offs = offsets[eb] = 0;
+ }
+ len = rand_len(offs);
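+ /* round the length up to whole pages, e.g. 3000 -> 4096 with hypothetical 2048-byte pages */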
+ len = ((len + pgsize - 1) / pgsize) * pgsize;
+ if (offs + len > mtd->erasesize) {
+ if (bbt[eb + 1])
+ len = mtd->erasesize - offs;
+ else {
+ err = mtdtest_erase_eraseblock(mtd, eb + 1);
+ if (err)
+ return err;
+ offsets[eb + 1] = 0;
+ }
+ }
+ addr = (loff_t)eb * mtd->erasesize + offs;
+ err = mtdtest_write(mtd, addr, len, writebuf);
+ if (unlikely(err))
+ return err;
+ offs += len;
+ while (offs > mtd->erasesize) {
+ offsets[eb++] = mtd->erasesize;
+ offs -= mtd->erasesize;
+ }
+ offsets[eb] = offs;
+ return 0;
+}
+
+static int do_operation(void)
+{
+ if (get_random_u32_below(2))
+ return do_read();
+ else
+ return do_write();
+}
+
+static int __init mtd_stresstest_init(void)
+{
+ int err;
+ int i, op;
+ uint64_t tmp;
+
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ return err;
+ }
+
+ if (mtd->writesize == 1) {
+ pr_info("not NAND flash, assume page size is 512 "
+ "bytes.\n");
+ pgsize = 512;
+ } else
+ pgsize = mtd->writesize;
+
+ tmp = mtd->size;
+ do_div(tmp, mtd->erasesize);
+ ebcnt = tmp;
+ pgcnt = mtd->erasesize / pgsize;
+
+ pr_info("MTD device size %llu, eraseblock size %u, "
+ "page size %u, count of eraseblocks %u, pages per "
+ "eraseblock %u, OOB size %u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ pgsize, ebcnt, pgcnt, mtd->oobsize);
+
+ if (ebcnt < 2) {
+ pr_err("error: need at least 2 eraseblocks\n");
+ err = -ENOSPC;
+ goto out_put_mtd;
+ }
+
+ /* Read or write up to 2 eraseblocks at a time */
+ bufsize = mtd->erasesize * 2;
+
+ err = -ENOMEM;
+ readbuf = vmalloc(bufsize);
+ writebuf = vmalloc(bufsize);
+ offsets = kmalloc_array(ebcnt, sizeof(int), GFP_KERNEL);
+ if (!readbuf || !writebuf || !offsets)
+ goto out;
+ for (i = 0; i < ebcnt; i++)
+ offsets[i] = mtd->erasesize;
+ get_random_bytes(writebuf, bufsize);
+
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ /* Do operations */
+ pr_info("doing operations\n");
+ for (op = 0; op < count; op++) {
+ if ((op & 1023) == 0)
+ pr_info("%d operations done\n", op);
+ err = do_operation();
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("finished, %d operations done\n", op);
+
+out:
+ kfree(offsets);
+ kfree(bbt);
+ vfree(writebuf);
+ vfree(readbuf);
+out_put_mtd:
+ put_mtd_device(mtd);
+ if (err)
+ pr_info("error %d occurred\n", err);
+ printk(KERN_INFO "=================================================\n");
+ return err;
+}
+module_init(mtd_stresstest_init);
+
+static void __exit mtd_stresstest_exit(void)
+{
+ return;
+}
+module_exit(mtd_stresstest_exit);
+
+MODULE_DESCRIPTION("Stress test module");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/subpagetest.c b/drivers/mtd/tests/subpagetest.c
new file mode 100644
index 0000000000..05250a0801
--- /dev/null
+++ b/drivers/mtd/tests/subpagetest.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * Test sub-page read and write on an MTD device.
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#include "mtd_test.h"
+
+static int dev = -EINVAL;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static struct mtd_info *mtd;
+static unsigned char *writebuf;
+static unsigned char *readbuf;
+static unsigned char *bbt;
+
+static int subpgsize;
+static int bufsize;
+static int ebcnt;
+static int pgcnt;
+static int errcnt;
+static struct rnd_state rnd_state;
+
+static inline void clear_data(unsigned char *buf, size_t len)
+{
+ memset(buf, 0, len);
+}
+
+static int write_eraseblock(int ebnum)
+{
+ size_t written;
+ int err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
+ err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
+ if (unlikely(err || written != subpgsize)) {
+ pr_err("error: write failed at %#llx\n",
+ (long long)addr);
+ if (written != subpgsize) {
+ pr_err(" write size: %#x\n", subpgsize);
+ pr_err(" written: %#zx\n", written);
+ }
+ return err ? err : -1;
+ }
+
+ addr += subpgsize;
+
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
+ err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
+ if (unlikely(err || written != subpgsize)) {
+ pr_err("error: write failed at %#llx\n",
+ (long long)addr);
+ if (written != subpgsize) {
+ pr_err(" write size: %#x\n", subpgsize);
+ pr_err(" written: %#zx\n", written);
+ }
+ return err ? err : -1;
+ }
+
+ return err;
+}
+
+static int write_eraseblock2(int ebnum)
+{
+ size_t written;
+ int err = 0, k;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ for (k = 1; k < 33; ++k) {
+ if (addr + (subpgsize * k) > (loff_t)(ebnum + 1) * mtd->erasesize)
+ break;
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
+ err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
+ if (unlikely(err || written != subpgsize * k)) {
+ pr_err("error: write failed at %#llx\n",
+ (long long)addr);
+ if (written != subpgsize * k) {
+ pr_err(" write size: %#x\n",
+ subpgsize * k);
+ pr_err(" written: %#08zx\n",
+ written);
+ }
+ return err ? err : -1;
+ }
+ addr += subpgsize * k;
+ }
+
+ return err;
+}
+
+static void print_subpage(unsigned char *p)
+{
+ int i, j;
+
+ for (i = 0; i < subpgsize; ) {
+ for (j = 0; i < subpgsize && j < 32; ++i, ++j)
+ printk("%02x", *p++);
+ printk("\n");
+ }
+}
+
+static int verify_eraseblock(int ebnum)
+{
+ size_t read;
+ int err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
+ clear_data(readbuf, subpgsize);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
+ if (unlikely(err || read != subpgsize)) {
+ if (mtd_is_bitflip(err) && read == subpgsize) {
+ pr_info("ECC correction at %#llx\n",
+ (long long)addr);
+ err = 0;
+ } else {
+ pr_err("error: read failed at %#llx\n",
+ (long long)addr);
+ return err ? err : -1;
+ }
+ }
+ if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ pr_info("------------- written----------------\n");
+ print_subpage(writebuf);
+ pr_info("------------- read ------------------\n");
+ print_subpage(readbuf);
+ pr_info("-------------------------------------\n");
+ errcnt += 1;
+ }
+
+ addr += subpgsize;
+
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize);
+ clear_data(readbuf, subpgsize);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
+ if (unlikely(err || read != subpgsize)) {
+ if (mtd_is_bitflip(err) && read == subpgsize) {
+ pr_info("ECC correction at %#llx\n",
+ (long long)addr);
+ err = 0;
+ } else {
+ pr_err("error: read failed at %#llx\n",
+ (long long)addr);
+ return err ? err : -1;
+ }
+ }
+ if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
+ pr_info("error: verify failed at %#llx\n",
+ (long long)addr);
+ pr_info("------------- written----------------\n");
+ print_subpage(writebuf);
+ pr_info("------------- read ------------------\n");
+ print_subpage(readbuf);
+ pr_info("-------------------------------------\n");
+ errcnt += 1;
+ }
+
+ return err;
+}
+
+static int verify_eraseblock2(int ebnum)
+{
+ size_t read;
+ int err = 0, k;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ for (k = 1; k < 33; ++k) {
+ if (addr + (subpgsize * k) > (loff_t)(ebnum + 1) * mtd->erasesize)
+ break;
+ prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
+ clear_data(readbuf, subpgsize * k);
+ err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
+ if (unlikely(err || read != subpgsize * k)) {
+ if (mtd_is_bitflip(err) && read == subpgsize * k) {
+ pr_info("ECC correction at %#llx\n",
+ (long long)addr);
+ err = 0;
+ } else {
+ pr_err("error: read failed at "
+ "%#llx\n", (long long)addr);
+ return err ? err : -1;
+ }
+ }
+ if (unlikely(memcmp(readbuf, writebuf, subpgsize * k))) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ }
+ addr += subpgsize * k;
+ }
+
+ return err;
+}
+
+static int verify_eraseblock_ff(int ebnum)
+{
+ uint32_t j;
+ size_t read;
+ int err = 0;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+
+ memset(writebuf, 0xff, subpgsize);
+ for (j = 0; j < mtd->erasesize / subpgsize; ++j) {
+ clear_data(readbuf, subpgsize);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
+ if (unlikely(err || read != subpgsize)) {
+ if (mtd_is_bitflip(err) && read == subpgsize) {
+ pr_info("ECC correction at %#llx\n",
+ (long long)addr);
+ err = 0;
+ } else {
+ pr_err("error: read failed at "
+ "%#llx\n", (long long)addr);
+ return err ? err : -1;
+ }
+ }
+ if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
+ pr_err("error: verify 0xff failed at "
+ "%#llx\n", (long long)addr);
+ errcnt += 1;
+ }
+ addr += subpgsize;
+ }
+
+ return err;
+}
+
+static int verify_all_eraseblocks_ff(void)
+{
+ int err;
+ unsigned int i;
+
+ pr_info("verifying all eraseblocks for 0xff\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = verify_eraseblock_ff(i);
+ if (err)
+ return err;
+ if (i % 256 == 0)
+ pr_info("verified up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ return err;
+ }
+ pr_info("verified %u eraseblocks\n", i);
+ return 0;
+}
+
+static int __init mtd_subpagetest_init(void)
+{
+ int err = 0;
+ uint32_t i;
+ uint64_t tmp;
+
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "=================================================\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ return err;
+ }
+
+ if (!mtd_type_is_nand(mtd)) {
+ pr_info("this test requires NAND flash\n");
+ goto out;
+ }
+
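+ /* e.g. a hypothetical 2048-byte page with subpage_sft == 2 gives 512-byte subpages */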
+ subpgsize = mtd->writesize >> mtd->subpage_sft;
+ tmp = mtd->size;
+ do_div(tmp, mtd->erasesize);
+ ebcnt = tmp;
+ pgcnt = mtd->erasesize / mtd->writesize;
+
+ pr_info("MTD device size %llu, eraseblock size %u, "
+ "page size %u, subpage size %u, count of eraseblocks %u, "
+ "pages per eraseblock %u, OOB size %u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ mtd->writesize, subpgsize, ebcnt, pgcnt, mtd->oobsize);
+
+ err = -ENOMEM;
+ bufsize = subpgsize * 32;
+ writebuf = kmalloc(bufsize, GFP_KERNEL);
+ if (!writebuf)
+ goto out;
+ readbuf = kmalloc(bufsize, GFP_KERNEL);
+ if (!readbuf)
+ goto out;
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bbt)
+ goto out;
+
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ pr_info("writing whole device\n");
+ prandom_seed_state(&rnd_state, 1);
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = write_eraseblock(i);
+ if (unlikely(err))
+ goto out;
+ if (i % 256 == 0)
+ pr_info("written up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("written %u eraseblocks\n", i);
+
+ prandom_seed_state(&rnd_state, 1);
+ pr_info("verifying all eraseblocks\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = verify_eraseblock(i);
+ if (unlikely(err))
+ goto out;
+ if (i % 256 == 0)
+ pr_info("verified up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("verified %u eraseblocks\n", i);
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ err = verify_all_eraseblocks_ff();
+ if (err)
+ goto out;
+
+ /* Write all eraseblocks */
+ prandom_seed_state(&rnd_state, 3);
+ pr_info("writing whole device\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = write_eraseblock2(i);
+ if (unlikely(err))
+ goto out;
+ if (i % 256 == 0)
+ pr_info("written up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("written %u eraseblocks\n", i);
+
+ /* Check all eraseblocks */
+ prandom_seed_state(&rnd_state, 3);
+ pr_info("verifying all eraseblocks\n");
+ for (i = 0; i < ebcnt; ++i) {
+ if (bbt[i])
+ continue;
+ err = verify_eraseblock2(i);
+ if (unlikely(err))
+ goto out;
+ if (i % 256 == 0)
+ pr_info("verified up to eraseblock %u\n", i);
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ pr_info("verified %u eraseblocks\n", i);
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+ if (err)
+ goto out;
+
+ err = verify_all_eraseblocks_ff();
+ if (err)
+ goto out;
+
+ pr_info("finished with %d errors\n", errcnt);
+
+out:
+ kfree(bbt);
+ kfree(readbuf);
+ kfree(writebuf);
+ put_mtd_device(mtd);
+ if (err)
+ pr_info("error %d occurred\n", err);
+ printk(KERN_INFO "=================================================\n");
+ return err;
+}
+module_init(mtd_subpagetest_init);
+
+static void __exit mtd_subpagetest_exit(void)
+{
+ return;
+}
+module_exit(mtd_subpagetest_exit);
+
+MODULE_DESCRIPTION("Subpage test module");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/torturetest.c b/drivers/mtd/tests/torturetest.c
new file mode 100644
index 0000000000..841689b4d8
--- /dev/null
+++ b/drivers/mtd/tests/torturetest.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2006-2008 Artem Bityutskiy
+ * Copyright (C) 2006-2008 Jarkko Lavinen
+ * Copyright (C) 2006-2008 Adrian Hunter
+ *
+ * Authors: Artem Bityutskiy, Jarkko Lavinen, Adrian Hunter
+ *
+ * WARNING: this test program may kill your flash and your device. Do not
+ * use it unless you know what you are doing. The authors are not responsible
+ * for any damage caused by this program.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include "mtd_test.h"
+
+#define RETRIES 3
+
+static int eb = 8;
+module_param(eb, int, S_IRUGO);
+MODULE_PARM_DESC(eb, "eraseblock number within the selected MTD device");
+
+static int ebcnt = 32;
+module_param(ebcnt, int, S_IRUGO);
+MODULE_PARM_DESC(ebcnt, "number of consecutive eraseblocks to torture");
+
+static int pgcnt;
+module_param(pgcnt, int, S_IRUGO);
+MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)");
+
+static int dev = -EINVAL;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static int gran = 512;
+module_param(gran, int, S_IRUGO);
+MODULE_PARM_DESC(gran, "how often the status information should be printed");
+
+static int check = 1;
+module_param(check, int, S_IRUGO);
+MODULE_PARM_DESC(check, "if the written data should be checked");
+
+static unsigned int cycles_count;
+module_param(cycles_count, uint, S_IRUGO);
+MODULE_PARM_DESC(cycles_count, "how many erase cycles to do "
+ "(infinite by default)");
+
+static struct mtd_info *mtd;
+
+/* This buffer contains 0x555555...0xAAAAAA... pattern */
+static unsigned char *patt_5A5;
+/* This buffer contains 0xAAAAAA...0x555555... pattern */
+static unsigned char *patt_A5A;
+/* This buffer contains all 0xFF bytes */
+static unsigned char *patt_FF;
+/* This is a temporary buffer used when checking data */
+static unsigned char *check_buf;
+/* How many erase cycles were done */
+static unsigned int erase_cycles;
+
+static int pgsize;
+static ktime_t start, finish;
+
+static void report_corrupt(unsigned char *read, unsigned char *written);
+
+static inline void start_timing(void)
+{
+ start = ktime_get();
+}
+
+static inline void stop_timing(void)
+{
+ finish = ktime_get();
+}
+
+/*
+ * Check that the contents of eraseblock number @ebnum are equivalent to the
+ * @buf buffer.
+ */
+static inline int check_eraseblock(int ebnum, unsigned char *buf)
+{
+ int err, retries = 0;
+ size_t read;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ size_t len = mtd->erasesize;
+
+ if (pgcnt) {
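+ /* torture only the last pgcnt pages of each eraseblock */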
+ addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
+ len = pgcnt * pgsize;
+ }
+
+retry:
+ err = mtd_read(mtd, addr, len, &read, check_buf);
+ if (mtd_is_bitflip(err))
+ pr_err("single bit flip occurred at EB %d "
+ "MTD reported that it was fixed.\n", ebnum);
+ else if (err) {
+ pr_err("error %d while reading EB %d, "
+ "read %zd\n", err, ebnum, read);
+ return err;
+ }
+
+ if (read != len) {
+ pr_err("failed to read %zd bytes from EB %d, "
+ "read only %zd, but no error reported\n",
+ len, ebnum, read);
+ return -EIO;
+ }
+
+ if (memcmp(buf, check_buf, len)) {
+ pr_err("read wrong data from EB %d\n", ebnum);
+ report_corrupt(check_buf, buf);
+
+ if (retries++ < RETRIES) {
+ /* Try read again */
+ yield();
+ pr_info("re-try reading data from EB %d\n",
+ ebnum);
+ goto retry;
+ } else {
+ pr_info("retried %d times, still errors, "
+ "give-up\n", RETRIES);
+ return -EINVAL;
+ }
+ }
+
+ if (retries != 0)
+ pr_info("only attempt number %d was OK (!!!)\n",
+ retries);
+
+ return 0;
+}
+
+static inline int write_pattern(int ebnum, void *buf)
+{
+ int err;
+ size_t written;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ size_t len = mtd->erasesize;
+
+ if (pgcnt) {
+ addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
+ len = pgcnt * pgsize;
+ }
+ err = mtd_write(mtd, addr, len, &written, buf);
+ if (err) {
+ pr_err("error %d while writing EB %d, written %zd"
+ " bytes\n", err, ebnum, written);
+ return err;
+ }
+ if (written != len) {
+ pr_info("written only %zd bytes of %zd, but no error"
+ " reported\n", written, len);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int __init tort_init(void)
+{
+ int err = 0, i, infinite = !cycles_count;
+ unsigned char *bad_ebs;
+
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO "=================================================\n");
+ pr_info("Warning: this program is trying to wear out your "
+ "flash, stop it if this is not wanted.\n");
+
+ if (dev < 0) {
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
+ return -EINVAL;
+ }
+
+ pr_info("MTD device: %d\n", dev);
+ pr_info("torture %d eraseblocks (%d-%d) of mtd%d\n",
+ ebcnt, eb, eb + ebcnt - 1, dev);
+ if (pgcnt)
+ pr_info("torturing just %d pages per eraseblock\n",
+ pgcnt);
+ pr_info("write verify %s\n", check ? "enabled" : "disabled");
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("error: cannot get MTD device\n");
+ return err;
+ }
+
+ if (mtd->writesize == 1) {
+ pr_info("not NAND flash, assume page size is 512 "
+ "bytes.\n");
+ pgsize = 512;
+ } else
+ pgsize = mtd->writesize;
+
+ if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {
+ pr_err("error: invalid pgcnt value %d\n", pgcnt);
+ goto out_mtd;
+ }
+
+ err = -ENOMEM;
+ patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!patt_5A5)
+ goto out_mtd;
+
+ patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!patt_A5A)
+ goto out_patt_5A5;
+
+ patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!patt_FF)
+ goto out_patt_A5A;
+
+ check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!check_buf)
+ goto out_patt_FF;
+
+ bad_ebs = kzalloc(ebcnt, GFP_KERNEL);
+ if (!bad_ebs)
+ goto out_check_buf;
+
+ /* Initialize patterns */
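+ /* even-numbered pages get 0x55 in patt_5A5 and 0xAA in patt_A5A; odd pages are inverted */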
+ memset(patt_FF, 0xFF, mtd->erasesize);
+ for (i = 0; i < mtd->erasesize / pgsize; i++) {
+ if (!(i & 1)) {
+ memset(patt_5A5 + i * pgsize, 0x55, pgsize);
+ memset(patt_A5A + i * pgsize, 0xAA, pgsize);
+ } else {
+ memset(patt_5A5 + i * pgsize, 0xAA, pgsize);
+ memset(patt_A5A + i * pgsize, 0x55, pgsize);
+ }
+ }
+
+ err = mtdtest_scan_for_bad_eraseblocks(mtd, bad_ebs, eb, ebcnt);
+ if (err)
+ goto out;
+
+ start_timing();
+ while (1) {
+ int i;
+ void *patt;
+
+ err = mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
+ if (err)
+ goto out;
+
+ /* Check if the eraseblocks contain only 0xFF bytes */
+ if (check) {
+ for (i = eb; i < eb + ebcnt; i++) {
+ if (bad_ebs[i - eb])
+ continue;
+ err = check_eraseblock(i, patt_FF);
+ if (err) {
+ pr_info("verify failed"
+ " for 0xFF... pattern\n");
+ goto out;
+ }
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ }
+
+ /* Write the pattern */
+ for (i = eb; i < eb + ebcnt; i++) {
+ if (bad_ebs[i - eb])
+ continue;
+ if ((eb + erase_cycles) & 1)
+ patt = patt_5A5;
+ else
+ patt = patt_A5A;
+ err = write_pattern(i, patt);
+ if (err)
+ goto out;
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+
+ /* Verify what we wrote */
+ if (check) {
+ for (i = eb; i < eb + ebcnt; i++) {
+ if (bad_ebs[i - eb])
+ continue;
+ if ((eb + erase_cycles) & 1)
+ patt = patt_5A5;
+ else
+ patt = patt_A5A;
+ err = check_eraseblock(i, patt);
+ if (err) {
+ pr_info("verify failed for %s"
+ " pattern\n",
+ ((eb + erase_cycles) & 1) ?
+ "0x55AA55..." : "0xAA55AA...");
+ goto out;
+ }
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+ }
+ }
+
+ erase_cycles += 1;
+
+ if (erase_cycles % gran == 0) {
+ long ms;
+
+ stop_timing();
+ ms = ktime_ms_delta(finish, start);
+ pr_info("%08u erase cycles done, took %lu "
+ "milliseconds (%lu seconds)\n",
+ erase_cycles, ms, ms / 1000);
+ start_timing();
+ }
+
+ if (!infinite && --cycles_count == 0)
+ break;
+ }
+out:
+
+ pr_info("finished after %u erase cycles\n",
+ erase_cycles);
+ kfree(bad_ebs);
+out_check_buf:
+ kfree(check_buf);
+out_patt_FF:
+ kfree(patt_FF);
+out_patt_A5A:
+ kfree(patt_A5A);
+out_patt_5A5:
+ kfree(patt_5A5);
+out_mtd:
+ put_mtd_device(mtd);
+ if (err)
+ pr_info("error %d occurred during torturing\n", err);
+ printk(KERN_INFO "=================================================\n");
+ return err;
+}
+module_init(tort_init);
+
+static void __exit tort_exit(void)
+{
+ return;
+}
+module_exit(tort_exit);
+
+static int countdiffs(unsigned char *buf, unsigned char *check_buf,
+ unsigned offset, unsigned len, unsigned *bytesp,
+ unsigned *bitsp);
+static void print_bufs(unsigned char *read, unsigned char *written, int start,
+ int len);
+
+/*
+ * Report the detailed information about how the read EB differs from what was
+ * written.
+ */
+static void report_corrupt(unsigned char *read, unsigned char *written)
+{
+ int i;
+ int bytes, bits, pages, first;
+ int offset, len;
+ size_t check_len = mtd->erasesize;
+
+ if (pgcnt)
+ check_len = pgcnt * pgsize;
+
+ bytes = bits = pages = 0;
+ for (i = 0; i < check_len; i += pgsize)
+ if (countdiffs(written, read, i, pgsize, &bytes,
+ &bits) >= 0)
+ pages++;
+
+ pr_info("verify fails on %d pages, %d bytes/%d bits\n",
+ pages, bytes, bits);
+ pr_info("The following is a list of all differences between"
+ " what was read from flash and what was expected\n");
+
+ for (i = 0; i < check_len; i += pgsize) {
+ cond_resched();
+ bytes = bits = 0;
+ first = countdiffs(written, read, i, pgsize, &bytes,
+ &bits);
+ if (first < 0)
+ continue;
+
+ printk("-------------------------------------------------------"
+ "----------------------------------\n");
+
+ pr_info("Page %zd has %d bytes/%d bits failing verify,"
+ " starting at offset 0x%x\n",
+ (mtd->erasesize - check_len + i) / pgsize,
+ bytes, bits, first);
+
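+ /* align the dump window to 8-byte boundaries around the failing bytes */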
+ offset = first & ~0x7;
+ len = ((first + bytes) | 0x7) + 1 - offset;
+
+ print_bufs(read, written, offset, len);
+ }
+}
+
+static void print_bufs(unsigned char *read, unsigned char *written, int start,
+ int len)
+{
+ int i = 0, j1, j2;
+ char *diff;
+
+ printk("Offset Read Written\n");
+ while (i < len) {
+ printk("0x%08x: ", start + i);
+ diff = " ";
+ for (j1 = 0; j1 < 8 && i + j1 < len; j1++) {
+ printk(" %02x", read[start + i + j1]);
+ if (read[start + i + j1] != written[start + i + j1])
+ diff = "***";
+ }
+
+ while (j1 < 8) {
+ printk(" ");
+ j1 += 1;
+ }
+
+ printk(" %s ", diff);
+
+ for (j2 = 0; j2 < 8 && i + j2 < len; j2++)
+ printk(" %02x", written[start + i + j2]);
+ printk("\n");
+ i += 8;
+ }
+}
+
+/*
+ * Count the number of differing bytes and bits and return the first differing
+ * offset.
+ */
+static int countdiffs(unsigned char *buf, unsigned char *check_buf,
+ unsigned offset, unsigned len, unsigned *bytesp,
+ unsigned *bitsp)
+{
+ unsigned i, bit;
+ int first = -1;
+
+ for (i = offset; i < offset + len; i++)
+ if (buf[i] != check_buf[i]) {
+ first = i;
+ break;
+ }
+
+ while (i < offset + len) {
+ if (buf[i] != check_buf[i]) {
+ (*bytesp)++;
+ bit = 1;
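+ /* walk the 8 bit positions (0x01, 0x02, ... 0x80) and count differing bits */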
+ while (bit < 256) {
+ if ((buf[i] & bit) != (check_buf[i] & bit))
+ (*bitsp)++;
+ bit <<= 1;
+ }
+ }
+ i++;
+ }
+
+ return first;
+}
+
+MODULE_DESCRIPTION("Eraseblock torturing module");
+MODULE_AUTHOR("Artem Bityutskiy, Jarkko Lavinen, Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
new file mode 100644
index 0000000000..2ed77b7b3f
--- /dev/null
+++ b/drivers/mtd/ubi/Kconfig
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig MTD_UBI
+ tristate "Enable UBI - Unsorted block images"
+ select CRC32
+ help
+ UBI is a software layer above the MTD layer which allows the use of
+ LVM-like logical volumes on top of MTD devices, hides some
+ complexities of flash chips such as wear and bad blocks, and provides
+ other useful capabilities. Please consult the MTD web site for more
+ details (www.linux-mtd.infradead.org).
+
+if MTD_UBI
+
+config MTD_UBI_WL_THRESHOLD
+ int "UBI wear-leveling threshold"
+ default 4096
+ range 2 65536
+ help
+ This parameter defines the maximum difference between the highest
+ erase counter value and the lowest erase counter value of eraseblocks
+ of UBI devices. When this threshold is exceeded, UBI starts performing
+ wear leveling by means of moving data from eraseblocks with low erase
+ counters to eraseblocks with high erase counters.
+
+ The default value should be OK for SLC NAND flashes, NOR flashes and
+ other flashes which have an eraseblock life-cycle of 100000 or more.
+ However, in case of MLC NAND flashes, which typically have an
+ eraseblock life-cycle of less than 10000, the threshold should be
+ lowered (e.g., to 128 or 256, although it does not have to be a
+ power of 2).
+
+config MTD_UBI_BEB_LIMIT
+ int "Maximum expected bad eraseblock count per 1024 eraseblocks"
+ default 20
+ range 0 768
+ help
+ This option specifies the maximum number of bad physical eraseblocks
+ UBI expects on the MTD device (per 1024 eraseblocks). If the
+ underlying flash cannot have bad eraseblocks (e.g. NOR flash), this
+ value is ignored.
+
+ NAND datasheets often specify the minimum and maximum NVB (Number of
+ Valid Blocks) for the flashes' endurance lifetime. The maximum
+ expected bad eraseblocks per 1024 eraseblocks then can be calculated
+ as "1024 * (1 - MinNVB / MaxNVB)", which gives 20 for most NANDs
+ (MaxNVB is basically the total count of eraseblocks on the chip).
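+
+ For example (hypothetical numbers): a chip with 4096 eraseblocks
+ (MaxNVB) whose datasheet guarantees at least 4016 valid blocks
+ (MinNVB) gives 1024 * (1 - 4016/4096) = 20.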
+
+ To put it differently, if this value is 20, UBI will try to reserve
+ about 1.9% of physical eraseblocks for bad blocks handling. And that
+ will be 1.9% of eraseblocks on the entire NAND chip, not just the MTD
+ partition UBI attaches. This means that if you have, say, a NAND
+ flash chip that admits a maximum of 40 bad eraseblocks, and it is
+ split into two MTD partitions of the same size, UBI will reserve 40
+ eraseblocks when attaching a partition.
+
+ This option can be overridden by the "mtd=" UBI module parameter or
+ by the "attach" ioctl.
+
+ Leave the default value if unsure.
+
+config MTD_UBI_FASTMAP
+ bool "UBI Fastmap (Experimental feature)"
+ default n
+ help
+ Important: this feature is experimental so far and the on-flash
+ format for fastmap may change in future kernel versions.
+
+ Fastmap is a mechanism which allows attaching a UBI device
+ in nearly constant time. Instead of scanning the whole MTD device it
+ only has to locate a checkpoint (called fastmap) on the device.
+ The on-flash fastmap contains all information needed to attach
+ the device. Using fastmap only makes sense on large devices where
+ attaching by scanning takes a long time. UBI will not automatically
+ install a fastmap on old images, but you can set the UBI module
+ parameter fm_autoconvert to 1 if you want this. Please note that
+ fastmap-enabled images are still usable with UBI implementations
+ without fastmap support. On typical flash devices the whole fastmap
+ fits into one PEB. UBI will reserve PEBs to hold two fastmaps.
+
+ If in doubt, say "N".
+
+config MTD_UBI_GLUEBI
+ tristate "MTD devices emulation driver (gluebi)"
+ help
+ This option enables gluebi - an additional driver which emulates MTD
+ devices on top of UBI volumes: for each UBI volume an MTD device is
+ created, and all I/O to this MTD device is redirected to the UBI
+ volume. This is handy to make MTD-oriented software (like JFFS2)
+ work on top of UBI. Do not enable this unless you use legacy
+ software.
+
+config MTD_UBI_BLOCK
+ bool "Read-only block devices on top of UBI volumes"
+ default n
+ depends on BLOCK
+ help
+ This option enables support for read-only UBI block devices. UBI block
+ devices will be layered on top of UBI volumes, which means that the
+ UBI driver will transparently handle things like bad eraseblocks and
+ bit-flips. You can put any block-oriented file system on top of UBI
+ volumes in read-only mode (e.g., ext4), but it is probably most
+ practical for read-only file systems, like squashfs.
+
+ When selected, this feature will be built into the UBI driver.
+
+ If in doubt, say "N".
+
+endif # MTD_UBI
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
new file mode 100644
index 0000000000..543673605c
--- /dev/null
+++ b/drivers/mtd/ubi/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_MTD_UBI) += ubi.o
+
+ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
+ubi-y += misc.o debug.o
+ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
+ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o
+
+obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
new file mode 100644
index 0000000000..ae5abe492b
--- /dev/null
+++ b/drivers/mtd/ubi/attach.c
@@ -0,0 +1,1923 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * UBI attaching sub-system.
+ *
+ * This sub-system is responsible for attaching MTD devices and it also
+ * implements flash media scanning.
+ *
+ * The attaching information is represented by a &struct ubi_attach_info
+ * object. Information about volumes is represented by &struct ubi_ainf_volume
+ * objects which are kept in volume RB-tree with root at the @volumes field.
+ * The RB-tree is indexed by the volume ID.
+ *
+ * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These
+ * objects are kept in per-volume RB-trees with the root at the corresponding
+ * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of
+ * per-volume objects and each of these objects is the root of RB-tree of
+ * per-LEB objects.
+ *
+ * Corrupted physical eraseblocks are put to the @corr list, free physical
+ * eraseblocks are put to the @free list and the physical eraseblocks to be
+ * erased are put to the @erase list.
+ *
+ * About corruptions
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * UBI protects EC and VID headers with CRC-32 checksums, so it can detect
+ * whether the headers are corrupted or not. Sometimes UBI also protects the
+ * data with CRC-32, e.g., when it executes the atomic LEB change operation, or
+ * when it moves the contents of a PEB for wear-leveling purposes.
+ *
+ * UBI tries to distinguish between 2 types of corruptions.
+ *
+ * 1. Corruptions caused by power cuts. These are expected corruptions and UBI
+ * tries to handle them gracefully, without printing too many warnings and
+ * error messages. The idea is that we do not lose important data in these
+ * cases - we may lose only the data that was being written to the media just
+ * before the power cut happened, and the upper layers (e.g., UBIFS) are
+ * supposed to handle such data losses (e.g., by using the FS journal).
+ *
+ * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
+ * the reason is a power cut, UBI puts this PEB to the @erase list, and all
+ * PEBs in the @erase list are scheduled for erasure later.
+ *
+ * 2. Unexpected corruptions which are not caused by power cuts. During
+ * attaching, such PEBs are put to the @corr list and UBI preserves them.
+ * Obviously, this lessens the amount of available PEBs, and if at some point
+ * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
+ * about such PEBs every time the MTD device is attached.
+ *
+ * However, it is difficult to reliably distinguish between these types of
+ * corruptions and UBI's strategy is as follows (in case of attaching by
+ * scanning). UBI assumes corruption type 2 if the VID header is corrupted and
+ * the data area does not contain all 0xFFs, and there were no bit-flips or
+ * integrity errors (e.g., ECC errors in case of NAND) while reading the data
+ * area. Otherwise UBI assumes corruption type 1. So the decision criteria
+ * are as follows.
+ * o If the data area contains only 0xFFs, there are no data, and it is safe
+ * to just erase this PEB - this is corruption type 1.
+ * o If the data area has bit-flips or data integrity errors (ECC errors on
+ * NAND), it is probably a PEB which was being erased when power cut
+ * happened, so this is corruption type 1. However, this is just a guess,
+ * which might be wrong.
+ * o Otherwise this is corruption type 2.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/math64.h>
+#include <linux/random.h>
+#include "ubi.h"
+
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
+
+#define AV_FIND BIT(0)
+#define AV_ADD BIT(1)
+#define AV_FIND_OR_ADD (AV_FIND | AV_ADD)
+
+/**
+ * find_or_add_av - internal function to find a volume, add a volume or do
+ * both (find and add if missing).
+ * @ai: attaching information
+ * @vol_id: the requested volume ID
+ * @flags: a combination of the %AV_FIND and %AV_ADD flags describing the
+ * expected operation. If only %AV_ADD is set, -EEXIST is returned
+ * if the volume already exists. If only %AV_FIND is set, NULL is
+ * returned if the volume does not exist. And if both flags are
+ * set, the helper first tries to find an existing volume, and if
+ * it does not exist it creates a new one.
+ * @created: out parameter used to inform the caller whether it's a newly created
+ * volume or not.
+ *
+ * This function returns a pointer to a volume description or an ERR_PTR if
+ * the operation failed. It can also return NULL if only %AV_FIND is set and
+ * the volume does not exist.
+ */
+static struct ubi_ainf_volume *find_or_add_av(struct ubi_attach_info *ai,
+ int vol_id, unsigned int flags,
+ bool *created)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ /* Walk the volume RB-tree to look if this volume is already present */
+ while (*p) {
+ parent = *p;
+ av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (vol_id == av->vol_id) {
+ *created = false;
+
+ if (!(flags & AV_FIND))
+ return ERR_PTR(-EEXIST);
+
+ return av;
+ }
+
+ if (vol_id > av->vol_id)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ if (!(flags & AV_ADD))
+ return NULL;
+
+ /* The volume is absent - add it */
+ av = kzalloc(sizeof(*av), GFP_KERNEL);
+ if (!av)
+ return ERR_PTR(-ENOMEM);
+
+ av->vol_id = vol_id;
+
+ if (vol_id > ai->highest_vol_id)
+ ai->highest_vol_id = vol_id;
+
+ rb_link_node(&av->rb, parent, p);
+ rb_insert_color(&av->rb, &ai->volumes);
+ ai->vols_found += 1;
+ *created = true;
+ dbg_bld("added volume %d", vol_id);
+ return av;
+}
+
+/**
+ * ubi_find_or_add_av - search for a volume in the attaching information and
+ * add one if it does not exist.
+ * @ai: attaching information
+ * @vol_id: the requested volume ID
+ * @created: whether the volume has been created or not
+ *
+ * This function returns a pointer to the new volume description or an
+ * ERR_PTR if the operation failed.
+ */
+static struct ubi_ainf_volume *ubi_find_or_add_av(struct ubi_attach_info *ai,
+ int vol_id, bool *created)
+{
+ return find_or_add_av(ai, vol_id, AV_FIND_OR_ADD, created);
+}
+
+/**
+ * ubi_alloc_aeb - allocate an aeb element
+ * @ai: attaching information
+ * @pnum: physical eraseblock number
+ * @ec: erase counter of the physical eraseblock
+ *
+ * Allocate an aeb object and initialize the pnum and ec information.
+ * vol_id and lnum are set to UBI_UNKNOWN, and the other fields are
+ * initialized to zero.
+ * Note that the element is not added in any list or RB tree.
+ */
+struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum,
+ int ec)
+{
+ struct ubi_ainf_peb *aeb;
+
+ aeb = kmem_cache_zalloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return NULL;
+
+ aeb->pnum = pnum;
+ aeb->ec = ec;
+ aeb->vol_id = UBI_UNKNOWN;
+ aeb->lnum = UBI_UNKNOWN;
+
+ return aeb;
+}
+
+/**
+ * ubi_free_aeb - free an aeb element
+ * @ai: attaching information
+ * @aeb: the element to free
+ *
+ * Free an aeb object. The caller must have removed the element from any list
+ * or RB tree.
+ */
+void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb)
+{
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+}
+
+/**
+ * add_to_list - add physical eraseblock to a list.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to add
+ * @vol_id: the last used volume id for the PEB
+ * @lnum: the last used LEB number for the PEB
+ * @ec: erase counter of the physical eraseblock
+ * @to_head: if not zero, add to the head of the list
+ * @list: the list to add to
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for physical
+ * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists.
+ * It stores the @lnum and @vol_id alongside, which can both be
+ * %UBI_UNKNOWN if they are not available, not readable, or not assigned.
+ * If @to_head is not zero, PEB will be added to the head of the list, which
+ * basically means it will be processed first later. E.g., we add corrupted
+ * PEBs (corrupted due to power cuts) to the head of the erase list to make
+ * sure we erase them first and get rid of corruptions ASAP. This function
+ * returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
+ int lnum, int ec, int to_head, struct list_head *list)
+{
+ struct ubi_ainf_peb *aeb;
+
+ if (list == &ai->free) {
+ dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
+ } else if (list == &ai->erase) {
+ dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
+ } else if (list == &ai->alien) {
+ dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
+ ai->alien_peb_count += 1;
+ } else
+ BUG();
+
+ aeb = ubi_alloc_aeb(ai, pnum, ec);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ if (to_head)
+ list_add(&aeb->u.list, list);
+ else
+ list_add_tail(&aeb->u.list, list);
+ return 0;
+}
+
+/**
+ * add_corrupted - add a corrupted physical eraseblock.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to add
+ * @ec: erase counter of the physical eraseblock
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for a corrupted
+ * physical eraseblock @pnum and adds it to the 'corr' list. The corruption
+ * was presumably not caused by a power cut. Returns zero in case of success
+ * and a negative error code in case of failure.
+ */
+static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
+{
+ struct ubi_ainf_peb *aeb;
+
+ dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
+
+ aeb = ubi_alloc_aeb(ai, pnum, ec);
+ if (!aeb)
+ return -ENOMEM;
+
+ ai->corr_peb_count += 1;
+ list_add(&aeb->u.list, &ai->corr);
+ return 0;
+}
+
+/**
+ * add_fastmap - add a Fastmap related physical eraseblock.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number the VID header came from
+ * @vid_hdr: the volume identifier header
+ * @ec: erase counter of the physical eraseblock
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for a Fastmap
+ * physical eraseblock @pnum and adds it to the 'fastmap' list.
+ * Such blocks can be Fastmap super and data blocks from both the most
+ * recent Fastmap we're attaching from or from old Fastmaps which will
+ * be erased.
+ */
+static int add_fastmap(struct ubi_attach_info *ai, int pnum,
+ struct ubi_vid_hdr *vid_hdr, int ec)
+{
+ struct ubi_ainf_peb *aeb;
+
+ aeb = ubi_alloc_aeb(ai, pnum, ec);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->vol_id = be32_to_cpu(vid_hdr->vol_id);
+ aeb->sqnum = be64_to_cpu(vid_hdr->sqnum);
+ list_add(&aeb->u.list, &ai->fastmap);
+
+ dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum,
+ aeb->vol_id, aeb->sqnum);
+
+ return 0;
+}
+
+/**
+ * validate_vid_hdr - check volume identifier header.
+ * @ubi: UBI device description object
+ * @vid_hdr: the volume identifier header to check
+ * @av: information about the volume this logical eraseblock belongs to
+ * @pnum: physical eraseblock number the VID header came from
+ *
+ * This function checks that data stored in @vid_hdr is consistent. Returns
+ * non-zero if an inconsistency was found and zero if not.
+ *
+ * Note, UBI sanity-checks everything it reads from the flash media.
+ * Most of the checks are done in the I/O sub-system. Here we check that the
+ * information in the VID header is consistent with the information in other
+ * VID headers of the same volume.
+ */
+static int validate_vid_hdr(const struct ubi_device *ubi,
+ const struct ubi_vid_hdr *vid_hdr,
+ const struct ubi_ainf_volume *av, int pnum)
+{
+ int vol_type = vid_hdr->vol_type;
+ int vol_id = be32_to_cpu(vid_hdr->vol_id);
+ int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ int data_pad = be32_to_cpu(vid_hdr->data_pad);
+
+ if (av->leb_count != 0) {
+ int av_vol_type;
+
+ /*
+ * This is not the first logical eraseblock belonging to this
+ * volume. Ensure that the data in its VID header is consistent
+ * with the data in previous logical eraseblock headers.
+ */
+
+ if (vol_id != av->vol_id) {
+ ubi_err(ubi, "inconsistent vol_id");
+ goto bad;
+ }
+
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av_vol_type = UBI_VID_STATIC;
+ else
+ av_vol_type = UBI_VID_DYNAMIC;
+
+ if (vol_type != av_vol_type) {
+ ubi_err(ubi, "inconsistent vol_type");
+ goto bad;
+ }
+
+ if (used_ebs != av->used_ebs) {
+ ubi_err(ubi, "inconsistent used_ebs");
+ goto bad;
+ }
+
+ if (data_pad != av->data_pad) {
+ ubi_err(ubi, "inconsistent data_pad");
+ goto bad;
+ }
+ }
+
+ return 0;
+
+bad:
+ ubi_err(ubi, "inconsistent VID header at PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ ubi_dump_av(av);
+ return -EINVAL;
+}
+
+/**
+ * add_volume - add volume to the attaching information.
+ * @ai: attaching information
+ * @vol_id: ID of the volume to add
+ * @pnum: physical eraseblock number
+ * @vid_hdr: volume identifier header
+ *
+ * If the volume corresponding to the @vid_hdr logical eraseblock is already
+ * present in the attaching information, this function does nothing. Otherwise
+ * it adds the corresponding volume to the attaching information. Returns a
+ * pointer to the allocated "av" object in case of success and an ERR_PTR in
+ * case of failure.
+ */
+static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
+ int vol_id, int pnum,
+ const struct ubi_vid_hdr *vid_hdr)
+{
+ struct ubi_ainf_volume *av;
+ bool created;
+
+ ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
+
+ av = ubi_find_or_add_av(ai, vol_id, &created);
+ if (IS_ERR(av) || !created)
+ return av;
+
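+ /* A new "av" was just created - initialize it from the first VID header seen */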
+ av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ av->data_pad = be32_to_cpu(vid_hdr->data_pad);
+ av->compat = vid_hdr->compat;
+ av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
+ : UBI_STATIC_VOLUME;
+
+ return av;
+}
+
+/**
+ * ubi_compare_lebs - find out which logical eraseblock is newer.
+ * @ubi: UBI device description object
+ * @aeb: first logical eraseblock to compare
+ * @pnum: physical eraseblock number of the second logical eraseblock to
+ * compare
+ * @vid_hdr: volume identifier header of the second logical eraseblock
+ *
+ * This function compares 2 copies of a LEB and reports which one is newer. In
+ * case of success this function returns a positive value, in case of failure, a
+ * negative error code is returned. The success return codes use the following
+ * bits:
+ * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
+ * second PEB (described by @pnum and @vid_hdr);
+ * o bit 0 is set: the second PEB is newer;
+ * o bit 1 is cleared: no bit-flips were detected in the newer LEB;
+ * o bit 1 is set: bit-flips were detected in the newer LEB;
+ * o bit 2 is cleared: the older LEB is not corrupted;
+ * o bit 2 is set: the older LEB is corrupted.
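+ *
+ * For example, a return value of 3 (bits 0 and 1 set) means that the second
+ * PEB holds the newer copy and that bit-flips were detected in it, so the
+ * caller should keep the second copy but schedule it for scrubbing; the
+ * older copy is typically put on the erase list (see 'ubi_add_to_av()').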
+ */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ int pnum, const struct ubi_vid_hdr *vid_hdr)
+{
+ int len, err, second_is_newer, bitflips = 0, corrupted = 0;
+ uint32_t data_crc, crc;
+ struct ubi_vid_io_buf *vidb = NULL;
+ unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
+
+ if (sqnum2 == aeb->sqnum) {
+ /*
+ * This must be a really ancient UBI image which has been
+ * created before sequence number support was added. At
+ * that time we used 32-bit LEB versions stored in logical
+ * eraseblocks. That was before UBI got into mainline. We do not
+ * support these images anymore. Well, those images still work,
+ * but only if no unclean reboots happened.
+ */
+ ubi_err(ubi, "unsupported on-flash UBI format");
+ return -EINVAL;
+ }
+
+ /* Obviously the LEB with the lower sequence number is older */
+ second_is_newer = (sqnum2 > aeb->sqnum);
+
+ /*
+ * Now we know which copy is newer. If the copy flag of the PEB with
+ * newer version is not set, then we just return, otherwise we have to
+ * check data CRC. For the second PEB we already have the VID header,
+ * for the first one - we'll need to re-read it from flash.
+ *
+ * Note: this may be optimized so that we wouldn't read twice.
+ */
+
+ if (second_is_newer) {
+ if (!vid_hdr->copy_flag) {
+ /* It is not a copy, so it is newer */
+ dbg_bld("second PEB %d is newer, copy_flag is unset",
+ pnum);
+ return 1;
+ }
+ } else {
+ if (!aeb->copy_flag) {
+ /* It is not a copy, so it is newer */
+ dbg_bld("first PEB %d is newer, copy_flag is unset",
+ pnum);
+ return bitflips << 1;
+ }
+
+ vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ if (!vidb)
+ return -ENOMEM;
+
+ pnum = aeb->pnum;
+ err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
+ if (err) {
+ if (err == UBI_IO_BITFLIPS)
+ bitflips = 1;
+ else {
+ ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d",
+ pnum, err);
+ if (err > 0)
+ err = -EIO;
+
+ goto out_free_vidh;
+ }
+ }
+
+ vid_hdr = ubi_get_vid_hdr(vidb);
+ }
+
+ /* Read the data of the copy and check the CRC */
+
+ len = be32_to_cpu(vid_hdr->data_size);
+
+ mutex_lock(&ubi->buf_mutex);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
+ goto out_unlock;
+
+ data_crc = be32_to_cpu(vid_hdr->data_crc);
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
+ if (crc != data_crc) {
+ dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
+ pnum, crc, data_crc);
+ corrupted = 1;
+ bitflips = 0;
+ second_is_newer = !second_is_newer;
+ } else {
+ dbg_bld("PEB %d CRC is OK", pnum);
+ bitflips |= !!err;
+ }
+ mutex_unlock(&ubi->buf_mutex);
+
+ ubi_free_vid_buf(vidb);
+
+ if (second_is_newer)
+ dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
+ else
+ dbg_bld("first PEB %d is newer, copy_flag is set", pnum);
+
+ return second_is_newer | (bitflips << 1) | (corrupted << 2);
+
+out_unlock:
+ mutex_unlock(&ubi->buf_mutex);
+out_free_vidh:
+ ubi_free_vid_buf(vidb);
+ return err;
+}
+
+/**
+ * ubi_add_to_av - add used physical eraseblock to the attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: the physical eraseblock number
+ * @ec: erase counter
+ * @vid_hdr: the volume identifier header
+ * @bitflips: if bit-flips were detected when this physical eraseblock was read
+ *
+ * This function adds information about a used physical eraseblock to the
+ * 'used' tree of the corresponding volume. The function is rather complex
+ * because it has to handle cases when this is not the first physical
+ * eraseblock belonging to the same logical eraseblock, and the newer one has
+ * to be picked, while the older one has to be dropped. This function returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+ int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
+{
+ int err, vol_id, lnum;
+ unsigned long long sqnum;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb;
+ struct rb_node **p, *parent = NULL;
+
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+ sqnum = be64_to_cpu(vid_hdr->sqnum);
+
+ dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
+ pnum, vol_id, lnum, ec, sqnum, bitflips);
+
+ av = add_volume(ai, vol_id, pnum, vid_hdr);
+ if (IS_ERR(av))
+ return PTR_ERR(av);
+
+ if (ai->max_sqnum < sqnum)
+ ai->max_sqnum = sqnum;
+
+ /*
+ * Walk the RB-tree of logical eraseblocks of volume @vol_id to see
+ * whether this is the first instance of this logical eraseblock or not.
+ */
+ p = &av->root.rb_node;
+ while (*p) {
+ int cmp_res;
+
+ parent = *p;
+ aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+ if (lnum != aeb->lnum) {
+ if (lnum < aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ continue;
+ }
+
+ /*
+ * There is already a physical eraseblock describing the same
+ * logical eraseblock present.
+ */
+
+ dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
+ aeb->pnum, aeb->sqnum, aeb->ec);
+
+ /*
+ * Make sure that the logical eraseblocks have different
+ * sequence numbers. Otherwise the image is bad.
+ *
+ * However, if the sequence number is zero, we assume it must
+ * be an ancient UBI image from the era when UBI did not have
+ * sequence numbers. We still can attach these images, unless
+ * there is a need to distinguish between old and new
+ * eraseblocks, in which case we'll refuse the image in
+ * 'ubi_compare_lebs()'. In other words, we attach old clean
+ * images, but refuse attaching old images with duplicated
+ * logical eraseblocks because there was an unclean reboot.
+ */
+ if (aeb->sqnum == sqnum && sqnum != 0) {
+ ubi_err(ubi, "two LEBs with same sequence number %llu",
+ sqnum);
+ ubi_dump_aeb(aeb, 0);
+ ubi_dump_vid_hdr(vid_hdr);
+ return -EINVAL;
+ }
+
+ /*
+ * Now we have to drop the older one and preserve the newer
+ * one.
+ */
+ cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
+ if (cmp_res < 0)
+ return cmp_res;
+
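+ /*
+ * Decode the 'ubi_compare_lebs()' result: bit 0 tells which copy
+ * is newer, bit 1 whether the newer copy has bit-flips (and thus
+ * needs scrubbing), and bit 2 whether the older copy is corrupted;
+ * the last flag is forwarded to 'add_to_list()' below.
+ */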
+ if (cmp_res & 1) {
+ /*
+ * This logical eraseblock is newer than the one
+ * found earlier.
+ */
+ err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
+ if (err)
+ return err;
+
+ err = add_to_list(ai, aeb->pnum, aeb->vol_id,
+ aeb->lnum, aeb->ec, cmp_res & 4,
+ &ai->erase);
+ if (err)
+ return err;
+
+ aeb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->scrub = ((cmp_res & 2) || bitflips);
+ aeb->copy_flag = vid_hdr->copy_flag;
+ aeb->sqnum = sqnum;
+
+ if (av->highest_lnum == lnum)
+ av->last_data_size =
+ be32_to_cpu(vid_hdr->data_size);
+
+ return 0;
+ } else {
+ /*
+ * This logical eraseblock is older than the one found
+ * previously.
+ */
+ return add_to_list(ai, pnum, vol_id, lnum, ec,
+ cmp_res & 4, &ai->erase);
+ }
+ }
+
+ /*
+ * We've met this logical eraseblock for the first time, add it to the
+ * attaching information.
+ */
+
+ err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
+ if (err)
+ return err;
+
+ aeb = ubi_alloc_aeb(ai, pnum, ec);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->scrub = bitflips;
+ aeb->copy_flag = vid_hdr->copy_flag;
+ aeb->sqnum = sqnum;
+
+ if (av->highest_lnum <= lnum) {
+ av->highest_lnum = lnum;
+ av->last_data_size = be32_to_cpu(vid_hdr->data_size);
+ }
+
+ av->leb_count += 1;
+ rb_link_node(&aeb->u.rb, parent, p);
+ rb_insert_color(&aeb->u.rb, &av->root);
+ return 0;
+}
+
+/**
+ * ubi_add_av - add volume to the attaching information.
+ * @ai: attaching information
+ * @vol_id: the requested volume ID
+ *
+ * This function returns a pointer to the new volume description or an
+ * ERR_PTR if the operation failed.
+ */
+struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id)
+{
+ bool created;
+
+ return find_or_add_av(ai, vol_id, AV_ADD, &created);
+}
+
+/**
+ * ubi_find_av - find volume in the attaching information.
+ * @ai: attaching information
+ * @vol_id: the requested volume ID
+ *
+ * This function returns a pointer to the volume description or %NULL if there
+ * are no data about this volume in the attaching information.
+ */
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+ int vol_id)
+{
+ bool created;
+
+ return find_or_add_av((struct ubi_attach_info *)ai, vol_id, AV_FIND,
+ &created);
+}
+
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av,
+ struct list_head *list);
+
+/**
+ * ubi_remove_av - delete attaching information about a volume.
+ * @ai: attaching information
+ * @av: the volume attaching information to delete
+ */
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+ dbg_bld("remove attaching information about volume %d", av->vol_id);
+
+ rb_erase(&av->rb, &ai->volumes);
+ destroy_av(ai, av, &ai->erase);
+ ai->vols_found -= 1;
+}
+
+/**
+ * early_erase_peb - erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to erase;
+ * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
+ *
+ * This function erases physical eraseblock 'pnum', and writes the erase
+ * counter header to it. This function should only be used on UBI device
+ * initialization stages, when the EBA sub-system has not been initialized yet.
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int early_erase_peb(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai, int pnum, int ec)
+{
+ int err;
+ struct ubi_ec_hdr *ec_hdr;
+
+ if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
+ /*
+ * Erase counter overflow. Upgrade UBI and use 64-bit
+ * erase counters internally.
+ */
+ ubi_err(ubi, "erase counter overflow at PEB %d, EC %d",
+ pnum, ec);
+ return -EINVAL;
+ }
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ ec_hdr->ec = cpu_to_be64(ec);
+
+ err = ubi_io_sync_erase(ubi, pnum, 0);
+ if (err < 0)
+ goto out_free;
+
+ err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+
+out_free:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * ubi_early_get_peb - get a free physical eraseblock.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns a free physical eraseblock. It is supposed to be
+ * called on the UBI initialization stages when the wear-leveling sub-system is
+ * not initialized yet. This function picks a physical eraseblock from one of
+ * the lists, writes the EC header if it is needed, and removes it from the
+ * list.
+ *
+ * This function returns a pointer to the "aeb" of the found free PEB in case
+ * of success and an error code in case of failure.
+ */
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
+{
+ int err = 0;
+ struct ubi_ainf_peb *aeb, *tmp_aeb;
+
+ if (!list_empty(&ai->free)) {
+ aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
+ list_del(&aeb->u.list);
+ dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
+ return aeb;
+ }
+
+ /*
+ * We try to erase the first physical eraseblock from the erase list
+ * and pick it if we succeed, or try to erase the next one if not. And
+ * so forth. We don't want to deal with bad eraseblocks here -
+ * they'll be handled later.
+ */
+ list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+
+ err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
+ if (err)
+ continue;
+
+ aeb->ec += 1;
+ list_del(&aeb->u.list);
+ dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
+ return aeb;
+ }
+
+ ubi_err(ubi, "no free eraseblocks");
+ return ERR_PTR(-ENOSPC);
+}
+
+/**
+ * check_corruption - check the data area of PEB.
+ * @ubi: UBI device description object
+ * @vid_hdr: the (corrupted) VID header of this PEB
+ * @pnum: the physical eraseblock number to check
+ *
+ * This is a helper function which is used to distinguish between VID header
+ * corruptions caused by power cuts and other reasons. If the PEB contains only
+ * 0xFF bytes in the data area, the VID header is most probably corrupted
+ * because of a power cut (%0 is returned in this case). Otherwise, it was
+ * probably corrupted for some other reasons (%1 is returned in this case). A
+ * negative error code is returned if a read error occurred.
+ *
+ * If the corruption reason was a power cut, UBI can safely erase this PEB.
+ * Otherwise, it should preserve it to avoid possibly destroying important
+ * information.
+ */
+static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
+ int pnum)
+{
+ int err;
+
+ mutex_lock(&ubi->buf_mutex);
+ memset(ubi->peb_buf, 0x00, ubi->leb_size);
+
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
+ ubi->leb_size);
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
+ /*
+ * Bit-flips or integrity errors while reading the data area.
+ * It is difficult to say for sure what type of corruption
+ * this is, but presumably a power cut happened while this PEB was
+ * erased, so it became unstable and corrupted, and should be
+ * erased.
+ */
+ err = 0;
+ goto out_unlock;
+ }
+
+ if (err)
+ goto out_unlock;
+
+ if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
+ goto out_unlock;
+
+ ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
+ pnum);
+ ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
+ ubi_dump_vid_hdr(vid_hdr);
+ pr_err("hexdump of PEB %d offset %d, length %d",
+ pnum, ubi->leb_start, ubi->leb_size);
+ ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ ubi->peb_buf, ubi->leb_size, 1);
+ err = 1;
+
+out_unlock:
+ mutex_unlock(&ubi->buf_mutex);
+ return err;
+}
+
+static bool vol_ignored(int vol_id)
+{
+ switch (vol_id) {
+ case UBI_LAYOUT_VOLUME_ID:
+ return true;
+ }
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ return ubi_is_fm_vol(vol_id);
+#else
+ return false;
+#endif
+}
+
+/**
+ * scan_peb - scan and process UBI headers of a PEB.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: the physical eraseblock number
+ * @fast: true if we're scanning for a Fastmap
+ *
+ * This function reads UBI headers of PEB @pnum, checks them, and adds
+ * information about this PEB to the corresponding list or RB-tree in the
+ * "attaching info" structure. Returns zero if the physical eraseblock was
+ * successfully handled and a negative error code in case of failure.
+ */
+static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int pnum, bool fast)
+{
+ struct ubi_ec_hdr *ech = ai->ech;
+ struct ubi_vid_io_buf *vidb = ai->vidb;
+ struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb);
+ long long ec;
+ int err, bitflips = 0, vol_id = -1, ec_err = 0;
+
+ dbg_bld("scan PEB %d", pnum);
+
+ /* Skip bad physical eraseblocks */
+ err = ubi_io_is_bad(ubi, pnum);
+ if (err < 0)
+ return err;
+ else if (err) {
+ ai->bad_peb_count += 1;
+ return 0;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (err < 0)
+ return err;
+ switch (err) {
+ case 0:
+ break;
+ case UBI_IO_BITFLIPS:
+ bitflips = 1;
+ break;
+ case UBI_IO_FF:
+ ai->empty_peb_count += 1;
+ return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ UBI_UNKNOWN, 0, &ai->erase);
+ case UBI_IO_FF_BITFLIPS:
+ ai->empty_peb_count += 1;
+ return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ UBI_UNKNOWN, 1, &ai->erase);
+ case UBI_IO_BAD_HDR_EBADMSG:
+ case UBI_IO_BAD_HDR:
+ /*
+ * We also have to look at the VID header, possibly it is not
+ * corrupted. Set the %bitflips flag so that this PEB gets
+ * moved and its EC header re-created.
+ */
+ ec_err = err;
+ ec = UBI_UNKNOWN;
+ bitflips = 1;
+ break;
+ default:
+ ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d",
+ err);
+ return -EINVAL;
+ }
+
+ if (!ec_err) {
+ int image_seq;
+
+ /* Make sure UBI version is OK */
+ if (ech->version != UBI_VERSION) {
+ ubi_err(ubi, "this UBI version is %d, image version is %d",
+ UBI_VERSION, (int)ech->version);
+ return -EINVAL;
+ }
+
+ ec = be64_to_cpu(ech->ec);
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ /*
+ * Erase counter overflow. The EC headers have 64 bits
+ * reserved, but we only make use of 31-bit
+ * values, as this seems to be enough for any existing
+ * flash. Upgrade UBI and use 64-bit erase counters
+ * internally.
+ */
+ ubi_err(ubi, "erase counter overflow, max is %d",
+ UBI_MAX_ERASECOUNTER);
+ ubi_dump_ec_hdr(ech);
+ return -EINVAL;
+ }
+
+ /*
+ * Make sure that all PEBs have the same image sequence number.
+ * This allows us to detect situations when users flash UBI
+ * images incorrectly, so that the flash has the new UBI image
+ * and leftovers from the old one. This feature was added
+ * relatively recently, and the sequence number was always
+ * zero, because old UBI implementations always set it to zero.
+ * For this reason, we do not panic if some PEBs have zero
+ * sequence number, while other PEBs have non-zero sequence
+ * number.
+ */
+ image_seq = be32_to_cpu(ech->image_seq);
+ if (!ubi->image_seq)
+ ubi->image_seq = image_seq;
+ if (image_seq && ubi->image_seq != image_seq) {
+ ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d",
+ image_seq, pnum, ubi->image_seq);
+ ubi_dump_ec_hdr(ech);
+ return -EINVAL;
+ }
+ }
+
+ /* OK, we are done with the EC header, let's look at the VID header */
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
+ if (err < 0)
+ return err;
+ switch (err) {
+ case 0:
+ break;
+ case UBI_IO_BITFLIPS:
+ bitflips = 1;
+ break;
+ case UBI_IO_BAD_HDR_EBADMSG:
+ if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
+ /*
+ * Both EC and VID headers are corrupted and were read
+ * with a data integrity error, probably this is a bad
+ * PEB, but it is not marked as bad yet. This may also
+ * be a result of a power cut during erasure.
+ */
+ ai->maybe_bad_peb_count += 1;
+ fallthrough;
+ case UBI_IO_BAD_HDR:
+ /*
+ * If we're facing a bad VID header we have to drop *all*
+ * Fastmap data structures we find. The most recent Fastmap
+ * could be bad and therefore there is a chance that we attach
+ * from an old one. On a sane MTD stack a PEB should not go
+ * bad all of a sudden, but the reality is different.
+ * So, let's be paranoid and help find the root cause by
+ * falling back to scanning mode instead of attaching with a
+ * bad EBA table and causing data corruption which is hard to
+ * analyze.
+ */
+ if (fast)
+ ai->force_full_scan = 1;
+
+ if (ec_err)
+ /*
+ * Both headers are corrupted. There is a possibility
+ * that this is a valid UBI PEB which has a corresponding
+ * LEB, but the headers are corrupted. However, it is
+ * impossible to distinguish it from a PEB which just
+ * contains garbage because of a power cut during erase
+ * operation. So we just schedule this PEB for erasure.
+ *
+ * Besides, in case of NOR flash, we deliberately
+ * corrupt both headers because NOR flash erasure is
+ * slow and can start from the end.
+ */
+ err = 0;
+ else
+ /*
+ * The EC was OK, but the VID header is corrupted. We
+ * have to check what is in the data area.
+ */
+ err = check_corruption(ubi, vidh, pnum);
+
+ if (err < 0)
+ return err;
+ else if (!err)
+ /* This corruption is caused by a power cut */
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 1, &ai->erase);
+ else
+ /* This is an unexpected corruption */
+ err = add_corrupted(ai, pnum, ec);
+ if (err)
+ return err;
+ goto adjust_mean_ec;
+ case UBI_IO_FF_BITFLIPS:
+ err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ ec, 1, &ai->erase);
+ if (err)
+ return err;
+ goto adjust_mean_ec;
+ case UBI_IO_FF:
+ if (ec_err || bitflips)
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 1, &ai->erase);
+ else
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 0, &ai->free);
+ if (err)
+ return err;
+ goto adjust_mean_ec;
+ default:
+ ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d",
+ err);
+ return -EINVAL;
+ }
+
+ vol_id = be32_to_cpu(vidh->vol_id);
+ if (vol_id > UBI_MAX_VOLUMES && !vol_ignored(vol_id)) {
+ int lnum = be32_to_cpu(vidh->lnum);
+
+ /* Unsupported internal volume */
+ switch (vidh->compat) {
+ case UBI_COMPAT_DELETE:
+ ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
+ vol_id, lnum);
+
+ err = add_to_list(ai, pnum, vol_id, lnum,
+ ec, 1, &ai->erase);
+ if (err)
+ return err;
+ return 0;
+
+ case UBI_COMPAT_RO:
+ ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode",
+ vol_id, lnum);
+ ubi->ro_mode = 1;
+ break;
+
+ case UBI_COMPAT_PRESERVE:
+ ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found",
+ vol_id, lnum);
+ err = add_to_list(ai, pnum, vol_id, lnum,
+ ec, 0, &ai->alien);
+ if (err)
+ return err;
+ return 0;
+
+ case UBI_COMPAT_REJECT:
+ ubi_err(ubi, "incompatible internal volume %d:%d found",
+ vol_id, lnum);
+ return -EINVAL;
+ }
+ }
+
+ if (ec_err)
+ ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
+ pnum);
+
+ if (ubi_is_fm_vol(vol_id))
+ err = add_fastmap(ai, pnum, vidh, ec);
+ else
+ err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
+
+ if (err)
+ return err;
+
+adjust_mean_ec:
+ if (!ec_err) {
+ ai->ec_sum += ec;
+ ai->ec_count += 1;
+ if (ec > ai->max_ec)
+ ai->max_ec = ec;
+ if (ec < ai->min_ec)
+ ai->min_ec = ec;
+ }
+
+ return 0;
+}
+
+/**
+ * late_analysis - analyze the overall situation with PEB.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This is a helper function which takes a look at what PEBs we have after
+ * gathering information about all of them (@ai is complete). It decides
+ * whether the flash is empty and should be formatted, or whether there are
+ * too many corrupted PEBs and we should not attach this MTD device. Returns
+ * zero if we
+ */
+static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb;
+ int max_corr, peb_count;
+
+ peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
+ max_corr = peb_count / 20 ?: 8;
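+ /*
+ * Tolerate at most ~5% of corrupted PEBs; the "?: 8" fallback only
+ * matters for very small devices (fewer than 20 usable PEBs), where
+ * the division would yield zero. E.g. 4096 usable PEBs give a
+ * max_corr of 204.
+ */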
+
+ /*
+ * A few corrupted PEBs are not a problem and may just be a result of
+ * unclean reboots. However, many of them may indicate problems
+ * with the flash HW or the driver.
+ */
+ if (ai->corr_peb_count) {
+ ubi_err(ubi, "%d PEBs are corrupted and preserved",
+ ai->corr_peb_count);
+ pr_err("Corrupted PEBs are:");
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ pr_cont(" %d", aeb->pnum);
+ pr_cont("\n");
+
+ /*
+ * If too many PEBs are corrupted, we refuse attaching,
+ * otherwise, only print a warning.
+ */
+ if (ai->corr_peb_count >= max_corr) {
+ ubi_err(ubi, "too many corrupted PEBs, refusing");
+ return -EINVAL;
+ }
+ }
+
+ if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
+ /*
+ * All PEBs are empty, or almost all - a couple of PEBs look like
+ * they may be bad PEBs which were not marked as bad yet.
+ *
+ * This piece of code basically tries to distinguish between
+ * the following situations:
+ *
+ * 1. Flash is empty, but there are a few bad PEBs, which are not
+ * marked as bad so far, and which were read with errors. We
+ * want to go ahead and format this flash. While formatting,
+ * the faulty PEBs will probably be marked as bad.
+ *
+ * 2. Flash contains non-UBI data and we do not want to format
+ * it and destroy possibly important information.
+ */
+ if (ai->maybe_bad_peb_count <= 2) {
+ ai->is_empty = 1;
+ ubi_msg(ubi, "empty MTD device detected");
+ get_random_bytes(&ubi->image_seq,
+ sizeof(ubi->image_seq));
+ } else {
+ ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
+ return -EINVAL;
+ }
+
+ }
+
+ return 0;
+}
+
+/**
+ * destroy_av - free volume attaching information.
+ * @ai: attaching information
+ * @av: volume attaching information
+ * @list: put the aeb elements in there if !NULL, otherwise free them
+ *
+ * This function destroys the volume attaching information.
+ */
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av,
+ struct list_head *list)
+{
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *this = av->root.rb_node;
+
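+ /*
+ * Destructive post-order traversal: walk down to a leaf, detach it
+ * from its parent, and free it (or move it to @list), repeating
+ * until the tree is empty. No rebalancing is needed because the
+ * tree is being dismantled anyway.
+ */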
+ while (this) {
+ if (this->rb_left)
+ this = this->rb_left;
+ else if (this->rb_right)
+ this = this->rb_right;
+ else {
+ aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
+ this = rb_parent(this);
+ if (this) {
+ if (this->rb_left == &aeb->u.rb)
+ this->rb_left = NULL;
+ else
+ this->rb_right = NULL;
+ }
+
+ if (list)
+ list_add_tail(&aeb->u.list, list);
+ else
+ ubi_free_aeb(ai, aeb);
+ }
+ }
+ kfree(av);
+}
+
+/**
+ * destroy_ai - destroy attaching information.
+ * @ai: attaching information
+ */
+static void destroy_ai(struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb, *aeb_tmp;
+ struct ubi_ainf_volume *av;
+ struct rb_node *rb;
+
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
+ list_del(&aeb->u.list);
+ ubi_free_aeb(ai, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
+ list_del(&aeb->u.list);
+ ubi_free_aeb(ai, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
+ list_del(&aeb->u.list);
+ ubi_free_aeb(ai, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
+ list_del(&aeb->u.list);
+ ubi_free_aeb(ai, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) {
+ list_del(&aeb->u.list);
+ ubi_free_aeb(ai, aeb);
+ }
+
+ /* Destroy the volume RB-tree */
+ rb = ai->volumes.rb_node;
+ while (rb) {
+ if (rb->rb_left)
+ rb = rb->rb_left;
+ else if (rb->rb_right)
+ rb = rb->rb_right;
+ else {
+ av = rb_entry(rb, struct ubi_ainf_volume, rb);
+
+ rb = rb_parent(rb);
+ if (rb) {
+ if (rb->rb_left == &av->rb)
+ rb->rb_left = NULL;
+ else
+ rb->rb_right = NULL;
+ }
+
+ destroy_av(ai, av, NULL);
+ }
+ }
+
+ kmem_cache_destroy(ai->aeb_slab_cache);
+ kfree(ai);
+}
+
+/**
+ * scan_all - scan entire MTD device.
+ * @ubi: UBI device description object
+ * @ai: attach info object
+ * @start: start scanning at this PEB
+ *
+ * This function does full scanning of an MTD device and returns complete
+ * information about it in form of a "struct ubi_attach_info" object. In case
+ * of failure, an error code is returned.
+ */
+static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int start)
+{
+ int err, pnum;
+ struct rb_node *rb1, *rb2;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb;
+
+ err = -ENOMEM;
+
+ ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ai->ech)
+ return err;
+
+ ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ if (!ai->vidb)
+ goto out_ech;
+
+ for (pnum = start; pnum < ubi->peb_count; pnum++) {
+ cond_resched();
+
+ dbg_gen("process PEB %d", pnum);
+ err = scan_peb(ubi, ai, pnum, false);
+ if (err < 0)
+ goto out_vidh;
+ }
+
+ ubi_msg(ubi, "scanning is finished");
+
+ /* Calculate mean erase counter */
+ if (ai->ec_count)
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
+
+ err = late_analysis(ubi, ai);
+ if (err)
+ goto out_vidh;
+
+ /*
+ * In case of unknown erase counter we use the mean erase counter
+ * value.
+ */
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+ }
+
+ list_for_each_entry(aeb, &ai->free, u.list) {
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+ }
+
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+
+ err = self_check_ai(ubi, ai);
+ if (err)
+ goto out_vidh;
+
+ ubi_free_vid_buf(ai->vidb);
+ kfree(ai->ech);
+
+ return 0;
+
+out_vidh:
+ ubi_free_vid_buf(ai->vidb);
+out_ech:
+ kfree(ai->ech);
+ return err;
+}
+
+static struct ubi_attach_info *alloc_ai(void)
+{
+ struct ubi_attach_info *ai;
+
+ ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
+ if (!ai)
+ return ai;
+
+ INIT_LIST_HEAD(&ai->corr);
+ INIT_LIST_HEAD(&ai->free);
+ INIT_LIST_HEAD(&ai->erase);
+ INIT_LIST_HEAD(&ai->alien);
+ INIT_LIST_HEAD(&ai->fastmap);
+ ai->volumes = RB_ROOT;
+ ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
+ sizeof(struct ubi_ainf_peb),
+ 0, 0, NULL);
+ if (!ai->aeb_slab_cache) {
+ kfree(ai);
+ ai = NULL;
+ }
+
+ return ai;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+
+/**
+ * scan_fast - try to find a fastmap and attach from it.
+ * @ubi: UBI device description object
+ * @ai: attach info object
+ *
+ * Returns 0 on success; negative return values indicate an internal
+ * error.
+ * UBI_NO_FASTMAP denotes that no fastmap was found.
+ * UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
+ */
+static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
+{
+ int err, pnum;
+ struct ubi_attach_info *scan_ai;
+
+ err = -ENOMEM;
+
+ scan_ai = alloc_ai();
+ if (!scan_ai)
+ goto out;
+
+ scan_ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!scan_ai->ech)
+ goto out_ai;
+
+ scan_ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ if (!scan_ai->vidb)
+ goto out_ech;
+
+ for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
+ cond_resched();
+
+ dbg_gen("process PEB %d", pnum);
+ err = scan_peb(ubi, scan_ai, pnum, true);
+ if (err < 0)
+ goto out_vidh;
+ }
+
+ ubi_free_vid_buf(scan_ai->vidb);
+ kfree(scan_ai->ech);
+
+ if (scan_ai->force_full_scan)
+ err = UBI_NO_FASTMAP;
+ else
+ err = ubi_scan_fastmap(ubi, *ai, scan_ai);
+
+ if (err) {
+ /*
+ * Didn't attach via fastmap, do a full scan but reuse what
+ * we've already scanned.
+ */
+ destroy_ai(*ai);
+ *ai = scan_ai;
+ } else
+ destroy_ai(scan_ai);
+
+ return err;
+
+out_vidh:
+ ubi_free_vid_buf(scan_ai->vidb);
+out_ech:
+ kfree(scan_ai->ech);
+out_ai:
+ destroy_ai(scan_ai);
+out:
+ return err;
+}
+
+#endif
+
+/**
+ * ubi_attach - attach an MTD device.
+ * @ubi: UBI device descriptor
+ * @force_scan: if set to non-zero attach by scanning
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_attach(struct ubi_device *ubi, int force_scan)
+{
+ int err;
+ struct ubi_attach_info *ai;
+
+ ai = alloc_ai();
+ if (!ai)
+ return -ENOMEM;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* On small flash devices we disable fastmap in any case. */
+ if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
+ ubi->fm_disabled = 1;
+ force_scan = 1;
+ }
+
+ if (force_scan)
+ err = scan_all(ubi, ai, 0);
+ else {
+ err = scan_fast(ubi, &ai);
+ if (err > 0 || mtd_is_eccerr(err)) {
+ if (err != UBI_NO_FASTMAP) {
+ destroy_ai(ai);
+ ai = alloc_ai();
+ if (!ai)
+ return -ENOMEM;
+
+ err = scan_all(ubi, ai, 0);
+ } else {
+ err = scan_all(ubi, ai, UBI_FM_MAX_START);
+ }
+ }
+ }
+#else
+ err = scan_all(ubi, ai, 0);
+#endif
+ if (err)
+ goto out_ai;
+
+ ubi->bad_peb_count = ai->bad_peb_count;
+ ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
+ ubi->corr_peb_count = ai->corr_peb_count;
+ ubi->max_ec = ai->max_ec;
+ ubi->mean_ec = ai->mean_ec;
+ dbg_gen("max. sequence number: %llu", ai->max_sqnum);
+
+ err = ubi_read_volume_table(ubi, ai);
+ if (err)
+ goto out_ai;
+
+ err = ubi_wl_init(ubi, ai);
+ if (err)
+ goto out_vtbl;
+
+ err = ubi_eba_init(ubi, ai);
+ if (err)
+ goto out_wl;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
+ struct ubi_attach_info *scan_ai;
+
+ scan_ai = alloc_ai();
+ if (!scan_ai) {
+ err = -ENOMEM;
+ goto out_wl;
+ }
+
+ err = scan_all(ubi, scan_ai, 0);
+ if (err) {
+ destroy_ai(scan_ai);
+ goto out_wl;
+ }
+
+ err = self_check_eba(ubi, ai, scan_ai);
+ destroy_ai(scan_ai);
+
+ if (err)
+ goto out_wl;
+ }
+#endif
+
+ destroy_ai(ai);
+ return 0;
+
+out_wl:
+ ubi_wl_close(ubi);
+out_vtbl:
+ ubi_free_all_volumes(ubi);
+ vfree(ubi->vtbl);
+out_ai:
+ destroy_ai(ai);
+ return err;
+}
+
+/**
+ * self_check_ai - check the attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns zero if the attaching information is all right, and a
+ * negative error code if not or if an error occurred.
+ */
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ struct ubi_vid_io_buf *vidb = ai->vidb;
+ struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb);
+ int pnum, err, vols_found = 0;
+ struct rb_node *rb1, *rb2;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *last_aeb;
+ uint8_t *buf;
+
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ /*
+ * At first, check that attaching information is OK.
+ */
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ int leb_count = 0;
+
+ cond_resched();
+
+ vols_found += 1;
+
+ if (ai->is_empty) {
+ ubi_err(ubi, "bad is_empty flag");
+ goto bad_av;
+ }
+
+ if (av->vol_id < 0 || av->highest_lnum < 0 ||
+ av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
+ av->data_pad < 0 || av->last_data_size < 0) {
+ ubi_err(ubi, "negative values");
+ goto bad_av;
+ }
+
+ if (av->vol_id >= UBI_MAX_VOLUMES &&
+ av->vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err(ubi, "bad vol_id");
+ goto bad_av;
+ }
+
+ if (av->vol_id > ai->highest_vol_id) {
+ ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there",
+ ai->highest_vol_id, av->vol_id);
+ goto out;
+ }
+
+ if (av->vol_type != UBI_DYNAMIC_VOLUME &&
+ av->vol_type != UBI_STATIC_VOLUME) {
+ ubi_err(ubi, "bad vol_type");
+ goto bad_av;
+ }
+
+ if (av->data_pad > ubi->leb_size / 2) {
+ ubi_err(ubi, "bad data_pad");
+ goto bad_av;
+ }
+
+ last_aeb = NULL;
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+ cond_resched();
+
+ last_aeb = aeb;
+ leb_count += 1;
+
+ if (aeb->pnum < 0 || aeb->ec < 0) {
+ ubi_err(ubi, "negative values");
+ goto bad_aeb;
+ }
+
+ if (aeb->ec < ai->min_ec) {
+ ubi_err(ubi, "bad ai->min_ec (%d), %d found",
+ ai->min_ec, aeb->ec);
+ goto bad_aeb;
+ }
+
+ if (aeb->ec > ai->max_ec) {
+ ubi_err(ubi, "bad ai->max_ec (%d), %d found",
+ ai->max_ec, aeb->ec);
+ goto bad_aeb;
+ }
+
+ if (aeb->pnum >= ubi->peb_count) {
+ ubi_err(ubi, "too high PEB number %d, total PEBs %d",
+ aeb->pnum, ubi->peb_count);
+ goto bad_aeb;
+ }
+
+ if (av->vol_type == UBI_STATIC_VOLUME) {
+ if (aeb->lnum >= av->used_ebs) {
+ ubi_err(ubi, "bad lnum or used_ebs");
+ goto bad_aeb;
+ }
+ } else {
+ if (av->used_ebs != 0) {
+ ubi_err(ubi, "non-zero used_ebs");
+ goto bad_aeb;
+ }
+ }
+
+ if (aeb->lnum > av->highest_lnum) {
+ ubi_err(ubi, "incorrect highest_lnum or lnum");
+ goto bad_aeb;
+ }
+ }
+
+ if (av->leb_count != leb_count) {
+ ubi_err(ubi, "bad leb_count, %d objects in the tree",
+ leb_count);
+ goto bad_av;
+ }
+
+ if (!last_aeb)
+ continue;
+
+ aeb = last_aeb;
+
+ if (aeb->lnum != av->highest_lnum) {
+ ubi_err(ubi, "bad highest_lnum");
+ goto bad_aeb;
+ }
+ }
+
+ if (vols_found != ai->vols_found) {
+ ubi_err(ubi, "bad ai->vols_found %d, should be %d",
+ ai->vols_found, vols_found);
+ goto out;
+ }
+
+ /* Check that attaching information is correct */
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ last_aeb = NULL;
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+ int vol_type;
+
+ cond_resched();
+
+ last_aeb = aeb;
+
+ err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidb, 1);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err(ubi, "VID header is not OK (%d)",
+ err);
+ if (err > 0)
+ err = -EIO;
+ return err;
+ }
+
+ vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
+ UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
+ if (av->vol_type != vol_type) {
+ ubi_err(ubi, "bad vol_type");
+ goto bad_vid_hdr;
+ }
+
+ if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
+ ubi_err(ubi, "bad sqnum %llu", aeb->sqnum);
+ goto bad_vid_hdr;
+ }
+
+ if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
+ ubi_err(ubi, "bad vol_id %d", av->vol_id);
+ goto bad_vid_hdr;
+ }
+
+ if (av->compat != vidh->compat) {
+ ubi_err(ubi, "bad compat %d", vidh->compat);
+ goto bad_vid_hdr;
+ }
+
+ if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err(ubi, "bad lnum %d", aeb->lnum);
+ goto bad_vid_hdr;
+ }
+
+ if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
+ ubi_err(ubi, "bad used_ebs %d", av->used_ebs);
+ goto bad_vid_hdr;
+ }
+
+ if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
+ ubi_err(ubi, "bad data_pad %d", av->data_pad);
+ goto bad_vid_hdr;
+ }
+ }
+
+ if (!last_aeb)
+ continue;
+
+ if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum);
+ goto bad_vid_hdr;
+ }
+
+ if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
+ ubi_err(ubi, "bad last_data_size %d",
+ av->last_data_size);
+ goto bad_vid_hdr;
+ }
+ }
+
+ /*
+ * Make sure that all the physical eraseblocks are in one of the lists
+ * or trees.
+ */
+ buf = kzalloc(ubi->peb_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+ err = ubi_io_is_bad(ubi, pnum);
+ if (err < 0) {
+ kfree(buf);
+ return err;
+ } else if (err)
+ buf[pnum] = 1;
+ }
+
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->free, u.list)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->alien, u.list)
+ buf[aeb->pnum] = 1;
+
+ err = 0;
+ for (pnum = 0; pnum < ubi->peb_count; pnum++)
+ if (!buf[pnum]) {
+ ubi_err(ubi, "PEB %d is not referred", pnum);
+ err = 1;
+ }
+
+ kfree(buf);
+ if (err)
+ goto out;
+ return 0;
+
+bad_aeb:
+ ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum);
+ ubi_dump_aeb(aeb, 0);
+ ubi_dump_av(av);
+ goto out;
+
+bad_av:
+ ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
+ ubi_dump_av(av);
+ goto out;
+
+bad_vid_hdr:
+ ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
+ ubi_dump_av(av);
+ ubi_dump_vid_hdr(vidh);
+
+out:
+ dump_stack();
+ return -EINVAL;
+}
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
new file mode 100644
index 0000000000..437c5b83ff
--- /dev/null
+++ b/drivers/mtd/ubi/block.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 Ezequiel Garcia
+ * Copyright (c) 2011 Free Electrons
+ *
+ * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2007
+ * Authors: Artem Bityutskiy, Frank Haverkamp
+ */
+
+/*
+ * Read-only block devices on top of UBI volumes
+ *
+ * A simple implementation to allow a block device to be layered on top of a
+ * UBI volume. The implementation is provided by creating a static 1-to-1
+ * mapping between the block device and the UBI volume.
+ *
+ * The addressed byte is obtained from the addressed block sector, which is
+ * mapped linearly into the corresponding LEB:
+ *
+ * LEB number = addressed byte / LEB size
+ *
+ * This feature is compiled into the UBI core, and adds a 'block' parameter
+ * to allow early creation of block devices on top of UBI volumes. Runtime
+ * block creation/removal for UBI volumes is provided through two UBI ioctls:
+ * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/mtd/ubi.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/hdreg.h>
+#include <linux/scatterlist.h>
+#include <linux/idr.h>
+#include <asm/div64.h>
+
+#include "ubi-media.h"
+#include "ubi.h"
+
+/* Maximum number of supported devices */
+#define UBIBLOCK_MAX_DEVICES 32
+
+/* Maximum length of the 'block=' parameter */
+#define UBIBLOCK_PARAM_LEN 63
+
+/* Maximum number of comma-separated items in the 'block=' parameter */
+#define UBIBLOCK_PARAM_COUNT 2
+
+struct ubiblock_param {
+ int ubi_num;
+ int vol_id;
+ char name[UBIBLOCK_PARAM_LEN+1];
+};
+
+struct ubiblock_pdu {
+ struct ubi_sgl usgl;
+};
+
+/* Number of elements set in the @ubiblock_param array */
+static int ubiblock_devs __initdata;
+
+/* ubiblock devices specification parameters */
+static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
+
+struct ubiblock {
+ struct ubi_volume_desc *desc;
+ int ubi_num;
+ int vol_id;
+ int refcnt;
+ int leb_size;
+
+ struct gendisk *gd;
+ struct request_queue *rq;
+
+ struct mutex dev_mutex;
+ struct list_head list;
+ struct blk_mq_tag_set tag_set;
+};
+
+/* Linked list of all ubiblock instances */
+static LIST_HEAD(ubiblock_devices);
+static DEFINE_IDR(ubiblock_minor_idr);
+/* Protects ubiblock_devices and ubiblock_minor_idr */
+static DEFINE_MUTEX(devices_mutex);
+static int ubiblock_major;
+
+static int __init ubiblock_set_param(const char *val,
+ const struct kernel_param *kp)
+{
+ int i, ret;
+ size_t len;
+ struct ubiblock_param *param;
+ char buf[UBIBLOCK_PARAM_LEN];
+ char *pbuf = &buf[0];
+ char *tokens[UBIBLOCK_PARAM_COUNT];
+
+ if (!val)
+ return -EINVAL;
+
+ len = strnlen(val, UBIBLOCK_PARAM_LEN);
+ if (len == 0) {
+ pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
+ return 0;
+ }
+
+ if (len == UBIBLOCK_PARAM_LEN) {
+ pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
+ val, UBIBLOCK_PARAM_LEN);
+ return -EINVAL;
+ }
+
+ strcpy(buf, val);
+
+ /* Get rid of the final newline */
+ if (buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
+ tokens[i] = strsep(&pbuf, ",");
+
+ param = &ubiblock_param[ubiblock_devs];
+ if (tokens[1]) {
+ /* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
+ ret = kstrtoint(tokens[0], 10, &param->ubi_num);
+ if (ret < 0)
+ return -EINVAL;
+
+ /* Second param can be a number or a name */
+ ret = kstrtoint(tokens[1], 10, &param->vol_id);
+ if (ret < 0) {
+ param->vol_id = -1;
+ strcpy(param->name, tokens[1]);
+ }
+
+ } else {
+ /* One parameter: must be device path */
+ strcpy(param->name, tokens[0]);
+ param->ubi_num = -1;
+ param->vol_id = -1;
+ }
+
+ ubiblock_devs++;
+
+ return 0;
+}
+
+static const struct kernel_param_ops ubiblock_param_ops = {
+ .set = ubiblock_set_param,
+};
+module_param_cb(block, &ubiblock_param_ops, NULL, 0);
+MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
+ "Multiple \"block\" parameters may be specified.\n"
+ "UBI volumes may be specified by their number, name, or path to the device node.\n"
+ "Examples\n"
+ "Using the UBI volume path:\n"
+ "ubi.block=/dev/ubi0_0\n"
+ "Using the UBI device, and the volume name:\n"
+ "ubi.block=0,rootfs\n"
+ "Using both UBI device number and UBI volume number:\n"
+ "ubi.block=0,0\n");
+
+static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
+{
+ struct ubiblock *dev;
+
+ list_for_each_entry(dev, &ubiblock_devices, list)
+ if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
+ return dev;
+ return NULL;
+}
+
+static blk_status_t ubiblock_read(struct request *req)
+{
+ struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
+ struct ubiblock *dev = req->q->queuedata;
+ u64 pos = blk_rq_pos(req) << 9;
+ int to_read = blk_rq_bytes(req);
+ int bytes_left = to_read;
+ /* Get LEB:offset address to read from */
+ int offset = do_div(pos, dev->leb_size);
+ int leb = pos;
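+ /*
+ * Example, assuming 128 KiB LEBs: sector 300 addresses byte 153600,
+ * which 'do_div()' splits into LEB 1 and offset 22528.
+ */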
+ struct req_iterator iter;
+ struct bio_vec bvec;
+ int ret = 0;
+
+ blk_mq_start_request(req);
+
+ /*
+ * It is safe to ignore the return value of blk_rq_map_sg() because
+ * the number of sg entries is limited to UBI_MAX_SG_COUNT
+ * and ubi_read_sg() will check that limit.
+ */
+ ubi_sgl_init(&pdu->usgl);
+ blk_rq_map_sg(req->q, req, pdu->usgl.sg);
+
+ while (bytes_left) {
+ /*
+ * We can only read one LEB at a time. Therefore if the read
+ * length is larger than one LEB size, we split the operation.
+ */
+ if (offset + to_read > dev->leb_size)
+ to_read = dev->leb_size - offset;
+
+ ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
+ if (ret < 0)
+ break;
+
+ bytes_left -= to_read;
+ to_read = bytes_left;
+ leb += 1;
+ offset = 0;
+ }
+
+ rq_for_each_segment(bvec, req, iter)
+ flush_dcache_page(bvec.bv_page);
+
+ blk_mq_end_request(req, errno_to_blk_status(ret));
+
+ return BLK_STS_OK;
+}
+
+static int ubiblock_open(struct gendisk *disk, blk_mode_t mode)
+{
+ struct ubiblock *dev = disk->private_data;
+ int ret;
+
+ mutex_lock(&dev->dev_mutex);
+ if (dev->refcnt > 0) {
+ /*
+ * The volume is already open, just increase the reference
+ * counter.
+ */
+ goto out_done;
+ }
+
+ /*
+ * We want users to be aware they should only mount us as read-only.
+ * It's just a paranoid check, as write requests will get rejected
+ * in any case.
+ */
+ if (mode & BLK_OPEN_WRITE) {
+ ret = -EROFS;
+ goto out_unlock;
+ }
+ dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
+ if (IS_ERR(dev->desc)) {
+ dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
+ dev->ubi_num, dev->vol_id);
+ ret = PTR_ERR(dev->desc);
+ dev->desc = NULL;
+ goto out_unlock;
+ }
+
+out_done:
+ dev->refcnt++;
+ mutex_unlock(&dev->dev_mutex);
+ return 0;
+
+out_unlock:
+ mutex_unlock(&dev->dev_mutex);
+ return ret;
+}
+
+static void ubiblock_release(struct gendisk *gd)
+{
+ struct ubiblock *dev = gd->private_data;
+
+ mutex_lock(&dev->dev_mutex);
+ dev->refcnt--;
+ if (dev->refcnt == 0) {
+ ubi_close_volume(dev->desc);
+ dev->desc = NULL;
+ }
+ mutex_unlock(&dev->dev_mutex);
+}
+
+static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ /* Some tools might require this information */
+ geo->heads = 1;
+ geo->cylinders = 1;
+ geo->sectors = get_capacity(bdev->bd_disk);
+ geo->start = 0;
+ return 0;
+}
+
+static const struct block_device_operations ubiblock_ops = {
+ .owner = THIS_MODULE,
+ .open = ubiblock_open,
+ .release = ubiblock_release,
+ .getgeo = ubiblock_getgeo,
+};
+
+static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ switch (req_op(bd->rq)) {
+ case REQ_OP_READ:
+ return ubiblock_read(bd->rq);
+ default:
+ return BLK_STS_IOERR;
+ }
+}
+
+static int ubiblock_init_request(struct blk_mq_tag_set *set,
+ struct request *req, unsigned int hctx_idx,
+ unsigned int numa_node)
+{
+ struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
+
+ sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
+ return 0;
+}
+
+static const struct blk_mq_ops ubiblock_mq_ops = {
+ .queue_rq = ubiblock_queue_rq,
+ .init_request = ubiblock_init_request,
+};
+
+static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
+{
+ u64 size = vi->used_bytes >> 9;
+
+ if (vi->used_bytes % 512) {
+ if (vi->vol_type == UBI_DYNAMIC_VOLUME)
+ pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
+ vi->used_bytes - (size << 9));
+ else
+ pr_info("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
+ vi->used_bytes - (size << 9));
+ }
+
+ if ((sector_t)size != size)
+ return -EFBIG;
+
+ *disk_capacity = size;
+
+ return 0;
+}
+
+int ubiblock_create(struct ubi_volume_info *vi)
+{
+ struct ubiblock *dev;
+ struct gendisk *gd;
+ u64 disk_capacity;
+ int ret;
+
+ ret = calc_disk_capacity(vi, &disk_capacity);
+ if (ret)
+ return ret;
+
+ /* Check that the volume isn't already handled */
+ mutex_lock(&devices_mutex);
+ if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
+ ret = -EEXIST;
+ goto out_unlock;
+ }
+
+ dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ mutex_init(&dev->dev_mutex);
+
+ dev->ubi_num = vi->ubi_num;
+ dev->vol_id = vi->vol_id;
+ dev->leb_size = vi->usable_leb_size;
+
+ dev->tag_set.ops = &ubiblock_mq_ops;
+ dev->tag_set.queue_depth = 64;
+ dev->tag_set.numa_node = NUMA_NO_NODE;
+ dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
+ dev->tag_set.driver_data = dev;
+ dev->tag_set.nr_hw_queues = 1;
+
+ ret = blk_mq_alloc_tag_set(&dev->tag_set);
+ if (ret) {
+ /* dev->gd is not allocated yet, so dev_err() cannot be used here */
+ pr_err("UBI: block: blk_mq_alloc_tag_set failed for ubi%d_%d\n",
+ dev->ubi_num, dev->vol_id);
+ goto out_free_dev;
+ }
+
+ /* Initialize the gendisk of this ubiblock device */
+ gd = blk_mq_alloc_disk(&dev->tag_set, dev);
+ if (IS_ERR(gd)) {
+ ret = PTR_ERR(gd);
+ goto out_free_tags;
+ }
+
+ gd->fops = &ubiblock_ops;
+ gd->major = ubiblock_major;
+ gd->minors = 1;
+ gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
+ if (gd->first_minor < 0) {
+ dev_err(disk_to_dev(gd),
+ "block: dynamic minor allocation failed");
+ ret = -ENODEV;
+ goto out_cleanup_disk;
+ }
+ gd->flags |= GENHD_FL_NO_PART;
+ gd->private_data = dev;
+ sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
+ set_capacity(gd, disk_capacity);
+ dev->gd = gd;
+
+ dev->rq = gd->queue;
+ blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
+
+ list_add_tail(&dev->list, &ubiblock_devices);
+
+ /* Must be the last step: anyone can call file ops from now on */
+ ret = device_add_disk(vi->dev, dev->gd, NULL);
+ if (ret)
+ goto out_remove_minor;
+
+ dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
+ dev->ubi_num, dev->vol_id, vi->name);
+ mutex_unlock(&devices_mutex);
+ return 0;
+
+out_remove_minor:
+ list_del(&dev->list);
+ idr_remove(&ubiblock_minor_idr, gd->first_minor);
+out_cleanup_disk:
+ put_disk(dev->gd);
+out_free_tags:
+ blk_mq_free_tag_set(&dev->tag_set);
+out_free_dev:
+ kfree(dev);
+out_unlock:
+ mutex_unlock(&devices_mutex);
+
+ return ret;
+}
+
+static void ubiblock_cleanup(struct ubiblock *dev)
+{
+ /* Stop new requests from arriving */
+ del_gendisk(dev->gd);
+ /* Finally destroy the blk queue */
+ dev_info(disk_to_dev(dev->gd), "released");
+ put_disk(dev->gd);
+ blk_mq_free_tag_set(&dev->tag_set);
+ idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
+}
+
+int ubiblock_remove(struct ubi_volume_info *vi)
+{
+ struct ubiblock *dev;
+ int ret;
+
+ mutex_lock(&devices_mutex);
+ dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
+ if (!dev) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ /* Found a device, let's lock it so we can check if it's busy */
+ mutex_lock(&dev->dev_mutex);
+ if (dev->refcnt > 0) {
+ ret = -EBUSY;
+ goto out_unlock_dev;
+ }
+
+ /* Remove from device list */
+ list_del(&dev->list);
+ ubiblock_cleanup(dev);
+ mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&devices_mutex);
+
+ kfree(dev);
+ return 0;
+
+out_unlock_dev:
+ mutex_unlock(&dev->dev_mutex);
+out_unlock:
+ mutex_unlock(&devices_mutex);
+ return ret;
+}
+
+static int ubiblock_resize(struct ubi_volume_info *vi)
+{
+ struct ubiblock *dev;
+ u64 disk_capacity;
+ int ret;
+
+ /*
+ * Need to lock the device list until we stop using the device,
+ * otherwise the device struct might get released in
+ * 'ubiblock_remove()'.
+ */
+ mutex_lock(&devices_mutex);
+ dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
+ if (!dev) {
+ mutex_unlock(&devices_mutex);
+ return -ENODEV;
+ }
+
+ ret = calc_disk_capacity(vi, &disk_capacity);
+ if (ret) {
+ mutex_unlock(&devices_mutex);
+ if (ret == -EFBIG) {
+ dev_warn(disk_to_dev(dev->gd),
+ "the volume is too big (%d LEBs), cannot resize",
+ vi->size);
+ }
+ return ret;
+ }
+
+ mutex_lock(&dev->dev_mutex);
+
+ if (get_capacity(dev->gd) != disk_capacity) {
+ set_capacity(dev->gd, disk_capacity);
+ dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
+ vi->used_bytes);
+ }
+ mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&devices_mutex);
+ return 0;
+}
+
+static int ubiblock_notify(struct notifier_block *nb,
+ unsigned long notification_type, void *ns_ptr)
+{
+ struct ubi_notification *nt = ns_ptr;
+
+ switch (notification_type) {
+ case UBI_VOLUME_ADDED:
+ /*
+ * We want to enforce explicit block device creation for
+ * volumes, so when a volume is added we do nothing.
+ */
+ break;
+ case UBI_VOLUME_REMOVED:
+ ubiblock_remove(&nt->vi);
+ break;
+ case UBI_VOLUME_RESIZED:
+ ubiblock_resize(&nt->vi);
+ break;
+ case UBI_VOLUME_UPDATED:
+ /*
+ * If the volume is static, a content update might mean the
+ * size (i.e. used_bytes) was also changed.
+ */
+ if (nt->vi.vol_type == UBI_STATIC_VOLUME)
+ ubiblock_resize(&nt->vi);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ubiblock_notifier = {
+ .notifier_call = ubiblock_notify,
+};
+
+static struct ubi_volume_desc * __init
+open_volume_desc(const char *name, int ubi_num, int vol_id)
+{
+ if (ubi_num == -1)
+ /* No ubi num, name must be a vol device path */
+ return ubi_open_volume_path(name, UBI_READONLY);
+ else if (vol_id == -1)
+ /* No vol_id, must be vol_name */
+ return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
+ else
+ return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
+}
+
+static void __init ubiblock_create_from_param(void)
+{
+ int i, ret = 0;
+ struct ubiblock_param *p;
+ struct ubi_volume_desc *desc;
+ struct ubi_volume_info vi;
+
+ /*
+ * If there is an error creating one of the ubiblocks, continue on to
+ * create the following ubiblocks. This helps in a circumstance where
+ * the kernel command-line specifies multiple block devices and some
+ * may be broken, but we still want the working ones to come up.
+ */
+ for (i = 0; i < ubiblock_devs; i++) {
+ p = &ubiblock_param[i];
+
+ desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
+ if (IS_ERR(desc)) {
+ pr_err(
+ "UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
+ p->ubi_num, p->vol_id, PTR_ERR(desc));
+ continue;
+ }
+
+ ubi_get_volume_info(desc, &vi);
+ ubi_close_volume(desc);
+
+ ret = ubiblock_create(&vi);
+ if (ret) {
+ pr_err(
+ "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
+ vi.name, p->ubi_num, p->vol_id, ret);
+ continue;
+ }
+ }
+}
+
+static void ubiblock_remove_all(void)
+{
+ struct ubiblock *next;
+ struct ubiblock *dev;
+
+ mutex_lock(&devices_mutex);
+ list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
+ /* The module is being forcefully removed */
+ WARN_ON(dev->desc);
+ /* Remove from device list */
+ list_del(&dev->list);
+ ubiblock_cleanup(dev);
+ kfree(dev);
+ }
+ mutex_unlock(&devices_mutex);
+}
+
+int __init ubiblock_init(void)
+{
+ int ret;
+
+ ubiblock_major = register_blkdev(0, "ubiblock");
+ if (ubiblock_major < 0)
+ return ubiblock_major;
+
+ /*
+ * Attach block devices from 'block=' module param.
+ * Even if one block device in the param list fails to come up,
+ * still allow the module to load and leave any others up.
+ */
+ ubiblock_create_from_param();
+
+ /*
+ * Block devices are only created upon user requests, so we ignore
+ * existing volumes.
+ */
+ ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
+ if (ret)
+ goto err_unreg;
+ return 0;
+
+err_unreg:
+ unregister_blkdev(ubiblock_major, "ubiblock");
+ ubiblock_remove_all();
+ return ret;
+}
+
+void __exit ubiblock_exit(void)
+{
+ ubi_unregister_volume_notifier(&ubiblock_notifier);
+ ubiblock_remove_all();
+ unregister_blkdev(ubiblock_major, "ubiblock");
+}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
new file mode 100644
index 0000000000..8ee51e49fc
--- /dev/null
+++ b/drivers/mtd/ubi/build.c
@@ -0,0 +1,1514 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2007
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём),
+ * Frank Haverkamp
+ */
+
+/*
+ * This file includes UBI initialization and building of UBI devices.
+ *
+ * When UBI is initialized, it attaches all the MTD devices specified as the
+ * module load parameters or the kernel boot parameters. If no MTD devices
+ * were specified, UBI does not attach anything at initialization time, but
+ * MTD devices can still be attached later using the "UBI control device".
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/stringify.h>
+#include <linux/namei.h>
+#include <linux/stat.h>
+#include <linux/miscdevice.h>
+#include <linux/mtd/partitions.h>
+#include <linux/log2.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/major.h>
+#include "ubi.h"
+
+/* Maximum length of the 'mtd=' parameter */
+#define MTD_PARAM_LEN_MAX 64
+
+/* Maximum number of comma-separated items in the 'mtd=' parameter */
+#define MTD_PARAM_MAX_COUNT 5
+
+/* Maximum value for the number of bad PEBs per 1024 PEBs */
+#define MAX_MTD_UBI_BEB_LIMIT 768
+
+#ifdef CONFIG_MTD_UBI_MODULE
+#define ubi_is_module() 1
+#else
+#define ubi_is_module() 0
+#endif
+
+/**
+ * struct mtd_dev_param - MTD device parameter description data structure.
+ * @name: MTD character device node path, MTD device name, or MTD device number
+ * string
+ * @ubi_num: UBI number
+ * @vid_hdr_offs: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
+ * @enable_fm: enable fastmap when value is non-zero
+ */
+struct mtd_dev_param {
+ char name[MTD_PARAM_LEN_MAX];
+ int ubi_num;
+ int vid_hdr_offs;
+ int max_beb_per1024;
+ int enable_fm;
+};
+
+/* Number of elements set in the @mtd_dev_param array */
+static int mtd_devs;
+
+/* MTD devices specification parameters */
+static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/* UBI module parameter to enable fastmap automatically on non-fastmap images */
+static bool fm_autoconvert;
+static bool fm_debug;
+#endif
+
+/* Slab cache for wear-leveling entries */
+struct kmem_cache *ubi_wl_entry_slab;
+
+/* UBI control character device */
+static struct miscdevice ubi_ctrl_cdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ubi_ctrl",
+ .fops = &ubi_ctrl_cdev_operations,
+};
+
+/* All UBI devices in system */
+static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
+
+/* Serializes UBI devices creations and removals */
+DEFINE_MUTEX(ubi_devices_mutex);
+
+/* Protects @ubi_devices and @ubi->ref_count */
+static DEFINE_SPINLOCK(ubi_devices_lock);
+
+/* "Show" method for files in '/<sysfs>/class/ubi/' */
+/* UBI version attribute ('/<sysfs>/class/ubi/version') */
+static ssize_t version_show(const struct class *class, const struct class_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", UBI_VERSION);
+}
+static CLASS_ATTR_RO(version);
+
+static struct attribute *ubi_class_attrs[] = {
+ &class_attr_version.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ubi_class);
+
+/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
+struct class ubi_class = {
+ .name = UBI_NAME_STR,
+ .class_groups = ubi_class_groups,
+};
+
+static ssize_t dev_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
+static struct device_attribute dev_eraseblock_size =
+ __ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_avail_eraseblocks =
+ __ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_total_eraseblocks =
+ __ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_volumes_count =
+ __ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_max_ec =
+ __ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_reserved_for_bad =
+ __ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_bad_peb_count =
+ __ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_max_vol_count =
+ __ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_min_io_size =
+ __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_bgt_enabled =
+ __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_mtd_num =
+ __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_ro_mode =
+ __ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL);
+
+/**
+ * ubi_volume_notify - send a volume change notification.
+ * @ubi: UBI device description object
+ * @vol: volume description object of the changed volume
+ * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
+ *
+ * This is a helper function which notifies all subscribers about a volume
+ * change event (creation, removal, re-sizing, re-naming, updating). Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
+{
+ int ret;
+ struct ubi_notification nt;
+
+ ubi_do_get_device_info(ubi, &nt.di);
+ ubi_do_get_volume_info(ubi, vol, &nt.vi);
+
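+	/*
+	 * Events that change the volume layout (add/remove/resize/rename)
+	 * are reflected in a freshly written fastmap before the subscribers
+	 * are notified.
+	 */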
+ switch (ntype) {
+ case UBI_VOLUME_ADDED:
+ case UBI_VOLUME_REMOVED:
+ case UBI_VOLUME_RESIZED:
+ case UBI_VOLUME_RENAMED:
+ ret = ubi_update_fastmap(ubi);
+ if (ret)
+ ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
+ }
+
+ return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
+}
+
+/**
+ * ubi_notify_all - send a notification to all volumes.
+ * @ubi: UBI device description object
+ * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
+ * @nb: the notifier to call
+ *
+ * This function walks all volumes of UBI device @ubi and sends the @ntype
+ * notification for each volume. If @nb is %NULL, then all registered notifiers
+ * are called, otherwise only the @nb notifier is called. Returns the number of
+ * sent notifications.
+ */
+int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
+{
+ struct ubi_notification nt;
+ int i, count = 0;
+
+ ubi_do_get_device_info(ubi, &nt.di);
+
+ mutex_lock(&ubi->device_mutex);
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ /*
+ * Since the @ubi->device is locked, and we are not going to
+ * change @ubi->volumes, we do not have to lock
+ * @ubi->volumes_lock.
+ */
+ if (!ubi->volumes[i])
+ continue;
+
+ ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
+ if (nb)
+ nb->notifier_call(nb, ntype, &nt);
+ else
+ blocking_notifier_call_chain(&ubi_notifiers, ntype,
+ &nt);
+ count += 1;
+ }
+ mutex_unlock(&ubi->device_mutex);
+
+ return count;
+}
+
+/**
+ * ubi_enumerate_volumes - send "add" notification for all existing volumes.
+ * @nb: the notifier to call
+ *
+ * This function walks all UBI devices and volumes and sends the
+ * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
+ * registered notifiers are called, otherwise only the @nb notifier is called.
+ * Returns the number of sent notifications.
+ */
+int ubi_enumerate_volumes(struct notifier_block *nb)
+{
+ int i, count = 0;
+
+ /*
+ * Since the @ubi_devices_mutex is locked, and we are not going to
+ * change @ubi_devices, we do not have to lock @ubi_devices_lock.
+ */
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ struct ubi_device *ubi = ubi_devices[i];
+
+ if (!ubi)
+ continue;
+ count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
+ }
+
+ return count;
+}
+
+/**
+ * ubi_get_device - get UBI device.
+ * @ubi_num: UBI device number
+ *
+ * This function returns UBI device description object for UBI device number
+ * @ubi_num, or %NULL if the device does not exist. This function increases the
+ * device reference count to prevent removal of the device. In other words, the
+ * device cannot be removed if its reference count is not zero.
+ */
+struct ubi_device *ubi_get_device(int ubi_num)
+{
+ struct ubi_device *ubi;
+
+ spin_lock(&ubi_devices_lock);
+ ubi = ubi_devices[ubi_num];
+ if (ubi) {
+ ubi_assert(ubi->ref_count >= 0);
+ ubi->ref_count += 1;
+ get_device(&ubi->dev);
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return ubi;
+}
+
+/**
+ * ubi_put_device - drop an UBI device reference.
+ * @ubi: UBI device description object
+ */
+void ubi_put_device(struct ubi_device *ubi)
+{
+ spin_lock(&ubi_devices_lock);
+ ubi->ref_count -= 1;
+ put_device(&ubi->dev);
+ spin_unlock(&ubi_devices_lock);
+}
+
+/**
+ * ubi_get_by_major - get UBI device by character device major number.
+ * @major: major number
+ *
+ * This function is similar to 'ubi_get_device()', but it searches the device
+ * by its major number.
+ */
+struct ubi_device *ubi_get_by_major(int major)
+{
+ int i;
+ struct ubi_device *ubi;
+
+ spin_lock(&ubi_devices_lock);
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ ubi = ubi_devices[i];
+ if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ ubi_assert(ubi->ref_count >= 0);
+ ubi->ref_count += 1;
+ get_device(&ubi->dev);
+ spin_unlock(&ubi_devices_lock);
+ return ubi;
+ }
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return NULL;
+}
+
+/**
+ * ubi_major2num - get UBI device number by character device major number.
+ * @major: major number
+ *
+ * This function looks up the UBI device number by the major number of its
+ * character device. If the UBI device is not found, this function returns
+ * %-ENODEV, otherwise the UBI device number is returned.
+ */
+int ubi_major2num(int major)
+{
+ int i, ubi_num = -ENODEV;
+
+ spin_lock(&ubi_devices_lock);
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ struct ubi_device *ubi = ubi_devices[i];
+
+ if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ ubi_num = ubi->ubi_num;
+ break;
+ }
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return ubi_num;
+}
+
+/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
+static ssize_t dev_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct ubi_device *ubi;
+
+ /*
+ * The below code looks weird, but it actually makes sense. We get the
+ * UBI device reference from the contained 'struct ubi_device'. But it
+ * is unclear whether the device has been removed yet. Indeed, if the
+ * device was removed before we increased its reference count,
+ * 'ubi_get_device()' will return -ENODEV and we fail.
+ *
+ * Remember, 'struct ubi_device' is freed in the release function, so
+ * we still can use 'ubi->ubi_num'.
+ */
+ ubi = container_of(dev, struct ubi_device, dev);
+
+ if (attr == &dev_eraseblock_size)
+ ret = sprintf(buf, "%d\n", ubi->leb_size);
+ else if (attr == &dev_avail_eraseblocks)
+ ret = sprintf(buf, "%d\n", ubi->avail_pebs);
+ else if (attr == &dev_total_eraseblocks)
+ ret = sprintf(buf, "%d\n", ubi->good_peb_count);
+ else if (attr == &dev_volumes_count)
+ ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
+ else if (attr == &dev_max_ec)
+ ret = sprintf(buf, "%d\n", ubi->max_ec);
+ else if (attr == &dev_reserved_for_bad)
+ ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
+ else if (attr == &dev_bad_peb_count)
+ ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
+ else if (attr == &dev_max_vol_count)
+ ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
+ else if (attr == &dev_min_io_size)
+ ret = sprintf(buf, "%d\n", ubi->min_io_size);
+ else if (attr == &dev_bgt_enabled)
+ ret = sprintf(buf, "%d\n", ubi->thread_enabled);
+ else if (attr == &dev_mtd_num)
+ ret = sprintf(buf, "%d\n", ubi->mtd->index);
+ else if (attr == &dev_ro_mode)
+ ret = sprintf(buf, "%d\n", ubi->ro_mode);
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static struct attribute *ubi_dev_attrs[] = {
+ &dev_eraseblock_size.attr,
+ &dev_avail_eraseblocks.attr,
+ &dev_total_eraseblocks.attr,
+ &dev_volumes_count.attr,
+ &dev_max_ec.attr,
+ &dev_reserved_for_bad.attr,
+ &dev_bad_peb_count.attr,
+ &dev_max_vol_count.attr,
+ &dev_min_io_size.attr,
+ &dev_bgt_enabled.attr,
+ &dev_mtd_num.attr,
+ &dev_ro_mode.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(ubi_dev);
+
+static void dev_release(struct device *dev)
+{
+ struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
+
+ kfree(ubi);
+}
+
+/**
+ * kill_volumes - destroy all user volumes.
+ * @ubi: UBI device description object
+ */
+static void kill_volumes(struct ubi_device *ubi)
+{
+ int i;
+
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (ubi->volumes[i])
+ ubi_free_volume(ubi, ubi->volumes[i]);
+}
+
+/**
+ * uif_init - initialize user interfaces for an UBI device.
+ * @ubi: UBI device description object
+ *
+ * This function initializes various user interfaces for an UBI device. If the
+ * initialization fails at an early stage, this function frees all the
+ * resources it has allocated and returns an error.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int uif_init(struct ubi_device *ubi)
+{
+ int i, err;
+ dev_t dev;
+
+ sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
+
+ /*
+ * Major numbers for the UBI character devices are allocated
+	 * dynamically. Major numbers of volume character devices are the
+	 * same as that of the corresponding UBI character device. Minor
+ * numbers of UBI character devices are 0, while minor numbers of
+ * volume character devices start from 1. Thus, we allocate one major
+ * number and ubi->vtbl_slots + 1 minor numbers.
+ */
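+	/*
+	 * For instance, volume 0 of ubi0 is reachable through minor number 1
+	 * of the ubi0 major number; 'vol_cdev_open()' maps the minor number
+	 * back to the volume ID as iminor(inode) - 1.
+	 */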
+ err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
+ if (err) {
+ ubi_err(ubi, "cannot register UBI character devices");
+ return err;
+ }
+
+ ubi->dev.devt = dev;
+
+ ubi_assert(MINOR(dev) == 0);
+ cdev_init(&ubi->cdev, &ubi_cdev_operations);
+ dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
+ ubi->cdev.owner = THIS_MODULE;
+
+ dev_set_name(&ubi->dev, UBI_NAME_STR "%d", ubi->ubi_num);
+ err = cdev_device_add(&ubi->cdev, &ubi->dev);
+ if (err)
+ goto out_unreg;
+
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (ubi->volumes[i]) {
+ err = ubi_add_volume(ubi, ubi->volumes[i]);
+ if (err) {
+ ubi_err(ubi, "cannot add volume %d", i);
+ ubi->volumes[i] = NULL;
+ goto out_volumes;
+ }
+ }
+
+ return 0;
+
+out_volumes:
+ kill_volumes(ubi);
+ cdev_device_del(&ubi->cdev, &ubi->dev);
+out_unreg:
+ unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
+ ubi_err(ubi, "cannot initialize UBI %s, error %d",
+ ubi->ubi_name, err);
+ return err;
+}
+
+/**
+ * uif_close - close user interfaces for an UBI device.
+ * @ubi: UBI device description object
+ *
+ * Note, since this function un-registers UBI volume device objects (@vol->dev),
+ * the memory allocated for the volumes is freed as well (in the release
+ * function).
+ */
+static void uif_close(struct ubi_device *ubi)
+{
+ kill_volumes(ubi);
+ cdev_device_del(&ubi->cdev, &ubi->dev);
+ unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
+}
+
+/**
+ * ubi_free_volumes_from - free volumes from specific index.
+ * @ubi: UBI device description object
+ * @from: the index to start freeing volumes from
+ */
+static void ubi_free_volumes_from(struct ubi_device *ubi, int from)
+{
+ int i;
+
+ for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ if (!ubi->volumes[i])
+ continue;
+ ubi_eba_replace_table(ubi->volumes[i], NULL);
+ ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
+ kfree(ubi->volumes[i]);
+ ubi->volumes[i] = NULL;
+ }
+}
+
+/**
+ * ubi_free_all_volumes - free all volumes.
+ * @ubi: UBI device description object
+ */
+void ubi_free_all_volumes(struct ubi_device *ubi)
+{
+ ubi_free_volumes_from(ubi, 0);
+}
+
+/**
+ * ubi_free_internal_volumes - free internal volumes.
+ * @ubi: UBI device description object
+ */
+void ubi_free_internal_volumes(struct ubi_device *ubi)
+{
+ ubi_free_volumes_from(ubi, ubi->vtbl_slots);
+}
+
+static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
+{
+ int limit, device_pebs;
+ uint64_t device_size;
+
+ if (!max_beb_per1024) {
+ /*
+ * Since max_beb_per1024 has not been set by the user in either
+ * the cmdline or Kconfig, use mtd_max_bad_blocks to set the
+ * limit if it is supported by the device.
+ */
+ limit = mtd_max_bad_blocks(ubi->mtd, 0, ubi->mtd->size);
+ if (limit < 0)
+ return 0;
+ return limit;
+ }
+
+ /*
+ * Here we are using size of the entire flash chip and
+ * not just the MTD partition size because the maximum
+ * number of bad eraseblocks is a percentage of the
+ * whole device and bad eraseblocks are not fairly
+ * distributed over the flash chip. So the worst case
+ * is that all the bad eraseblocks of the chip are in
+ * the MTD partition we are attaching (ubi->mtd).
+ */
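+	/*
+	 * Illustrative example: a chip with 2048 PEBs and max_beb_per1024 set
+	 * to 20 gives limit = 2048 * 20 / 1024 = 40 reserved PEBs.
+	 */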
+ device_size = mtd_get_device_size(ubi->mtd);
+ device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
+ limit = mult_frac(device_pebs, max_beb_per1024, 1024);
+
+ /* Round it up */
+ if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
+ limit += 1;
+
+ return limit;
+}
+
+/**
+ * io_init - initialize I/O sub-system for a given UBI device.
+ * @ubi: UBI device description object
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
+ *
+ * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
+ * assumed:
+ * o EC header is always at offset zero - this cannot be changed;
+ * o VID header starts just after the EC header at the closest address
+ * aligned to @io->hdrs_min_io_size;
+ * o data starts just after the VID header at the closest address aligned to
+ * @io->min_io_size
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int io_init(struct ubi_device *ubi, int max_beb_per1024)
+{
+ dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
+ dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
+
+ if (ubi->mtd->numeraseregions != 0) {
+ /*
+ * Some flashes have several erase regions. Different regions
+ * may have different eraseblock size and other
+ * characteristics. It looks like mostly multi-region flashes
+ * have one "main" region and one or more small regions to
+ * store boot loader code or boot parameters or whatever. I
+ * guess we should just pick the largest region. But this is
+ * not implemented.
+ */
+ ubi_err(ubi, "multiple regions, not implemented");
+ return -EINVAL;
+ }
+
+ if (ubi->vid_hdr_offset < 0)
+ return -EINVAL;
+
+ /*
+ * Note, in this implementation we support MTD devices with 0x7FFFFFFF
+ * physical eraseblocks maximum.
+ */
+
+ ubi->peb_size = ubi->mtd->erasesize;
+ ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
+ ubi->flash_size = ubi->mtd->size;
+
+ if (mtd_can_have_bb(ubi->mtd)) {
+ ubi->bad_allowed = 1;
+ ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
+ }
+
+ if (ubi->mtd->type == MTD_NORFLASH)
+ ubi->nor_flash = 1;
+
+ ubi->min_io_size = ubi->mtd->writesize;
+ ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
+
+ /*
+ * Make sure minimal I/O unit is power of 2. Note, there is no
+ * fundamental reason for this assumption. It is just an optimization
+ * which allows us to avoid costly division operations.
+ */
+ if (!is_power_of_2(ubi->min_io_size)) {
+ ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
+ ubi->min_io_size);
+ return -EINVAL;
+ }
+
+ ubi_assert(ubi->hdrs_min_io_size > 0);
+ ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
+ ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
+
+ ubi->max_write_size = ubi->mtd->writebufsize;
+ /*
+	 * The maximum write size has to be greater than or equal to the
+	 * min. I/O size, a multiple of it, and a power of 2.
+ */
+ if (ubi->max_write_size < ubi->min_io_size ||
+ ubi->max_write_size % ubi->min_io_size ||
+ !is_power_of_2(ubi->max_write_size)) {
+ ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
+ ubi->max_write_size, ubi->min_io_size);
+ return -EINVAL;
+ }
+
+ /* Calculate default aligned sizes of EC and VID headers */
+ ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
+ ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
+
+ dbg_gen("min_io_size %d", ubi->min_io_size);
+ dbg_gen("max_write_size %d", ubi->max_write_size);
+ dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+ dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
+ dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
+
+ if (ubi->vid_hdr_offset == 0)
+ /* Default offset */
+ ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
+ ubi->ec_hdr_alsize;
+ else {
+ ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
+ ~(ubi->hdrs_min_io_size - 1);
+ ubi->vid_hdr_shift = ubi->vid_hdr_offset -
+ ubi->vid_hdr_aloffset;
+ }
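+	/*
+	 * Illustrative example: with hdrs_min_io_size = 512 and a user
+	 * supplied vid_hdr_offset of 1984, vid_hdr_aloffset becomes 1536 and
+	 * vid_hdr_shift becomes 448, i.e. the VID header lives 448 bytes into
+	 * its aligned minimal I/O unit.
+	 */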
+
+ /*
+	 * The VID header buffer is allocated with ubi->vid_hdr_alsize bytes,
+	 * as described in the comments in io.c. Make sure the VID header
+	 * shift + UBI_VID_HDR_SIZE does not exceed ubi->vid_hdr_alsize, so
+	 * that VID header operations never access memory out of bounds.
+ */
+ if ((ubi->vid_hdr_shift + UBI_VID_HDR_SIZE) > ubi->vid_hdr_alsize) {
+ ubi_err(ubi, "Invalid VID header offset %d, VID header shift(%d)"
+ " + VID header size(%zu) > VID header aligned size(%d).",
+ ubi->vid_hdr_offset, ubi->vid_hdr_shift,
+ UBI_VID_HDR_SIZE, ubi->vid_hdr_alsize);
+ return -EINVAL;
+ }
+
+ /* Similar for the data offset */
+ ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
+ ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
+
+ dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
+ dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
+ dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
+ dbg_gen("leb_start %d", ubi->leb_start);
+
+ /* The shift must be aligned to 32-bit boundary */
+ if (ubi->vid_hdr_shift % 4) {
+ ubi_err(ubi, "unaligned VID header shift %d",
+ ubi->vid_hdr_shift);
+ return -EINVAL;
+ }
+
+ /* Check sanity */
+ if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
+ ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
+ ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
+ ubi->leb_start & (ubi->min_io_size - 1)) {
+ ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
+ ubi->vid_hdr_offset, ubi->leb_start);
+ return -EINVAL;
+ }
+
+ /*
+ * Set maximum amount of physical erroneous eraseblocks to be 10%.
+ * Erroneous PEB are those which have read errors.
+ */
+ ubi->max_erroneous = ubi->peb_count / 10;
+ if (ubi->max_erroneous < 16)
+ ubi->max_erroneous = 16;
+ dbg_gen("max_erroneous %d", ubi->max_erroneous);
+
+ /*
+ * It may happen that EC and VID headers are situated in one minimal
+ * I/O unit. In this case we can only accept this UBI image in
+ * read-only mode.
+ */
+ if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
+ ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
+ ubi->ro_mode = 1;
+ }
+
+ ubi->leb_size = ubi->peb_size - ubi->leb_start;
+
+ if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
+ ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
+ ubi->mtd->index);
+ ubi->ro_mode = 1;
+ }
+
+ /*
+ * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
+ * unfortunately, MTD does not provide this information. We should loop
+ * over all physical eraseblocks and invoke mtd->block_is_bad() for
+ * each physical eraseblock. So, we leave @ubi->bad_peb_count
+ * uninitialized so far.
+ */
+
+ return 0;
+}
+
+/**
+ * autoresize - re-size the volume which has the "auto-resize" flag set.
+ * @ubi: UBI device description object
+ * @vol_id: ID of the volume to re-size
+ *
+ * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
+ * the volume table to the largest possible size. See comments in ubi-header.h
+ * for more description of the flag. Returns zero in case of success and a
+ * negative error code in case of failure.
+ */
+static int autoresize(struct ubi_device *ubi, int vol_id)
+{
+ struct ubi_volume_desc desc;
+ struct ubi_volume *vol = ubi->volumes[vol_id];
+ int err, old_reserved_pebs = vol->reserved_pebs;
+
+ if (ubi->ro_mode) {
+ ubi_warn(ubi, "skip auto-resize because of R/O mode");
+ return 0;
+ }
+
+ /*
+ * Clear the auto-resize flag in the volume in-memory copy of the
+ * volume table, and 'ubi_resize_volume()' will propagate this change
+ * to the flash.
+ */
+ ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
+
+ if (ubi->avail_pebs == 0) {
+ struct ubi_vtbl_record vtbl_rec;
+
+ /*
+ * No available PEBs to re-size the volume, clear the flag on
+ * flash and exit.
+ */
+ vtbl_rec = ubi->vtbl[vol_id];
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
+ vol_id);
+ } else {
+ desc.vol = vol;
+ err = ubi_resize_volume(&desc,
+ old_reserved_pebs + ubi->avail_pebs);
+ if (err)
+ ubi_err(ubi, "cannot auto-resize volume %d",
+ vol_id);
+ }
+
+ if (err)
+ return err;
+
+ ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
+ vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs);
+ return 0;
+}
+
+/**
+ * ubi_attach_mtd_dev - attach an MTD device.
+ * @mtd: MTD device description object
+ * @ubi_num: number to assign to the new UBI device
+ * @vid_hdr_offset: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
+ * @disable_fm: whether to disable fastmap
+ *
+ * This function attaches MTD device @mtd to UBI and assigns number @ubi_num
+ * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
+ * which case this function finds a vacant device number and assigns it
+ * automatically. Returns the new UBI device number in case of success and a
+ * negative error code in case of failure.
+ *
+ * If @disable_fm is true, UBI does not create a new fastmap even if the
+ * module parameter 'fm_autoconvert' is set, and an existing old fastmap will
+ * be destroyed after a full scan.
+ *
+ * Note, the invocations of this function have to be serialized by the
+ * @ubi_devices_mutex.
+ */
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ int vid_hdr_offset, int max_beb_per1024, bool disable_fm)
+{
+ struct ubi_device *ubi;
+ int i, err;
+
+ if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
+ return -EINVAL;
+
+ if (!max_beb_per1024)
+ max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
+
+ /*
+ * Check if we already have the same MTD device attached.
+ *
+ * Note, this function assumes that UBI devices creations and deletions
+ * are serialized, so it does not take the &ubi_devices_lock.
+ */
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ ubi = ubi_devices[i];
+ if (ubi && mtd->index == ubi->mtd->index) {
+ pr_err("ubi: mtd%d is already attached to ubi%d\n",
+ mtd->index, i);
+ return -EEXIST;
+ }
+ }
+
+ /*
+ * Make sure this MTD device is not emulated on top of an UBI volume
+	 * already. Well, generally this recursion works fine, but there are
+	 * various problems, such as the UBI module taking a reference to
+	 * itself by attaching (and thus, opening) the emulated MTD device.
+	 * This makes it impossible to unload the module. And in general it
+	 * makes no sense to attach emulated MTD devices, so we prohibit this.
+ */
+ if (mtd->type == MTD_UBIVOLUME) {
+ pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI\n",
+ mtd->index);
+ return -EINVAL;
+ }
+
+ /*
+ * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
+ * MLC NAND is different and needs special care, otherwise UBI or UBIFS
+ * will die soon and you will lose all your data.
+ * Relax this rule if the partition we're attaching to operates in SLC
+ * mode.
+ */
+ if (mtd->type == MTD_MLCNANDFLASH &&
+ !(mtd->flags & MTD_SLC_ON_MLC_EMULATION)) {
+ pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
+ mtd->index);
+ return -EINVAL;
+ }
+
+ /* UBI cannot work on flashes with zero erasesize. */
+ if (!mtd->erasesize) {
+ pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n",
+ mtd->index);
+ return -EINVAL;
+ }
+
+ if (ubi_num == UBI_DEV_NUM_AUTO) {
+ /* Search for an empty slot in the @ubi_devices array */
+ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
+ if (!ubi_devices[ubi_num])
+ break;
+ if (ubi_num == UBI_MAX_DEVICES) {
+ pr_err("ubi: only %d UBI devices may be created\n",
+ UBI_MAX_DEVICES);
+ return -ENFILE;
+ }
+ } else {
+ if (ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+
+ /* Make sure ubi_num is not busy */
+ if (ubi_devices[ubi_num]) {
+ pr_err("ubi: ubi%i already exists\n", ubi_num);
+ return -EEXIST;
+ }
+ }
+
+ ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
+ if (!ubi)
+ return -ENOMEM;
+
+ device_initialize(&ubi->dev);
+ ubi->dev.release = dev_release;
+ ubi->dev.class = &ubi_class;
+ ubi->dev.groups = ubi_dev_groups;
+ ubi->dev.parent = &mtd->dev;
+
+ ubi->mtd = mtd;
+ ubi->ubi_num = ubi_num;
+ ubi->vid_hdr_offset = vid_hdr_offset;
+ ubi->autoresize_vol_id = -1;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_pool.used = ubi->fm_pool.size = 0;
+ ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
+
+ /*
+ * fm_pool.max_size is 5% of the total number of PEBs but it's also
+	 * clamped between UBI_FM_MIN_POOL_SIZE and UBI_FM_MAX_POOL_SIZE.
+ */
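+	/*
+	 * For example (before clamping), a 4096-PEB device yields
+	 * (4096 / 100) * 5 = 200 as the candidate pool size.
+	 */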
+ ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
+ ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
+ ubi->fm_pool.max_size = max(ubi->fm_pool.max_size,
+ UBI_FM_MIN_POOL_SIZE);
+
+ ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
+ ubi->fm_disabled = (!fm_autoconvert || disable_fm) ? 1 : 0;
+ if (fm_debug)
+ ubi_enable_dbg_chk_fastmap(ubi);
+
+ if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
+ <= UBI_FM_MAX_START) {
+ ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
+ UBI_FM_MAX_START);
+ ubi->fm_disabled = 1;
+ }
+
+ ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg(ubi, "default fastmap WL pool size: %d",
+ ubi->fm_wl_pool.max_size);
+#else
+ ubi->fm_disabled = 1;
+#endif
+ mutex_init(&ubi->buf_mutex);
+ mutex_init(&ubi->ckvol_mutex);
+ mutex_init(&ubi->device_mutex);
+ spin_lock_init(&ubi->volumes_lock);
+ init_rwsem(&ubi->fm_protect);
+ init_rwsem(&ubi->fm_eba_sem);
+
+ ubi_msg(ubi, "attaching mtd%d", mtd->index);
+
+ err = io_init(ubi, max_beb_per1024);
+ if (err)
+ goto out_free;
+
+ err = -ENOMEM;
+ ubi->peb_buf = vmalloc(ubi->peb_size);
+ if (!ubi->peb_buf)
+ goto out_free;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_size = ubi_calc_fm_size(ubi);
+ ubi->fm_buf = vzalloc(ubi->fm_size);
+ if (!ubi->fm_buf)
+ goto out_free;
+#endif
+ err = ubi_attach(ubi, disable_fm ? 1 : 0);
+ if (err) {
+ ubi_err(ubi, "failed to attach mtd%d, error %d",
+ mtd->index, err);
+ goto out_free;
+ }
+
+ if (ubi->autoresize_vol_id != -1) {
+ err = autoresize(ubi, ubi->autoresize_vol_id);
+ if (err)
+ goto out_detach;
+ }
+
+ err = uif_init(ubi);
+ if (err)
+ goto out_detach;
+
+ err = ubi_debugfs_init_dev(ubi);
+ if (err)
+ goto out_uif;
+
+ ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
+ if (IS_ERR(ubi->bgt_thread)) {
+ err = PTR_ERR(ubi->bgt_thread);
+ ubi_err(ubi, "cannot spawn \"%s\", error %d",
+ ubi->bgt_name, err);
+ goto out_debugfs;
+ }
+
+ ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
+ mtd->index, mtd->name, ubi->flash_size >> 20);
+ ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
+ ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
+ ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
+ ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
+ ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
+ ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
+ ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
+ ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
+ ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d",
+ ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
+ ubi->vtbl_slots);
+ ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
+ ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
+ ubi->image_seq);
+ ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
+ ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
+
+ /*
+ * The below lock makes sure we do not race with 'ubi_thread()' which
+ * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
+ */
+ spin_lock(&ubi->wl_lock);
+ ubi->thread_enabled = 1;
+ wake_up_process(ubi->bgt_thread);
+ spin_unlock(&ubi->wl_lock);
+
+ ubi_devices[ubi_num] = ubi;
+ ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
+ return ubi_num;
+
+out_debugfs:
+ ubi_debugfs_exit_dev(ubi);
+out_uif:
+ uif_close(ubi);
+out_detach:
+ ubi_wl_close(ubi);
+ ubi_free_all_volumes(ubi);
+ vfree(ubi->vtbl);
+out_free:
+ vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
+ put_device(&ubi->dev);
+ return err;
+}
+
+/**
+ * ubi_detach_mtd_dev - detach an MTD device.
+ * @ubi_num: UBI device number to detach from
+ * @anyway: detach MTD even if device reference count is not zero
+ *
+ * This function destroys an UBI device number @ubi_num and detaches the
+ * underlying MTD device. Returns zero in case of success and %-EBUSY if the
+ * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
+ * exist.
+ *
+ * Note, the invocations of this function have to be serialized by the
+ * @ubi_devices_mutex.
+ */
+int ubi_detach_mtd_dev(int ubi_num, int anyway)
+{
+ struct ubi_device *ubi;
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -EINVAL;
+
+ spin_lock(&ubi_devices_lock);
+ put_device(&ubi->dev);
+ ubi->ref_count -= 1;
+ if (ubi->ref_count) {
+ if (!anyway) {
+ spin_unlock(&ubi_devices_lock);
+ return -EBUSY;
+ }
+ /* This may only happen if there is a bug */
+ ubi_err(ubi, "%s reference count %d, destroy anyway",
+ ubi->ubi_name, ubi->ref_count);
+ }
+ ubi_devices[ubi_num] = NULL;
+ spin_unlock(&ubi_devices_lock);
+
+ ubi_assert(ubi_num == ubi->ubi_num);
+ ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
+ ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	/*
+	 * If we don't write a new fastmap at detach time we lose all
+	 * EC updates that have been made since the last written fastmap.
+	 * In case of fastmap debugging we omit the update to simulate an
+	 * unclean shutdown.
+	 */
+ if (!ubi_dbg_chk_fastmap(ubi))
+ ubi_update_fastmap(ubi);
+#endif
+ /*
+ * Before freeing anything, we have to stop the background thread to
+ * prevent it from doing anything on this device while we are freeing.
+ */
+ if (ubi->bgt_thread)
+ kthread_stop(ubi->bgt_thread);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ cancel_work_sync(&ubi->fm_work);
+#endif
+ ubi_debugfs_exit_dev(ubi);
+ uif_close(ubi);
+
+ ubi_wl_close(ubi);
+ ubi_free_internal_volumes(ubi);
+ vfree(ubi->vtbl);
+ vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
+ ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
+ put_mtd_device(ubi->mtd);
+ put_device(&ubi->dev);
+ return 0;
+}
+
+/**
+ * open_mtd_by_chdev - open an MTD device by its character device node path.
+ * @mtd_dev: MTD character device node path
+ *
+ * This helper function opens an MTD device by its character node device path.
+ * Returns MTD device description object in case of success and a negative
+ * error code in case of failure.
+ */
+static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
+{
+ int err, minor;
+ struct path path;
+ struct kstat stat;
+
+ /* Probably this is an MTD character device node path */
+ err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
+ if (err)
+ return ERR_PTR(err);
+
+ err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
+ path_put(&path);
+ if (err)
+ return ERR_PTR(err);
+
+ /* MTD device number is defined by the major / minor numbers */
+ if (MAJOR(stat.rdev) != MTD_CHAR_MAJOR || !S_ISCHR(stat.mode))
+ return ERR_PTR(-EINVAL);
+
+ minor = MINOR(stat.rdev);
+
+ if (minor & 1)
+ /*
+		 * Odd minors belong to the read-only "/dev/mtdrX" device
+		 * nodes; we do not think supporting them is needed, so we do
+		 * not support them to avoid doing extra work.
+ */
+ return ERR_PTR(-EINVAL);
+
+ return get_mtd_device(NULL, minor / 2);
+}
+
+/**
+ * open_mtd_device - open MTD device by name, character device path, or number.
+ * @mtd_dev: name, character device node path, or MTD device device number
+ *
+ * This function tries to open the MTD device described by the @mtd_dev
+ * string, which is first treated as an ASCII MTD device number, and if that
+ * fails, as an MTD device name, and if that also fails, as an MTD character
+ * device node path. Returns an MTD device description object in
+ * case of success and a negative error code in case of failure.
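+ *
+ * For example, "4" is treated as MTD device number 4, "content" as the MTD
+ * device named "content", and "/dev/mtd0" as an MTD character device node
+ * path.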
+ */
+static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
+{
+ struct mtd_info *mtd;
+ int mtd_num;
+ char *endp;
+
+ mtd_num = simple_strtoul(mtd_dev, &endp, 0);
+ if (*endp != '\0' || mtd_dev == endp) {
+ /*
+ * This does not look like an ASCII integer, probably this is
+ * MTD device name.
+ */
+ mtd = get_mtd_device_nm(mtd_dev);
+ if (PTR_ERR(mtd) == -ENODEV)
+ /* Probably this is an MTD character device node path */
+ mtd = open_mtd_by_chdev(mtd_dev);
+ } else
+ mtd = get_mtd_device(NULL, mtd_num);
+
+ return mtd;
+}
+
+static int __init ubi_init(void)
+{
+ int err, i, k;
+
+ /* Ensure that EC and VID headers have correct size */
+ BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
+ BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+
+ if (mtd_devs > UBI_MAX_DEVICES) {
+ pr_err("UBI error: too many MTD devices, maximum is %d\n",
+ UBI_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ /* Create base sysfs directory and sysfs files */
+ err = class_register(&ubi_class);
+ if (err < 0)
+ return err;
+
+ err = misc_register(&ubi_ctrl_cdev);
+ if (err) {
+ pr_err("UBI error: cannot register device\n");
+ goto out;
+ }
+
+ ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+ sizeof(struct ubi_wl_entry),
+ 0, 0, NULL);
+ if (!ubi_wl_entry_slab) {
+ err = -ENOMEM;
+ goto out_dev_unreg;
+ }
+
+ err = ubi_debugfs_init();
+ if (err)
+ goto out_slab;
+
+ /* Attach MTD devices */
+ for (i = 0; i < mtd_devs; i++) {
+ struct mtd_dev_param *p = &mtd_dev_param[i];
+ struct mtd_info *mtd;
+
+ cond_resched();
+
+ mtd = open_mtd_device(p->name);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("UBI error: cannot open mtd %s, error %d\n",
+ p->name, err);
+			/* See the comment below regarding ubi_is_module(). */
+ if (ubi_is_module())
+ goto out_detach;
+ continue;
+ }
+
+ mutex_lock(&ubi_devices_mutex);
+ err = ubi_attach_mtd_dev(mtd, p->ubi_num,
+ p->vid_hdr_offs, p->max_beb_per1024,
+ p->enable_fm == 0);
+ mutex_unlock(&ubi_devices_mutex);
+ if (err < 0) {
+ pr_err("UBI error: cannot attach mtd%d\n",
+ mtd->index);
+ put_mtd_device(mtd);
+
+ /*
+ * Originally UBI stopped initializing on any error.
+ * However, later on it was found out that this
+ * behavior is not very good when UBI is compiled into
+ * the kernel and the MTD devices to attach are passed
+ * through the command line. Indeed, UBI failure
+ * stopped whole boot sequence.
+ *
+ * To fix this, we changed the behavior for the
+ * non-module case, but preserved the old behavior for
+ * the module case, just for compatibility. This is a
+ * little inconsistent, though.
+ */
+ if (ubi_is_module())
+ goto out_detach;
+ }
+ }
+
+ err = ubiblock_init();
+ if (err) {
+ pr_err("UBI error: block: cannot initialize, error %d\n", err);
+
+		/* See the comment above regarding ubi_is_module(). */
+ if (ubi_is_module())
+ goto out_detach;
+ }
+
+ return 0;
+
+out_detach:
+ for (k = 0; k < i; k++)
+ if (ubi_devices[k]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ ubi_debugfs_exit();
+out_slab:
+ kmem_cache_destroy(ubi_wl_entry_slab);
+out_dev_unreg:
+ misc_deregister(&ubi_ctrl_cdev);
+out:
+ class_unregister(&ubi_class);
+ pr_err("UBI error: cannot initialize UBI, error %d\n", err);
+ return err;
+}
+late_initcall(ubi_init);
+
+static void __exit ubi_exit(void)
+{
+ int i;
+
+ ubiblock_exit();
+
+ for (i = 0; i < UBI_MAX_DEVICES; i++)
+ if (ubi_devices[i]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ ubi_debugfs_exit();
+ kmem_cache_destroy(ubi_wl_entry_slab);
+ misc_deregister(&ubi_ctrl_cdev);
+ class_unregister(&ubi_class);
+}
+module_exit(ubi_exit);
+
+/**
+ * bytes_str_to_int - convert a number of bytes string into an integer.
+ * @str: the string to convert
+ *
+ * This function returns the resulting positive integer in case of success and
+ * a negative error code in case of failure.
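+ *
+ * For example, "2M" is parsed as 2 * 1024 * 1024 = 2097152 bytes, and "16K"
+ * as 16 * 1024 = 16384 bytes.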
+ */
+static int bytes_str_to_int(const char *str)
+{
+ char *endp;
+ unsigned long result;
+
+ result = simple_strtoul(str, &endp, 0);
+ if (str == endp || result >= INT_MAX) {
+ pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
+ return -EINVAL;
+ }
+
+ switch (*endp) {
+ case 'G':
+ result *= 1024;
+ fallthrough;
+ case 'M':
+ result *= 1024;
+ fallthrough;
+ case 'K':
+ result *= 1024;
+ break;
+ case '\0':
+ break;
+ default:
+ pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
+ return -EINVAL;
+ }
+
+ return result;
+}
+
+/**
+ * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
+ * @val: the parameter value to parse
+ * @kp: not used
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of error.
+ */
+static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
+{
+ int i, len;
+ struct mtd_dev_param *p;
+ char buf[MTD_PARAM_LEN_MAX];
+ char *pbuf = &buf[0];
+ char *tokens[MTD_PARAM_MAX_COUNT], *token;
+
+ if (!val)
+ return -EINVAL;
+
+ if (mtd_devs == UBI_MAX_DEVICES) {
+ pr_err("UBI error: too many parameters, max. is %d\n",
+ UBI_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ len = strnlen(val, MTD_PARAM_LEN_MAX);
+ if (len == MTD_PARAM_LEN_MAX) {
+ pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
+ val, MTD_PARAM_LEN_MAX);
+ return -EINVAL;
+ }
+
+ if (len == 0) {
+ pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
+ return 0;
+ }
+
+ strcpy(buf, val);
+
+ /* Get rid of the final newline */
+ if (buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
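+	/*
+	 * Split the parameter into at most MTD_PARAM_MAX_COUNT comma-separated
+	 * tokens: name, vid_hdr_offs, max_beb_per1024, ubi_num and enable_fm,
+	 * in that order.
+	 */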
+ for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
+ tokens[i] = strsep(&pbuf, ",");
+
+ if (pbuf) {
+ pr_err("UBI error: too many arguments at \"%s\"\n", val);
+ return -EINVAL;
+ }
+
+ p = &mtd_dev_param[mtd_devs];
+ strcpy(&p->name[0], tokens[0]);
+
+ token = tokens[1];
+ if (token) {
+ p->vid_hdr_offs = bytes_str_to_int(token);
+
+ if (p->vid_hdr_offs < 0)
+ return p->vid_hdr_offs;
+ }
+
+ token = tokens[2];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->max_beb_per1024);
+
+ if (err) {
+ pr_err("UBI error: bad value for max_beb_per1024 parameter: %s\n",
+ token);
+ return -EINVAL;
+ }
+ }
+
+ token = tokens[3];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->ubi_num);
+
+ if (err) {
+ pr_err("UBI error: bad value for ubi_num parameter: %s\n",
+ token);
+ return -EINVAL;
+ }
+ } else
+ p->ubi_num = UBI_DEV_NUM_AUTO;
+
+ token = tokens[4];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->enable_fm);
+
+ if (err) {
+ pr_err("UBI error: bad value for enable_fm parameter: %s\n",
+ token);
+ return -EINVAL;
+ }
+ } else
+ p->enable_fm = 0;
+
+ mtd_devs += 1;
+ return 0;
+}
+
+module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 0400);
+MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num[,enable_fm]]]].\n"
+		      "Multiple \"mtd\" parameters may be specified.\n"
+		      "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
+		      "Optional \"vid_hdr_offs\" parameter specifies the UBI VID header position; the default is used if it is 0.\n"
+		      "Optional \"max_beb_per1024\" parameter specifies the maximum expected number of bad eraseblocks per 1024 eraseblocks; the default ("
+		      __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") is used if it is 0.\n"
+		      "Optional \"ubi_num\" parameter specifies the UBI device number which has to be assigned to the newly created UBI device (assigned automatically by default).\n"
+		      "Optional \"enable_fm\" parameter determines whether to enable fastmap during attach. If the value is non-zero, fastmap is enabled. Default value is 0.\n"
+		      "\n"
+		      "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
+		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with the default VID header offset.\n"
+		      "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using the default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling (e.g. if the NAND *chipset* has 4096 PEBs, 100 will be reserved for this UBI device).\n"
+		      "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 using default values for the other fields.\n"
+		      "Example 5: mtd=1,0,0,5 mtd=2,0,0,6,1 - attach MTD device /dev/mtd1 to UBI 5 and disable fastmap; attach MTD device /dev/mtd2 to UBI 6 and enable fastmap (only works when fastmap is enabled and fm_autoconvert=Y).");
+#ifdef CONFIG_MTD_UBI_FASTMAP
+module_param(fm_autoconvert, bool, 0644);
+MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
+module_param(fm_debug, bool, 0);
+MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
+#endif
+MODULE_VERSION(__stringify(UBI_VERSION));
+MODULE_DESCRIPTION("UBI - Unsorted Block Images");
+MODULE_AUTHOR("Artem Bityutskiy");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
new file mode 100644
index 0000000000..f43430b9c1
--- /dev/null
+++ b/drivers/mtd/ubi/cdev.c
@@ -0,0 +1,1108 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file includes implementation of UBI character device operations.
+ *
+ * There are two kinds of character devices in UBI: UBI character devices and
+ * UBI volume character devices. UBI character devices allow users to
+ * manipulate whole volumes: create, remove, and re-size them. Volume character
+ * devices provide volume I/O capabilities.
+ *
+ * Major and minor numbers are assigned dynamically to both UBI and volume
+ * character devices.
+ *
+ * Well, there is a third kind of character device - the UBI control
+ * character device, which allows one to manipulate UBI devices - create and
+ * delete them. In other words, it is used for attaching and detaching MTD
+ * devices.
+ */
+
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/ioctl.h>
+#include <linux/capability.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
+#include <linux/math64.h>
+#include <mtd/ubi-user.h>
+#include "ubi.h"
+
+/**
+ * get_exclusive - get exclusive access to an UBI volume.
+ * @desc: volume descriptor
+ *
+ * This function changes UBI volume open mode to "exclusive". Returns previous
+ * mode value (positive integer) in case of success and a negative error code
+ * in case of failure.
+ */
+static int get_exclusive(struct ubi_volume_desc *desc)
+{
+ int users, err;
+ struct ubi_volume *vol = desc->vol;
+
+ spin_lock(&vol->ubi->volumes_lock);
+ users = vol->readers + vol->writers + vol->exclusive + vol->metaonly;
+ ubi_assert(users > 0);
+ if (users > 1) {
+ ubi_err(vol->ubi, "%d users for volume %d", users, vol->vol_id);
+ err = -EBUSY;
+ } else {
+ vol->readers = vol->writers = vol->metaonly = 0;
+ vol->exclusive = 1;
+ err = desc->mode;
+ desc->mode = UBI_EXCLUSIVE;
+ }
+ spin_unlock(&vol->ubi->volumes_lock);
+
+ return err;
+}
+
+/**
+ * revoke_exclusive - revoke exclusive mode.
+ * @desc: volume descriptor
+ * @mode: new mode to switch to
+ */
+static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
+{
+ struct ubi_volume *vol = desc->vol;
+
+ spin_lock(&vol->ubi->volumes_lock);
+ ubi_assert(vol->readers == 0 && vol->writers == 0 && vol->metaonly == 0);
+ ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
+ vol->exclusive = 0;
+ if (mode == UBI_READONLY)
+ vol->readers = 1;
+ else if (mode == UBI_READWRITE)
+ vol->writers = 1;
+ else if (mode == UBI_METAONLY)
+ vol->metaonly = 1;
+ else
+ vol->exclusive = 1;
+ spin_unlock(&vol->ubi->volumes_lock);
+
+ desc->mode = mode;
+}
+
+static int vol_cdev_open(struct inode *inode, struct file *file)
+{
+ struct ubi_volume_desc *desc;
+ int vol_id = iminor(inode) - 1, mode, ubi_num;
+
+ ubi_num = ubi_major2num(imajor(inode));
+ if (ubi_num < 0)
+ return ubi_num;
+
+ if (file->f_mode & FMODE_WRITE)
+ mode = UBI_READWRITE;
+ else
+ mode = UBI_READONLY;
+
+ dbg_gen("open device %d, volume %d, mode %d",
+ ubi_num, vol_id, mode);
+
+ desc = ubi_open_volume(ubi_num, vol_id, mode);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ file->private_data = desc;
+ return 0;
+}
+
+static int vol_cdev_release(struct inode *inode, struct file *file)
+{
+ struct ubi_volume_desc *desc = file->private_data;
+ struct ubi_volume *vol = desc->vol;
+
+ dbg_gen("release device %d, volume %d, mode %d",
+ vol->ubi->ubi_num, vol->vol_id, desc->mode);
+
+ if (vol->updating) {
+ ubi_warn(vol->ubi, "update of volume %d not finished, volume is damaged",
+ vol->vol_id);
+ ubi_assert(!vol->changing_leb);
+ vol->updating = 0;
+ vfree(vol->upd_buf);
+ } else if (vol->changing_leb) {
+ dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel",
+ vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num,
+ vol->vol_id);
+ vol->changing_leb = 0;
+ vfree(vol->upd_buf);
+ }
+
+ ubi_close_volume(desc);
+ return 0;
+}
+
+static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct ubi_volume_desc *desc = file->private_data;
+ struct ubi_volume *vol = desc->vol;
+
+ if (vol->updating) {
+ /* Update is in progress, seeking is prohibited */
+ ubi_err(vol->ubi, "updating");
+ return -EBUSY;
+ }
+
+ return fixed_size_llseek(file, offset, origin, vol->used_bytes);
+}
+
+static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
+ int datasync)
+{
+ struct ubi_volume_desc *desc = file->private_data;
+ struct ubi_device *ubi = desc->vol->ubi;
+ struct inode *inode = file_inode(file);
+ int err;
+ inode_lock(inode);
+ err = ubi_sync(ubi->ubi_num);
+ inode_unlock(inode);
+ return err;
+}
+
+static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
+ loff_t *offp)
+{
+ struct ubi_volume_desc *desc = file->private_data;
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int err, lnum, off, len, tbuf_size;
+ size_t count_save = count;
+ void *tbuf;
+
+ dbg_gen("read %zd bytes from offset %lld of volume %d",
+ count, *offp, vol->vol_id);
+
+ if (vol->updating) {
+ ubi_err(vol->ubi, "updating");
+ return -EBUSY;
+ }
+ if (vol->upd_marker) {
+ ubi_err(vol->ubi, "damaged volume, update marker is set");
+ return -EBADF;
+ }
+ if (*offp == vol->used_bytes || count == 0)
+ return 0;
+
+ if (vol->corrupted)
+ dbg_gen("read from corrupted volume %d", vol->vol_id);
+
+ if (*offp + count > vol->used_bytes)
+ count_save = count = vol->used_bytes - *offp;
+
+ tbuf_size = vol->usable_leb_size;
+ if (count < tbuf_size)
+ tbuf_size = ALIGN(count, ubi->min_io_size);
+ tbuf = vmalloc(tbuf_size);
+ if (!tbuf)
+ return -ENOMEM;
+
+ len = count > tbuf_size ? tbuf_size : count;
+ lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
+
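+	/*
+	 * Copy the data out in chunks of at most one LEB: 'lnum' and 'off'
+	 * track the logical eraseblock and the offset within it, and 'len' is
+	 * capped so a chunk never crosses an eraseblock boundary.
+	 */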
+ do {
+ cond_resched();
+
+ if (off + len >= vol->usable_leb_size)
+ len = vol->usable_leb_size - off;
+
+ err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
+ if (err)
+ break;
+
+ off += len;
+ if (off == vol->usable_leb_size) {
+ lnum += 1;
+ off -= vol->usable_leb_size;
+ }
+
+ count -= len;
+ *offp += len;
+
+ err = copy_to_user(buf, tbuf, len);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ buf += len;
+ len = count > tbuf_size ? tbuf_size : count;
+ } while (count);
+
+ vfree(tbuf);
+ return err ? err : count_save - count;
+}
+
+/*
+ * This function allows direct writes to dynamic UBI volumes, without
+ * issuing the volume update operation.
+ */
+static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ struct ubi_volume_desc *desc = file->private_data;
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int lnum, off, len, tbuf_size, err = 0;
+ size_t count_save = count;
+ char *tbuf;
+
+ if (!vol->direct_writes)
+ return -EPERM;
+
+ dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
+ count, *offp, vol->vol_id);
+
+ if (vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
+ if (off & (ubi->min_io_size - 1)) {
+ ubi_err(ubi, "unaligned position");
+ return -EINVAL;
+ }
+
+ if (*offp + count > vol->used_bytes)
+ count_save = count = vol->used_bytes - *offp;
+
+ /* We can write only in fractions of the minimum I/O unit */
+ if (count & (ubi->min_io_size - 1)) {
+ ubi_err(ubi, "unaligned write length");
+ return -EINVAL;
+ }
+
+ tbuf_size = vol->usable_leb_size;
+ if (count < tbuf_size)
+ tbuf_size = ALIGN(count, ubi->min_io_size);
+ tbuf = vmalloc(tbuf_size);
+ if (!tbuf)
+ return -ENOMEM;
+
+ len = count > tbuf_size ? tbuf_size : count;
+
+ while (count) {
+ cond_resched();
+
+ if (off + len >= vol->usable_leb_size)
+ len = vol->usable_leb_size - off;
+
+ err = copy_from_user(tbuf, buf, len);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len);
+ if (err)
+ break;
+
+ off += len;
+ if (off == vol->usable_leb_size) {
+ lnum += 1;
+ off -= vol->usable_leb_size;
+ }
+
+ count -= len;
+ *offp += len;
+ buf += len;
+ len = count > tbuf_size ? tbuf_size : count;
+ }
+
+ vfree(tbuf);
+ return err ? err : count_save - count;
+}
+
+static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ int err = 0;
+ struct ubi_volume_desc *desc = file->private_data;
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ if (!vol->updating && !vol->changing_leb)
+ return vol_cdev_direct_write(file, buf, count, offp);
+
+ if (vol->updating)
+ err = ubi_more_update_data(ubi, vol, buf, count);
+ else
+ err = ubi_more_leb_change_data(ubi, vol, buf, count);
+
+ if (err < 0) {
+ ubi_err(ubi, "cannot accept more %zd bytes of data, error %d",
+ count, err);
+ return err;
+ }
+
+ if (err) {
+ /*
+		 * The operation is finished, @err contains the number of actually
+ * written bytes.
+ */
+ count = err;
+
+ if (vol->changing_leb) {
+ revoke_exclusive(desc, UBI_READWRITE);
+ return count;
+ }
+
+ /*
+ * We voluntarily do not take into account the skip_check flag
+ * as we want to make sure what we wrote was correctly written.
+ */
+ err = ubi_check_volume(ubi, vol->vol_id);
+ if (err < 0)
+ return err;
+
+ if (err) {
+ ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
+ vol->vol_id, ubi->ubi_num);
+ vol->corrupted = 1;
+ }
+ vol->checked = 1;
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
+ revoke_exclusive(desc, UBI_READWRITE);
+ }
+
+ return count;
+}
+
+static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int err = 0;
+ struct ubi_volume_desc *desc = file->private_data;
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ void __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+ /* Volume update command */
+ case UBI_IOCVOLUP:
+ {
+ int64_t bytes, rsvd_bytes;
+
+ if (!capable(CAP_SYS_RESOURCE)) {
+ err = -EPERM;
+ break;
+ }
+
+ err = copy_from_user(&bytes, argp, sizeof(int64_t));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (desc->mode == UBI_READONLY) {
+ err = -EROFS;
+ break;
+ }
+
+ rsvd_bytes = (long long)vol->reserved_pebs *
+ vol->usable_leb_size;
+ if (bytes < 0 || bytes > rsvd_bytes) {
+ err = -EINVAL;
+ break;
+ }
+
+ err = get_exclusive(desc);
+ if (err < 0)
+ break;
+
+ err = ubi_start_update(ubi, vol, bytes);
+ if (bytes == 0) {
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
+ revoke_exclusive(desc, UBI_READWRITE);
+ }
+ break;
+ }
+
+ /* Atomic logical eraseblock change command */
+ case UBI_IOCEBCH:
+ {
+ struct ubi_leb_change_req req;
+
+ err = copy_from_user(&req, argp,
+ sizeof(struct ubi_leb_change_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (desc->mode == UBI_READONLY ||
+ vol->vol_type == UBI_STATIC_VOLUME) {
+ err = -EROFS;
+ break;
+ }
+
+ /* Validate the request */
+ err = -EINVAL;
+ if (!ubi_leb_valid(vol, req.lnum) ||
+ req.bytes < 0 || req.bytes > vol->usable_leb_size)
+ break;
+
+ err = get_exclusive(desc);
+ if (err < 0)
+ break;
+
+ err = ubi_start_leb_change(ubi, vol, &req);
+ if (req.bytes == 0)
+ revoke_exclusive(desc, UBI_READWRITE);
+ break;
+ }
+
+ /* Logical eraseblock erasure command */
+ case UBI_IOCEBER:
+ {
+ int32_t lnum;
+
+ err = get_user(lnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (desc->mode == UBI_READONLY ||
+ vol->vol_type == UBI_STATIC_VOLUME) {
+ err = -EROFS;
+ break;
+ }
+
+ if (!ubi_leb_valid(vol, lnum)) {
+ err = -EINVAL;
+ break;
+ }
+
+ dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
+ err = ubi_eba_unmap_leb(ubi, vol, lnum);
+ if (err)
+ break;
+
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ break;
+ }
+
+ /* Logical eraseblock map command */
+ case UBI_IOCEBMAP:
+ {
+ struct ubi_map_req req;
+
+ err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ err = ubi_leb_map(desc, req.lnum);
+ break;
+ }
+
+ /* Logical eraseblock un-map command */
+ case UBI_IOCEBUNMAP:
+ {
+ int32_t lnum;
+
+ err = get_user(lnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ err = ubi_leb_unmap(desc, lnum);
+ break;
+ }
+
+ /* Check if logical eraseblock is mapped command */
+ case UBI_IOCEBISMAP:
+ {
+ int32_t lnum;
+
+ err = get_user(lnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ err = ubi_is_mapped(desc, lnum);
+ break;
+ }
+
+ /* Set volume property command */
+ case UBI_IOCSETVOLPROP:
+ {
+ struct ubi_set_vol_prop_req req;
+
+ err = copy_from_user(&req, argp,
+ sizeof(struct ubi_set_vol_prop_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ switch (req.property) {
+ case UBI_VOL_PROP_DIRECT_WRITE:
+ mutex_lock(&ubi->device_mutex);
+ desc->vol->direct_writes = !!req.value;
+ mutex_unlock(&ubi->device_mutex);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ break;
+ }
+
+ /* Create a R/O block device on top of the UBI volume */
+ case UBI_IOCVOLCRBLK:
+ {
+ struct ubi_volume_info vi;
+
+ ubi_get_volume_info(desc, &vi);
+ err = ubiblock_create(&vi);
+ break;
+ }
+
+ /* Remove the R/O block device */
+ case UBI_IOCVOLRMBLK:
+ {
+ struct ubi_volume_info vi;
+
+ ubi_get_volume_info(desc, &vi);
+ err = ubiblock_remove(&vi);
+ break;
+ }
+
+ default:
+ err = -ENOTTY;
+ break;
+ }
+ return err;
+}
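+
+/*
+ * Illustrative userspace sketch (not part of this file) of the update
+ * protocol served by UBI_IOCVOLUP and vol_cdev_write() above; the device
+ * path is an example:
+ *
+ * int64_t bytes = image_size;
+ * int fd = open("/dev/ubi0_0", O_RDWR);
+ *
+ * ioctl(fd, UBI_IOCVOLUP, &bytes);
+ * write(fd, image, image_size);
+ * close(fd);
+ */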
+
+/**
+ * verify_mkvol_req - verify volume creation request.
+ * @ubi: UBI device description object
+ * @req: the request to check
+ *
+ * This function returns zero if the request is correct, and %-EINVAL if not.
+ */
+static int verify_mkvol_req(const struct ubi_device *ubi,
+ const struct ubi_mkvol_req *req)
+{
+ int n, err = -EINVAL;
+
+ if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 ||
+ req->name_len < 0)
+ goto bad;
+
+ if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) &&
+ req->vol_id != UBI_VOL_NUM_AUTO)
+ goto bad;
+
+ if (req->alignment == 0)
+ goto bad;
+
+ if (req->bytes == 0)
+ goto bad;
+
+ if (req->vol_type != UBI_DYNAMIC_VOLUME &&
+ req->vol_type != UBI_STATIC_VOLUME)
+ goto bad;
+
+ if (req->flags & ~UBI_VOL_VALID_FLGS)
+ goto bad;
+
+ if (req->flags & UBI_VOL_SKIP_CRC_CHECK_FLG &&
+ req->vol_type != UBI_STATIC_VOLUME)
+ goto bad;
+
+ if (req->alignment > ubi->leb_size)
+ goto bad;
+
+ n = req->alignment & (ubi->min_io_size - 1);
+ if (req->alignment != 1 && n)
+ goto bad;
+
+ if (!req->name[0] || !req->name_len)
+ goto bad;
+
+ if (req->name_len > UBI_VOL_NAME_MAX) {
+ err = -ENAMETOOLONG;
+ goto bad;
+ }
+
+ n = strnlen(req->name, req->name_len + 1);
+ if (n != req->name_len)
+ goto bad;
+
+ return 0;
+
+bad:
+ ubi_err(ubi, "bad volume creation request");
+ ubi_dump_mkvol_req(req);
+ return err;
+}
+
+/**
+ * verify_rsvol_req - verify volume re-size request.
+ * @ubi: UBI device description object
+ * @req: the request to check
+ *
+ * This function returns zero if the request is correct, and %-EINVAL if not.
+ */
+static int verify_rsvol_req(const struct ubi_device *ubi,
+ const struct ubi_rsvol_req *req)
+{
+ if (req->bytes <= 0)
+ return -EINVAL;
+
+ if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * rename_volumes - rename UBI volumes.
+ * @ubi: UBI device description object
+ * @req: volumes re-name request
+ *
+ * This is a helper function for the volume re-name IOCTL which validates the
+ * request, opens the volumes, and calls the corresponding volume management
+ * function. Returns zero in case of success and a negative error code in case
+ * of failure.
+ */
+static int rename_volumes(struct ubi_device *ubi,
+ struct ubi_rnvol_req *req)
+{
+ int i, n, err;
+ struct list_head rename_list;
+ struct ubi_rename_entry *re, *re1;
+
+ if (req->count < 0 || req->count > UBI_MAX_RNVOL)
+ return -EINVAL;
+
+ if (req->count == 0)
+ return 0;
+
+ /* Validate volume IDs and names in the request */
+ for (i = 0; i < req->count; i++) {
+ if (req->ents[i].vol_id < 0 ||
+ req->ents[i].vol_id >= ubi->vtbl_slots)
+ return -EINVAL;
+ if (req->ents[i].name_len < 0)
+ return -EINVAL;
+ if (req->ents[i].name_len > UBI_VOL_NAME_MAX)
+ return -ENAMETOOLONG;
+ req->ents[i].name[req->ents[i].name_len] = '\0';
+ n = strlen(req->ents[i].name);
+ if (n != req->ents[i].name_len)
+ return -EINVAL;
+ }
+
+ /* Make sure volume IDs and names are unique */
+ for (i = 0; i < req->count - 1; i++) {
+ for (n = i + 1; n < req->count; n++) {
+ if (req->ents[i].vol_id == req->ents[n].vol_id) {
+ ubi_err(ubi, "duplicated volume id %d",
+ req->ents[i].vol_id);
+ return -EINVAL;
+ }
+ if (!strcmp(req->ents[i].name, req->ents[n].name)) {
+ ubi_err(ubi, "duplicated volume name \"%s\"",
+ req->ents[i].name);
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Create the re-name list */
+ INIT_LIST_HEAD(&rename_list);
+ for (i = 0; i < req->count; i++) {
+ int vol_id = req->ents[i].vol_id;
+ int name_len = req->ents[i].name_len;
+ const char *name = req->ents[i].name;
+
+ re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
+ if (!re) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_METAONLY);
+ if (IS_ERR(re->desc)) {
+ err = PTR_ERR(re->desc);
+ ubi_err(ubi, "cannot open volume %d, error %d",
+ vol_id, err);
+ kfree(re);
+ goto out_free;
+ }
+
+ /* Skip this re-naming if the name does not really change */
+ if (re->desc->vol->name_len == name_len &&
+ !memcmp(re->desc->vol->name, name, name_len)) {
+ ubi_close_volume(re->desc);
+ kfree(re);
+ continue;
+ }
+
+ re->new_name_len = name_len;
+ memcpy(re->new_name, name, name_len);
+ list_add_tail(&re->list, &rename_list);
+ dbg_gen("will rename volume %d from \"%s\" to \"%s\"",
+ vol_id, re->desc->vol->name, name);
+ }
+
+ if (list_empty(&rename_list))
+ return 0;
+
+ /* Find out the volumes which have to be removed */
+ list_for_each_entry(re, &rename_list, list) {
+ struct ubi_volume_desc *desc;
+ int no_remove_needed = 0;
+
+ /*
+ * The volume behind @re is going to be re-named to
+ * @re->new_name. If a volume
+ * with name @re->new_name currently exists, it has to be
+ * removed, unless it is also re-named in the request (@req).
+ */
+ list_for_each_entry(re1, &rename_list, list) {
+ if (re->new_name_len == re1->desc->vol->name_len &&
+ !memcmp(re->new_name, re1->desc->vol->name,
+ re1->desc->vol->name_len)) {
+ no_remove_needed = 1;
+ break;
+ }
+ }
+
+ if (no_remove_needed)
+ continue;
+
+ /*
+ * It seems we need to remove volume with name @re->new_name,
+ * if it exists.
+ */
+ desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
+ UBI_EXCLUSIVE);
+ if (IS_ERR(desc)) {
+ err = PTR_ERR(desc);
+ if (err == -ENODEV)
+ /* Re-naming into a non-existing volume name */
+ continue;
+
+ /* The volume exists but is busy, or an error occurred */
+ ubi_err(ubi, "cannot open volume \"%s\", error %d",
+ re->new_name, err);
+ goto out_free;
+ }
+
+ re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
+ if (!re1) {
+ err = -ENOMEM;
+ ubi_close_volume(desc);
+ goto out_free;
+ }
+
+ re1->remove = 1;
+ re1->desc = desc;
+ list_add(&re1->list, &rename_list);
+ dbg_gen("will remove volume %d, name \"%s\"",
+ re1->desc->vol->vol_id, re1->desc->vol->name);
+ }
+
+ mutex_lock(&ubi->device_mutex);
+ err = ubi_rename_volumes(ubi, &rename_list);
+ mutex_unlock(&ubi->device_mutex);
+
+out_free:
+ list_for_each_entry_safe(re, re1, &rename_list, list) {
+ ubi_close_volume(re->desc);
+ list_del(&re->list);
+ kfree(re);
+ }
+ return err;
+}
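+
+/*
+ * Illustrative userspace sketch (not part of this file) of a single-volume
+ * re-name request as handled by rename_volumes(); the values are examples:
+ *
+ * struct ubi_rnvol_req req = { .count = 1 };
+ *
+ * req.ents[0].vol_id = 0;
+ * req.ents[0].name_len = strlen("newname");
+ * strcpy(req.ents[0].name, "newname");
+ * ioctl(ubi_fd, UBI_IOCRNVOL, &req);
+ */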
+
+static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int err = 0;
+ struct ubi_device *ubi;
+ struct ubi_volume_desc *desc;
+ void __user *argp = (void __user *)arg;
+
+ if (!capable(CAP_SYS_RESOURCE))
+ return -EPERM;
+
+ ubi = ubi_get_by_major(imajor(file->f_mapping->host));
+ if (!ubi)
+ return -ENODEV;
+
+ switch (cmd) {
+ /* Create volume command */
+ case UBI_IOCMKVOL:
+ {
+ struct ubi_mkvol_req req;
+
+ dbg_gen("create volume");
+ err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = verify_mkvol_req(ubi, &req);
+ if (err)
+ break;
+
+ mutex_lock(&ubi->device_mutex);
+ err = ubi_create_volume(ubi, &req);
+ mutex_unlock(&ubi->device_mutex);
+ if (err)
+ break;
+
+ err = put_user(req.vol_id, (__user int32_t *)argp);
+ if (err)
+ err = -EFAULT;
+
+ break;
+ }
+
+ /* Remove volume command */
+ case UBI_IOCRMVOL:
+ {
+ int vol_id;
+
+ dbg_gen("remove volume");
+ err = get_user(vol_id, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
+ if (IS_ERR(desc)) {
+ err = PTR_ERR(desc);
+ break;
+ }
+
+ mutex_lock(&ubi->device_mutex);
+ err = ubi_remove_volume(desc, 0);
+ mutex_unlock(&ubi->device_mutex);
+
+ /*
+ * The volume is deleted (unless an error occurred), and the
+ * 'struct ubi_volume' object will be freed when
+ * 'ubi_close_volume()' calls 'put_device()'.
+ */
+ ubi_close_volume(desc);
+ break;
+ }
+
+ /* Re-size volume command */
+ case UBI_IOCRSVOL:
+ {
+ int pebs;
+ struct ubi_rsvol_req req;
+
+ dbg_gen("re-size volume");
+ err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = verify_rsvol_req(ubi, &req);
+ if (err)
+ break;
+
+ desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
+ if (IS_ERR(desc)) {
+ err = PTR_ERR(desc);
+ break;
+ }
+
+ pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
+ desc->vol->usable_leb_size);
+
+ mutex_lock(&ubi->device_mutex);
+ err = ubi_resize_volume(desc, pebs);
+ mutex_unlock(&ubi->device_mutex);
+ ubi_close_volume(desc);
+ break;
+ }
+
+ /* Re-name volumes command */
+ case UBI_IOCRNVOL:
+ {
+ struct ubi_rnvol_req *req;
+
+ dbg_gen("re-name volumes");
+ req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
+ if (!req) {
+ err = -ENOMEM;
+ break;
+ }
+
+ err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req));
+ if (err) {
+ err = -EFAULT;
+ kfree(req);
+ break;
+ }
+
+ err = rename_volumes(ubi, req);
+ kfree(req);
+ break;
+ }
+
+ /* Check a specific PEB for bitflips and scrub it if needed */
+ case UBI_IOCRPEB:
+ {
+ int pnum;
+
+ err = get_user(pnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = ubi_bitflip_check(ubi, pnum, 0);
+ break;
+ }
+
+ /* Force scrubbing for a specific PEB */
+ case UBI_IOCSPEB:
+ {
+ int pnum;
+
+ err = get_user(pnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = ubi_bitflip_check(ubi, pnum, 1);
+ break;
+ }
+
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ ubi_put_device(ubi);
+ return err;
+}
+
+static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int err = 0;
+ void __user *argp = (void __user *)arg;
+
+ if (!capable(CAP_SYS_RESOURCE))
+ return -EPERM;
+
+ switch (cmd) {
+ /* Attach an MTD device command */
+ case UBI_IOCATT:
+ {
+ struct ubi_attach_req req;
+ struct mtd_info *mtd;
+
+ dbg_gen("attach MTD device");
+ err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (req.mtd_num < 0 ||
+ (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
+ err = -EINVAL;
+ break;
+ }
+
+ mtd = get_mtd_device(NULL, req.mtd_num);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ break;
+ }
+
+ /*
+ * Note, further request verification is done by
+ * 'ubi_attach_mtd_dev()'.
+ */
+ mutex_lock(&ubi_devices_mutex);
+ err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
+ req.max_beb_per1024, !!req.disable_fm);
+ mutex_unlock(&ubi_devices_mutex);
+ if (err < 0)
+ put_mtd_device(mtd);
+ else
+ /* @err contains UBI device number */
+ err = put_user(err, (__user int32_t *)argp);
+
+ break;
+ }
+
+ /* Detach an MTD device command */
+ case UBI_IOCDET:
+ {
+ int ubi_num;
+
+ dbg_gen("detach MTD device");
+ err = get_user(ubi_num, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ mutex_lock(&ubi_devices_mutex);
+ err = ubi_detach_mtd_dev(ubi_num, 0);
+ mutex_unlock(&ubi_devices_mutex);
+ break;
+ }
+
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ return err;
+}
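+
+/*
+ * Illustrative userspace sketch (not part of this file) of attaching MTD
+ * device 0 through the UBI control node; the path and values are examples.
+ * On success the assigned UBI device number is copied back to user space:
+ *
+ * struct ubi_attach_req req = { .ubi_num = UBI_DEV_NUM_AUTO, .mtd_num = 0 };
+ * int fd = open("/dev/ubi_ctrl", O_RDWR);
+ *
+ * ioctl(fd, UBI_IOCATT, &req);
+ */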
+
+/* UBI volume character device operations */
+const struct file_operations ubi_vol_cdev_operations = {
+ .owner = THIS_MODULE,
+ .open = vol_cdev_open,
+ .release = vol_cdev_release,
+ .llseek = vol_cdev_llseek,
+ .read = vol_cdev_read,
+ .write = vol_cdev_write,
+ .fsync = vol_cdev_fsync,
+ .unlocked_ioctl = vol_cdev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+/* UBI character device operations */
+const struct file_operations ubi_cdev_operations = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = ubi_cdev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+/* UBI control character device operations */
+const struct file_operations ubi_ctrl_cdev_operations = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ctrl_cdev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .llseek = no_llseek,
+};
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
new file mode 100644
index 0000000000..27168f511d
--- /dev/null
+++ b/drivers/mtd/ubi/debug.c
@@ -0,0 +1,606 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+#include "ubi.h"
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+
+
+/**
+ * ubi_dump_flash - dump a region of flash.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to dump
+ * @offset: the starting offset within the physical eraseblock to dump
+ * @len: the length of the region to dump
+ */
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
+{
+ int err;
+ size_t read;
+ void *buf;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+ buf = vmalloc(len);
+ if (!buf)
+ return;
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
+ if (err && err != -EUCLEAN) {
+ ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
+ goto out;
+ }
+
+ ubi_msg(ubi, "dumping %d bytes of data from PEB %d, offset %d",
+ len, pnum, offset);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+out:
+ vfree(buf);
+ return;
+}
+
+/**
+ * ubi_dump_ec_hdr - dump an erase counter header.
+ * @ec_hdr: the erase counter header to dump
+ */
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
+{
+ pr_err("Erase counter header dump:\n");
+ pr_err("\tmagic %#08x\n", be32_to_cpu(ec_hdr->magic));
+ pr_err("\tversion %d\n", (int)ec_hdr->version);
+ pr_err("\tec %llu\n", (long long)be64_to_cpu(ec_hdr->ec));
+ pr_err("\tvid_hdr_offset %d\n", be32_to_cpu(ec_hdr->vid_hdr_offset));
+ pr_err("\tdata_offset %d\n", be32_to_cpu(ec_hdr->data_offset));
+ pr_err("\timage_seq %d\n", be32_to_cpu(ec_hdr->image_seq));
+ pr_err("\thdr_crc %#08x\n", be32_to_cpu(ec_hdr->hdr_crc));
+ pr_err("erase counter header hexdump:\n");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ ec_hdr, UBI_EC_HDR_SIZE, 1);
+}
+
+/**
+ * ubi_dump_vid_hdr - dump a volume identifier header.
+ * @vid_hdr: the volume identifier header to dump
+ */
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
+{
+ pr_err("Volume identifier header dump:\n");
+ pr_err("\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
+ pr_err("\tversion %d\n", (int)vid_hdr->version);
+ pr_err("\tvol_type %d\n", (int)vid_hdr->vol_type);
+ pr_err("\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
+ pr_err("\tcompat %d\n", (int)vid_hdr->compat);
+ pr_err("\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
+ pr_err("\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
+ pr_err("\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
+ pr_err("\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
+ pr_err("\tsqnum %llu\n",
+ (unsigned long long)be64_to_cpu(vid_hdr->sqnum));
+ pr_err("\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
+ pr_err("Volume identifier header hexdump:\n");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ vid_hdr, UBI_VID_HDR_SIZE, 1);
+}
+
+/**
+ * ubi_dump_vol_info - dump volume information.
+ * @vol: UBI volume description object
+ */
+void ubi_dump_vol_info(const struct ubi_volume *vol)
+{
+ pr_err("Volume information dump:\n");
+ pr_err("\tvol_id %d\n", vol->vol_id);
+ pr_err("\treserved_pebs %d\n", vol->reserved_pebs);
+ pr_err("\talignment %d\n", vol->alignment);
+ pr_err("\tdata_pad %d\n", vol->data_pad);
+ pr_err("\tvol_type %d\n", vol->vol_type);
+ pr_err("\tname_len %d\n", vol->name_len);
+ pr_err("\tusable_leb_size %d\n", vol->usable_leb_size);
+ pr_err("\tused_ebs %d\n", vol->used_ebs);
+ pr_err("\tused_bytes %lld\n", vol->used_bytes);
+ pr_err("\tlast_eb_bytes %d\n", vol->last_eb_bytes);
+ pr_err("\tcorrupted %d\n", vol->corrupted);
+ pr_err("\tupd_marker %d\n", vol->upd_marker);
+ pr_err("\tskip_check %d\n", vol->skip_check);
+
+ if (vol->name_len <= UBI_VOL_NAME_MAX &&
+ strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
+ pr_err("\tname %s\n", vol->name);
+ } else {
+ pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
+ vol->name[0], vol->name[1], vol->name[2],
+ vol->name[3], vol->name[4]);
+ }
+}
+
+/**
+ * ubi_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
+ * @r: the object to dump
+ * @idx: volume table index
+ */
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
+{
+ int name_len = be16_to_cpu(r->name_len);
+
+ pr_err("Volume table record %d dump:\n", idx);
+ pr_err("\treserved_pebs %d\n", be32_to_cpu(r->reserved_pebs));
+ pr_err("\talignment %d\n", be32_to_cpu(r->alignment));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(r->data_pad));
+ pr_err("\tvol_type %d\n", (int)r->vol_type);
+ pr_err("\tupd_marker %d\n", (int)r->upd_marker);
+ pr_err("\tname_len %d\n", name_len);
+
+ if (r->name[0] == '\0') {
+ pr_err("\tname NULL\n");
+ return;
+ }
+
+ if (name_len <= UBI_VOL_NAME_MAX &&
+ strnlen(&r->name[0], name_len + 1) == name_len) {
+ pr_err("\tname %s\n", &r->name[0]);
+ } else {
+ pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
+ r->name[0], r->name[1], r->name[2], r->name[3],
+ r->name[4]);
+ }
+ pr_err("\tcrc %#08x\n", be32_to_cpu(r->crc));
+}
+
+/**
+ * ubi_dump_av - dump a &struct ubi_ainf_volume object.
+ * @av: the object to dump
+ */
+void ubi_dump_av(const struct ubi_ainf_volume *av)
+{
+ pr_err("Volume attaching information dump:\n");
+ pr_err("\tvol_id %d\n", av->vol_id);
+ pr_err("\thighest_lnum %d\n", av->highest_lnum);
+ pr_err("\tleb_count %d\n", av->leb_count);
+ pr_err("\tcompat %d\n", av->compat);
+ pr_err("\tvol_type %d\n", av->vol_type);
+ pr_err("\tused_ebs %d\n", av->used_ebs);
+ pr_err("\tlast_data_size %d\n", av->last_data_size);
+ pr_err("\tdata_pad %d\n", av->data_pad);
+}
+
+/**
+ * ubi_dump_aeb - dump a &struct ubi_ainf_peb object.
+ * @aeb: the object to dump
+ * @type: object type: 0 - not corrupted, 1 - corrupted
+ */
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type)
+{
+ pr_err("eraseblock attaching information dump:\n");
+ pr_err("\tec %d\n", aeb->ec);
+ pr_err("\tpnum %d\n", aeb->pnum);
+ if (type == 0) {
+ pr_err("\tlnum %d\n", aeb->lnum);
+ pr_err("\tscrub %d\n", aeb->scrub);
+ pr_err("\tsqnum %llu\n", aeb->sqnum);
+ }
+}
+
+/**
+ * ubi_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
+ * @req: the object to dump
+ */
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
+{
+ char nm[17];
+
+ pr_err("Volume creation request dump:\n");
+ pr_err("\tvol_id %d\n", req->vol_id);
+ pr_err("\talignment %d\n", req->alignment);
+ pr_err("\tbytes %lld\n", (long long)req->bytes);
+ pr_err("\tvol_type %d\n", req->vol_type);
+ pr_err("\tname_len %d\n", req->name_len);
+
+ memcpy(nm, req->name, 16);
+ nm[16] = 0;
+ pr_err("\t1st 16 characters of name: %s\n", nm);
+}
+
+/*
+ * Root directory for UBI stuff in debugfs. Contains sub-directories which
+ * contain files specific to particular UBI devices.
+ */
+static struct dentry *dfs_rootdir;
+
+/**
+ * ubi_debugfs_init - create UBI debugfs directory.
+ *
+ * Create UBI debugfs directory. Returns zero in case of success and a negative
+ * error code in case of failure.
+ */
+int ubi_debugfs_init(void)
+{
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ dfs_rootdir = debugfs_create_dir("ubi", NULL);
+ if (IS_ERR_OR_NULL(dfs_rootdir)) {
+ int err = dfs_rootdir ? PTR_ERR(dfs_rootdir) : -ENODEV;
+
+ pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_debugfs_exit - remove UBI debugfs directory.
+ */
+void ubi_debugfs_exit(void)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove(dfs_rootdir);
+}
+
+/* Read an UBI debugfs file */
+static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long ubi_num = (unsigned long)file->private_data;
+ struct dentry *dent = file->f_path.dentry;
+ struct ubi_device *ubi;
+ struct ubi_debug_info *d;
+ char buf[8];
+ int val;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+ d = &ubi->dbg;
+
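+ /*
+ * The boolean knobs are reported as "0\n" or "1\n"; the power-cut knobs
+ * below are reported as full decimal integers.
+ */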
+ if (dent == d->dfs_chk_gen)
+ val = d->chk_gen;
+ else if (dent == d->dfs_chk_io)
+ val = d->chk_io;
+ else if (dent == d->dfs_chk_fastmap)
+ val = d->chk_fastmap;
+ else if (dent == d->dfs_disable_bgt)
+ val = d->disable_bgt;
+ else if (dent == d->dfs_emulate_bitflips)
+ val = d->emulate_bitflips;
+ else if (dent == d->dfs_emulate_io_failures)
+ val = d->emulate_io_failures;
+ else if (dent == d->dfs_emulate_power_cut) {
+ snprintf(buf, sizeof(buf), "%u\n", d->emulate_power_cut);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ buf, strlen(buf));
+ goto out;
+ } else if (dent == d->dfs_power_cut_min) {
+ snprintf(buf, sizeof(buf), "%u\n", d->power_cut_min);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ buf, strlen(buf));
+ goto out;
+ } else if (dent == d->dfs_power_cut_max) {
+ snprintf(buf, sizeof(buf), "%u\n", d->power_cut_max);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ buf, strlen(buf));
+ goto out;
+ } else {
+ count = -EINVAL;
+ goto out;
+ }
+
+ if (val)
+ buf[0] = '1';
+ else
+ buf[0] = '0';
+ buf[1] = '\n';
+ buf[2] = 0x00;
+
+ count = simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+
+out:
+ ubi_put_device(ubi);
+ return count;
+}
+
+/* Write an UBI debugfs file */
+static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long ubi_num = (unsigned long)file->private_data;
+ struct dentry *dent = file->f_path.dentry;
+ struct ubi_device *ubi;
+ struct ubi_debug_info *d;
+ size_t buf_size;
+ char buf[8] = {0};
+ int val;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+ d = &ubi->dbg;
+
+ buf_size = min_t(size_t, count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size)) {
+ count = -EFAULT;
+ goto out;
+ }
+
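+ /* The integer-valued knobs are parsed here; the remaining knobs are boolean */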
+ if (dent == d->dfs_power_cut_min) {
+ if (kstrtouint(buf, 0, &d->power_cut_min) != 0)
+ count = -EINVAL;
+ goto out;
+ } else if (dent == d->dfs_power_cut_max) {
+ if (kstrtouint(buf, 0, &d->power_cut_max) != 0)
+ count = -EINVAL;
+ goto out;
+ } else if (dent == d->dfs_emulate_power_cut) {
+ if (kstrtoint(buf, 0, &val) != 0)
+ count = -EINVAL;
+ else
+ d->emulate_power_cut = val;
+ goto out;
+ }
+
+ if (buf[0] == '1')
+ val = 1;
+ else if (buf[0] == '0')
+ val = 0;
+ else {
+ count = -EINVAL;
+ goto out;
+ }
+
+ if (dent == d->dfs_chk_gen)
+ d->chk_gen = val;
+ else if (dent == d->dfs_chk_io)
+ d->chk_io = val;
+ else if (dent == d->dfs_chk_fastmap)
+ d->chk_fastmap = val;
+ else if (dent == d->dfs_disable_bgt)
+ d->disable_bgt = val;
+ else if (dent == d->dfs_emulate_bitflips)
+ d->emulate_bitflips = val;
+ else if (dent == d->dfs_emulate_io_failures)
+ d->emulate_io_failures = val;
+ else
+ count = -EINVAL;
+
+out:
+ ubi_put_device(ubi);
+ return count;
+}
+
+/* File operations for all UBI debugfs files except
+ * detailed_erase_block_info
+ */
+static const struct file_operations dfs_fops = {
+ .read = dfs_file_read,
+ .write = dfs_file_write,
+ .open = simple_open,
+ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+};
+
+/* As long as the position is less than the total number of erase blocks,
+ * we still have more to print.
+ */
+static void *eraseblk_count_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct ubi_device *ubi = s->private;
+
+ if (*pos < ubi->peb_count)
+ return pos;
+
+ return NULL;
+}
+
+/* Since we are using the position as the iterator, we just need to check if we
+ * are done and increment the position.
+ */
+static void *eraseblk_count_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct ubi_device *ubi = s->private;
+
+ (*pos)++;
+
+ if (*pos < ubi->peb_count)
+ return pos;
+
+ return NULL;
+}
+
+static void eraseblk_count_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int eraseblk_count_seq_show(struct seq_file *s, void *iter)
+{
+ struct ubi_device *ubi = s->private;
+ struct ubi_wl_entry *wl;
+ int *block_number = iter;
+ int erase_count = -1;
+ int err;
+
+ /* If this is the start, print a header */
+ if (*block_number == 0)
+ seq_puts(s, "physical_block_number\terase_count\n");
+
+ err = ubi_io_is_bad(ubi, *block_number);
+ if (err)
+ return err;
+
+ spin_lock(&ubi->wl_lock);
+
+ wl = ubi->lookuptbl[*block_number];
+ if (wl)
+ erase_count = wl->ec;
+
+ spin_unlock(&ubi->wl_lock);
+
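+ /* PEBs without a wear-leveling entry have no erase count to report */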
+ if (erase_count < 0)
+ return 0;
+
+ seq_printf(s, "%-22d\t%-11d\n", *block_number, erase_count);
+
+ return 0;
+}
+
+static const struct seq_operations eraseblk_count_seq_ops = {
+ .start = eraseblk_count_seq_start,
+ .next = eraseblk_count_seq_next,
+ .stop = eraseblk_count_seq_stop,
+ .show = eraseblk_count_seq_show
+};
+
+static int eraseblk_count_open(struct inode *inode, struct file *f)
+{
+ struct seq_file *s;
+ int err;
+
+ err = seq_open(f, &eraseblk_count_seq_ops);
+ if (err)
+ return err;
+
+ s = f->private_data;
+ s->private = ubi_get_device((unsigned long)inode->i_private);
+
+ if (!s->private) {
+ /* Drop the seq_file allocated by seq_open() */
+ seq_release(inode, f);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int eraseblk_count_release(struct inode *inode, struct file *f)
+{
+ struct seq_file *s = f->private_data;
+ struct ubi_device *ubi = s->private;
+
+ ubi_put_device(ubi);
+
+ return seq_release(inode, f);
+}
+
+static const struct file_operations eraseblk_count_fops = {
+ .owner = THIS_MODULE,
+ .open = eraseblk_count_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = eraseblk_count_release,
+};
+
+/**
+ * ubi_debugfs_init_dev - initialize debugfs for an UBI device.
+ * @ubi: UBI device description object
+ *
+ * This function creates all debugfs files for UBI device @ubi. Returns zero in
+ * case of success and a negative error code in case of failure.
+ */
+int ubi_debugfs_init_dev(struct ubi_device *ubi)
+{
+ unsigned long ubi_num = ubi->ubi_num;
+ struct ubi_debug_info *d = &ubi->dbg;
+ umode_t mode = S_IRUSR | S_IWUSR;
+ int n;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
+ ubi->ubi_num);
+ if (n > UBI_DFS_DIR_LEN) {
+ /* The array size is too small */
+ return -EINVAL;
+ }
+
+ d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
+
+ d->dfs_chk_gen = debugfs_create_file("chk_gen", mode, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
+
+ d->dfs_chk_io = debugfs_create_file("chk_io", mode, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
+
+ d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", mode,
+ d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", mode,
+ d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_emulate_bitflips = debugfs_create_file("tst_emulate_bitflips",
+ mode, d->dfs_dir,
+ (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_emulate_io_failures = debugfs_create_file("tst_emulate_io_failures",
+ mode, d->dfs_dir,
+ (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_emulate_power_cut = debugfs_create_file("tst_emulate_power_cut",
+ mode, d->dfs_dir,
+ (void *)ubi_num,
+ &dfs_fops);
+
+ d->dfs_power_cut_min = debugfs_create_file("tst_emulate_power_cut_min",
+ mode, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
+
+ d->dfs_power_cut_max = debugfs_create_file("tst_emulate_power_cut_max",
+ mode, d->dfs_dir,
+ (void *)ubi_num, &dfs_fops);
+
+ debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,
+ (void *)ubi_num, &eraseblk_count_fops);
+
+ return 0;
+}
+
+/**
+ * ubi_debugfs_exit_dev - free all debugfs files corresponding to device @ubi
+ * @ubi: UBI device description object
+ */
+void ubi_debugfs_exit_dev(struct ubi_device *ubi)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove_recursive(ubi->dbg.dfs_dir);
+}
+
+/**
+ * ubi_dbg_power_cut - emulate a power cut if it is time to do so
+ * @ubi: UBI device description object
+ * @caller: Flags set to indicate from where the function is being called
+ *
+ * Returns non-zero if a power cut was emulated, zero if not.
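+ *
+ * The first armed call initializes a countdown chosen in the
+ * [power_cut_min, power_cut_max) range; subsequent calls decrement it, and
+ * the power cut is emulated once the countdown reaches zero.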
+ */
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
+{
+ unsigned int range;
+
+ if ((ubi->dbg.emulate_power_cut & caller) == 0)
+ return 0;
+
+ if (ubi->dbg.power_cut_counter == 0) {
+ ubi->dbg.power_cut_counter = ubi->dbg.power_cut_min;
+
+ if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
+ range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
+ ubi->dbg.power_cut_counter += get_random_u32_below(range);
+ }
+ return 0;
+ }
+
+ ubi->dbg.power_cut_counter--;
+ if (ubi->dbg.power_cut_counter)
+ return 0;
+
+ ubi_msg(ubi, "XXXXXXXXXXXXXXX emulating a power cut XXXXXXXXXXXXXXXX");
+ ubi_ro_mode(ubi);
+ return 1;
+}
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
new file mode 100644
index 0000000000..23676f32b6
--- /dev/null
+++ b/drivers/mtd/ubi/debug.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+#ifndef __UBI_DEBUG_H__
+#define __UBI_DEBUG_H__
+
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
+
+#include <linux/random.h>
+
+#define ubi_assert(expr) do { \
+ if (unlikely(!(expr))) { \
+ pr_crit("UBI assert failed in %s at %u (pid %d)\n", \
+ __func__, __LINE__, current->pid); \
+ dump_stack(); \
+ } \
+} while (0)
+
+#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
+ print_hex_dump(l, ps, pt, r, g, b, len, a)
+
+#define ubi_dbg_msg(type, fmt, ...) \
+ pr_debug("UBI DBG " type " (pid %d): " fmt "\n", current->pid, \
+ ##__VA_ARGS__)
+
+/* General debugging messages */
+#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
+/* Messages from the eraseblock association sub-system */
+#define dbg_eba(fmt, ...) ubi_dbg_msg("eba", fmt, ##__VA_ARGS__)
+/* Messages from the wear-leveling sub-system */
+#define dbg_wl(fmt, ...) ubi_dbg_msg("wl", fmt, ##__VA_ARGS__)
+/* Messages from the input/output sub-system */
+#define dbg_io(fmt, ...) ubi_dbg_msg("io", fmt, ##__VA_ARGS__)
+/* Initialization and build messages */
+#define dbg_bld(fmt, ...) ubi_dbg_msg("bld", fmt, ##__VA_ARGS__)
+
+void ubi_dump_vol_info(const struct ubi_volume *vol);
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
+void ubi_dump_av(const struct ubi_ainf_volume *av);
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type);
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req);
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
+ int len);
+int ubi_debugfs_init(void);
+void ubi_debugfs_exit(void);
+int ubi_debugfs_init_dev(struct ubi_device *ubi);
+void ubi_debugfs_exit_dev(struct ubi_device *ubi);
+
+/**
+ * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if the UBI background thread is disabled for testing
+ * purposes.
+ */
+static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
+{
+ return ubi->dbg.disable_bgt;
+}
+
+/**
+ * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if a bit-flip should be emulated, otherwise returns zero.
+ */
+static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_bitflips)
+ return !get_random_u32_below(200);
+ return 0;
+}
+
+/**
+ * ubi_dbg_is_write_failure - if it is time to emulate a write failure.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if a write failure should be emulated, otherwise returns
+ * zero.
+ */
+static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_io_failures)
+ return !get_random_u32_below(500);
+ return 0;
+}
+
+/**
+ * ubi_dbg_is_erase_failure - if it is time to emulate an erase failure.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if an erase failure should be emulated, otherwise returns
+ * zero.
+ */
+static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_io_failures)
+ return !get_random_u32_below(400);
+ return 0;
+}
+
+static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_io;
+}
+
+static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_gen;
+}
+
+static inline int ubi_dbg_chk_fastmap(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_fastmap;
+}
+
+static inline void ubi_enable_dbg_chk_fastmap(struct ubi_device *ubi)
+{
+ ubi->dbg.chk_fastmap = 1;
+}
+
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
+#endif /* !__UBI_DEBUG_H__ */
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
new file mode 100644
index 0000000000..655ff41863
--- /dev/null
+++ b/drivers/mtd/ubi/eba.c
@@ -0,0 +1,1709 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * The UBI Eraseblock Association (EBA) sub-system.
+ *
+ * This sub-system is responsible for I/O to/from logical eraseblocks.
+ *
+ * Although in this implementation the EBA table is fully kept and managed in
+ * RAM, which implies poor scalability, it might be (partially) maintained on
+ * flash in future implementations.
+ *
+ * The EBA sub-system implements per-logical eraseblock locking. Before
+ * accessing a logical eraseblock it is locked for reading or writing. The
+ * per-logical eraseblock locking is implemented by means of the lock tree. The
+ * lock tree is an RB-tree which refers all the currently locked logical
+ * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
+ * They are indexed by (@vol_id, @lnum) pairs.
+ *
+ * EBA also maintains the global sequence counter which is incremented each
+ * time a logical eraseblock is mapped to a physical eraseblock and it is
+ * stored in the volume identifier header. This means that each VID header has
+ * a unique sequence number. The sequence number is only ever increased and we
+ * assume 64 bits is enough for it to never overflow.
+ */
+
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/err.h>
+#include "ubi.h"
+
+/* Number of physical eraseblocks reserved for atomic LEB change operation */
+#define EBA_RESERVED_PEBS 1
+
+/**
+ * struct ubi_eba_entry - structure encoding a single LEB -> PEB association
+ * @pnum: the physical eraseblock number attached to the LEB
+ *
+ * This structure encodes a LEB -> PEB association. Note that the LEB
+ * number is not stored here, because it is the index used to access the
+ * entries table.
+ */
+struct ubi_eba_entry {
+ int pnum;
+};
+
+/**
+ * struct ubi_eba_table - LEB -> PEB association information
+ * @entries: the LEB to PEB mapping (one entry per LEB).
+ *
+ * This structure is private to the EBA logic and should be kept here.
+ * It encodes the LEB to PEB association table, and is subject to
+ * change.
+ */
+struct ubi_eba_table {
+ struct ubi_eba_entry *entries;
+};
+
+/**
+ * ubi_next_sqnum - get next sequence number.
+ * @ubi: UBI device description object
+ *
+ * This function returns the next sequence number to use, which is just the
+ * current global sequence counter value. It also increases the global
+ * sequence counter.
+ */
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
+{
+ unsigned long long sqnum;
+
+ spin_lock(&ubi->ltree_lock);
+ sqnum = ubi->global_sqnum++;
+ spin_unlock(&ubi->ltree_lock);
+
+ return sqnum;
+}
+
+/**
+ * ubi_get_compat - get compatibility flags of a volume.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ *
+ * This function returns compatibility flags for an internal volume. User
+ * volumes have no compatibility flags, so %0 is returned.
+ */
+static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
+{
+ if (vol_id == UBI_LAYOUT_VOLUME_ID)
+ return UBI_LAYOUT_VOLUME_COMPAT;
+ return 0;
+}
+
+/**
+ * ubi_eba_get_ldesc - get information about a LEB
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @ldesc: the LEB descriptor to fill
+ *
+ * Used to query information about a specific LEB.
+ * It currently only returns the physical position of the LEB, but will be
+ * extended to provide more information.
+ */
+void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
+ struct ubi_eba_leb_desc *ldesc)
+{
+ ldesc->lnum = lnum;
+ ldesc->pnum = vol->eba_tbl->entries[lnum].pnum;
+}
+
+/**
+ * ubi_eba_create_table - allocate a new EBA table and initialize it with all
+ * LEBs unmapped
+ * @vol: volume the EBA table is created for
+ * @nentries: number of entries in the table
+ *
+ * Allocate a new EBA table and initialize it with all LEBs unmapped.
+ * Returns a valid pointer if it succeeds, and an ERR_PTR() otherwise.
+ */
+struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
+ int nentries)
+{
+ struct ubi_eba_table *tbl;
+ int err = -ENOMEM;
+ int i;
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ return ERR_PTR(-ENOMEM);
+
+ tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
+ GFP_KERNEL);
+ if (!tbl->entries)
+ goto err;
+
+ for (i = 0; i < nentries; i++)
+ tbl->entries[i].pnum = UBI_LEB_UNMAPPED;
+
+ return tbl;
+
+err:
+ kfree(tbl);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * ubi_eba_destroy_table - destroy an EBA table
+ * @tbl: the table to destroy
+ *
+ * Destroy an EBA table.
+ */
+void ubi_eba_destroy_table(struct ubi_eba_table *tbl)
+{
+ if (!tbl)
+ return;
+
+ kfree(tbl->entries);
+ kfree(tbl);
+}
+
+/**
+ * ubi_eba_copy_table - copy the EBA table attached to vol into another table
+ * @vol: volume containing the EBA table to copy
+ * @dst: destination
+ * @nentries: number of entries to copy
+ *
+ * Copy the EBA table stored in vol into the one pointed by dst.
+ */
+void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
+ int nentries)
+{
+ struct ubi_eba_table *src;
+ int i;
+
+ ubi_assert(dst && vol && vol->eba_tbl);
+
+ src = vol->eba_tbl;
+
+ for (i = 0; i < nentries; i++)
+ dst->entries[i].pnum = src->entries[i].pnum;
+}
+
+/**
+ * ubi_eba_replace_table - assign a new EBA table to a volume
+ * @vol: volume containing the EBA table to copy
+ * @tbl: new EBA table
+ *
+ * Assign a new EBA table to the volume and release the old one.
+ */
+void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl)
+{
+ ubi_eba_destroy_table(vol->eba_tbl);
+ vol->eba_tbl = tbl;
+}
+
+/**
+ * ltree_lookup - look up the lock tree.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function returns a pointer to the corresponding &struct ubi_ltree_entry
+ * object if the logical eraseblock is locked and %NULL if it is not.
+ * @ubi->ltree_lock has to be locked.
+ */
+static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
+ int lnum)
+{
+ struct rb_node *p;
+
+ p = ubi->ltree.rb_node;
+ while (p) {
+ struct ubi_ltree_entry *le;
+
+ le = rb_entry(p, struct ubi_ltree_entry, rb);
+
+ if (vol_id < le->vol_id)
+ p = p->rb_left;
+ else if (vol_id > le->vol_id)
+ p = p->rb_right;
+ else {
+ if (lnum < le->lnum)
+ p = p->rb_left;
+ else if (lnum > le->lnum)
+ p = p->rb_right;
+ else
+ return le;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * ltree_add_entry - add new entry to the lock tree.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function adds a new entry for logical eraseblock (@vol_id, @lnum) to
+ * the lock tree. If such an entry is already there, its usage counter is
+ * increased. Returns a pointer to the lock tree entry or %-ENOMEM if memory
+ * allocation failed.
+ */
+static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
+ int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le, *le1, *le_free;
+
+ le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
+ if (!le)
+ return ERR_PTR(-ENOMEM);
+
+ le->users = 0;
+ init_rwsem(&le->mutex);
+ le->vol_id = vol_id;
+ le->lnum = lnum;
+
+ spin_lock(&ubi->ltree_lock);
+ le1 = ltree_lookup(ubi, vol_id, lnum);
+
+ if (le1) {
+ /*
+ * This logical eraseblock is already locked. The newly
+ * allocated lock entry is not needed.
+ */
+ le_free = le;
+ le = le1;
+ } else {
+ struct rb_node **p, *parent = NULL;
+
+ /*
+ * No lock entry, add the newly allocated one to the
+ * @ubi->ltree RB-tree.
+ */
+ le_free = NULL;
+
+ p = &ubi->ltree.rb_node;
+ while (*p) {
+ parent = *p;
+ le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
+
+ if (vol_id < le1->vol_id)
+ p = &(*p)->rb_left;
+ else if (vol_id > le1->vol_id)
+ p = &(*p)->rb_right;
+ else {
+ ubi_assert(lnum != le1->lnum);
+ if (lnum < le1->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ }
+
+ rb_link_node(&le->rb, parent, p);
+ rb_insert_color(&le->rb, &ubi->ltree);
+ }
+ le->users += 1;
+ spin_unlock(&ubi->ltree_lock);
+
+ kfree(le_free);
+ return le;
+}
+
+/**
+ * leb_read_lock - lock logical eraseblock for reading.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for reading. Returns zero in case
+ * of success and a negative error code in case of failure.
+ */
+static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ le = ltree_add_entry(ubi, vol_id, lnum);
+ if (IS_ERR(le))
+ return PTR_ERR(le);
+ down_read(&le->mutex);
+ return 0;
+}
+
+/**
+ * leb_read_unlock - unlock logical eraseblock.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ */
+static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ spin_lock(&ubi->ltree_lock);
+ le = ltree_lookup(ubi, vol_id, lnum);
+ le->users -= 1;
+ ubi_assert(le->users >= 0);
+ up_read(&le->mutex);
+ if (le->users == 0) {
+ rb_erase(&le->rb, &ubi->ltree);
+ kfree(le);
+ }
+ spin_unlock(&ubi->ltree_lock);
+}
+
+/**
+ * leb_write_lock - lock logical eraseblock for writing.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for writing. Returns zero in case
+ * of success and a negative error code in case of failure.
+ */
+static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ le = ltree_add_entry(ubi, vol_id, lnum);
+ if (IS_ERR(le))
+ return PTR_ERR(le);
+ down_write(&le->mutex);
+ return 0;
+}
+
+/**
+ * leb_write_trylock - try to lock logical eraseblock for writing.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for writing if there is no
+ * contention and does nothing if there is contention. Returns %0 in case of
+ * success, %1 in case of contention, and a negative error code in case of
+ * failure.
+ */
+static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ le = ltree_add_entry(ubi, vol_id, lnum);
+ if (IS_ERR(le))
+ return PTR_ERR(le);
+ if (down_write_trylock(&le->mutex))
+ return 0;
+
+ /* Contention, cancel */
+ spin_lock(&ubi->ltree_lock);
+ le->users -= 1;
+ ubi_assert(le->users >= 0);
+ if (le->users == 0) {
+ rb_erase(&le->rb, &ubi->ltree);
+ kfree(le);
+ }
+ spin_unlock(&ubi->ltree_lock);
+
+ return 1;
+}
+
+/**
+ * leb_write_unlock - unlock logical eraseblock.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ */
+static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ spin_lock(&ubi->ltree_lock);
+ le = ltree_lookup(ubi, vol_id, lnum);
+ le->users -= 1;
+ ubi_assert(le->users >= 0);
+ up_write(&le->mutex);
+ if (le->users == 0) {
+ rb_erase(&le->rb, &ubi->ltree);
+ kfree(le);
+ }
+ spin_unlock(&ubi->ltree_lock);
+}
+
+/**
+ * ubi_eba_is_mapped - check if a LEB is mapped.
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ *
+ * This function returns true if the LEB is mapped, false otherwise.
+ */
+bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum)
+{
+ return vol->eba_tbl->entries[lnum].pnum >= 0;
+}
+
+/**
+ * ubi_eba_unmap_leb - un-map logical eraseblock.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ *
+ * This function un-maps logical eraseblock @lnum and schedules corresponding
+ * physical eraseblock for erasure. Returns zero in case of success and a
+ * negative error code in case of failure.
+ */
+int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum)
+{
+ int err, pnum, vol_id = vol->vol_id;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err)
+ return err;
+
+ pnum = vol->eba_tbl->entries[lnum].pnum;
+ if (pnum < 0)
+ /* This logical eraseblock is already unmapped */
+ goto out_unlock;
+
+ dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
+
+ down_read(&ubi->fm_eba_sem);
+ vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
+ up_read(&ubi->fm_eba_sem);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
+
+out_unlock:
+ leb_write_unlock(ubi, vol_id, lnum);
+ return err;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * check_mapping - check and fixup a mapping
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @pnum: physical eraseblock number
+ *
+ * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
+ * operations: if such an operation is interrupted, the mapping still looks
+ * good, but upon the first read an ECC error is reported to the upper layer.
+ * Normally this is fixed during the full scan at attach time; with Fastmap
+ * we have to deal with it while reading.
+ * If the PEB behind a LEB shows this symptom, we change the mapping to
+ * %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
+ *
+ * Returns 0 on success, negative error code in case of failure.
+ */
+static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ int *pnum)
+{
+ int err;
+ struct ubi_vid_io_buf *vidb;
+ struct ubi_vid_hdr *vid_hdr;
+
+ if (!ubi->fast_attach)
+ return 0;
+
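+ /*
+ * Each LEB is checked at most once: @vol->checkmap records the LEBs
+ * which have already been verified.
+ */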
+ if (!vol->checkmap || test_bit(lnum, vol->checkmap))
+ return 0;
+
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb)
+ return -ENOMEM;
+
+ err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
+ if (err > 0 && err != UBI_IO_BITFLIPS) {
+ int torture = 0;
+
+ switch (err) {
+ case UBI_IO_FF:
+ case UBI_IO_FF_BITFLIPS:
+ case UBI_IO_BAD_HDR:
+ case UBI_IO_BAD_HDR_EBADMSG:
+ break;
+ default:
+ ubi_assert(0);
+ }
+
+ if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
+ torture = 1;
+
+ down_read(&ubi->fm_eba_sem);
+ vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
+ up_read(&ubi->fm_eba_sem);
+ ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
+
+ *pnum = UBI_LEB_UNMAPPED;
+ } else if (err < 0) {
+ ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
+ *pnum, err);
+
+ goto out_free;
+ } else {
+ int found_vol_id, found_lnum;
+
+ ubi_assert(err == 0 || err == UBI_IO_BITFLIPS);
+
+ vid_hdr = ubi_get_vid_hdr(vidb);
+ found_vol_id = be32_to_cpu(vid_hdr->vol_id);
+ found_lnum = be32_to_cpu(vid_hdr->lnum);
+
+ if (found_lnum != lnum || found_vol_id != vol->vol_id) {
+ ubi_err(ubi, "EBA mismatch! PEB %i is LEB %i:%i instead of LEB %i:%i",
+ *pnum, found_vol_id, found_lnum, vol->vol_id, lnum);
+ ubi_ro_mode(ubi);
+ err = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ set_bit(lnum, vol->checkmap);
+ err = 0;
+
+out_free:
+ ubi_free_vid_buf(vidb);
+
+ return err;
+}
+#else
+static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ int *pnum)
+{
+ return 0;
+}
+#endif
+
+/**
+ * ubi_eba_read_leb - read data.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: buffer to store the read data
+ * @offset: offset from where to read
+ * @len: how many bytes to read
+ * @check: data CRC check flag
+ *
+ * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
+ * bytes. The @check flag only makes sense for static volumes and forces
+ * eraseblock data CRC checking.
+ *
+ * In case of success this function returns zero. In case of a static volume,
+ * if the data CRC mismatches, %-EBADMSG is returned. %-EBADMSG may also be
+ * returned for any volume type if an ECC error was detected by the MTD device
+ * driver. Other negative error codes may be returned in case of other errors.
+ */
+int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int offset, int len, int check)
+{
+ int err, pnum, scrub = 0, vol_id = vol->vol_id;
+ struct ubi_vid_io_buf *vidb;
+ struct ubi_vid_hdr *vid_hdr;
+ uint32_t crc;
+
+ err = leb_read_lock(ubi, vol_id, lnum);
+ if (err)
+ return err;
+
+ pnum = vol->eba_tbl->entries[lnum].pnum;
+ if (pnum >= 0) {
+ err = check_mapping(ubi, vol, lnum, &pnum);
+ if (err < 0)
+ goto out_unlock;
+ }
+
+ if (pnum == UBI_LEB_UNMAPPED) {
+ /*
+ * The logical eraseblock is not mapped, fill the whole buffer
+ * with 0xFF bytes. The exception is static volumes for which
+ * it is an error to read unmapped logical eraseblocks.
+ */
+ dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
+ len, offset, vol_id, lnum);
+ leb_read_unlock(ubi, vol_id, lnum);
+ ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
+ memset(buf, 0xFF, len);
+ return 0;
+ }
+
+ dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ check = 0;
+
+retry:
+ if (check) {
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ vid_hdr = ubi_get_vid_hdr(vidb);
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
+ if (err && err != UBI_IO_BITFLIPS) {
+ if (err > 0) {
+ /*
+ * The header is either absent or corrupted.
+ * The former case means there is a bug -
+ * switch to read-only mode just in case.
+ * The latter case means a real corruption - we
+ * may try to recover data. FIXME: but this is
+ * not implemented.
+ */
+ if (err == UBI_IO_BAD_HDR_EBADMSG ||
+ err == UBI_IO_BAD_HDR) {
+ ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
+ pnum, vol_id, lnum);
+ err = -EBADMSG;
+ } else {
+ /*
+ * Ending up here in the non-Fastmap case
+ * is a clear bug as the VID header had to
+ * be present at scan time to have it referenced.
+ * With fastmap the story is more complicated.
+ * Fastmap has the mapping info without the need
+ * of a full scan. So the LEB could have been
+ * unmapped, Fastmap cannot know this and keeps
+ * the LEB referenced.
+ * This is valid and works as the layer above UBI
+ * has to do bookkeeping about used/referenced
+ * LEBs in any case.
+ */
+ if (ubi->fast_attach) {
+ err = -EBADMSG;
+ } else {
+ err = -EINVAL;
+ ubi_ro_mode(ubi);
+ }
+ }
+ }
+ goto out_free;
+ } else if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
+ ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
+
+ crc = be32_to_cpu(vid_hdr->data_crc);
+ ubi_free_vid_buf(vidb);
+ }
+
+ err = ubi_io_read_data(ubi, buf, pnum, offset, len);
+ if (err) {
+ if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+ else if (mtd_is_eccerr(err)) {
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ goto out_unlock;
+ scrub = 1;
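+ /*
+ * For a static volume, re-read with the data CRC check
+ * enabled: a matching CRC can still salvage data the MTD
+ * driver flagged with an ECC error.
+ */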
+ if (!check) {
+ ubi_msg(ubi, "force data checking");
+ check = 1;
+ goto retry;
+ }
+ } else
+ goto out_unlock;
+ }
+
+ if (check) {
+ uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
+ if (crc1 != crc) {
+ ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
+ crc1, crc);
+ err = -EBADMSG;
+ goto out_unlock;
+ }
+ }
+
+ if (scrub)
+ err = ubi_wl_scrub_peb(ubi, pnum);
+
+ leb_read_unlock(ubi, vol_id, lnum);
+ return err;
+
+out_free:
+ ubi_free_vid_buf(vidb);
+out_unlock:
+ leb_read_unlock(ubi, vol_id, lnum);
+ return err;
+}
+
+/**
+ * ubi_eba_read_leb_sg - read data into a scatter gather list.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @sgl: UBI scatter gather list to store the read data
+ * @offset: offset from where to read
+ * @len: how many bytes to read
+ * @check: data CRC check flag
+ *
+ * This function works exactly like ubi_eba_read_leb(), but instead of
+ * storing the read data into a buffer it writes it to an UBI scatter gather
+ * list.
+ */
+int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_sgl *sgl, int lnum, int offset, int len,
+ int check)
+{
+ int to_read;
+ int ret;
+ struct scatterlist *sg;
+
+ for (;;) {
+ ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
+ sg = &sgl->sg[sgl->list_pos];
+ if (len < sg->length - sgl->page_pos)
+ to_read = len;
+ else
+ to_read = sg->length - sgl->page_pos;
+
+ ret = ubi_eba_read_leb(ubi, vol, lnum,
+ sg_virt(sg) + sgl->page_pos, offset,
+ to_read, check);
+ if (ret < 0)
+ return ret;
+
+ offset += to_read;
+ len -= to_read;
+ if (!len) {
+ sgl->page_pos += to_read;
+ if (sgl->page_pos == sg->length) {
+ sgl->list_pos++;
+ sgl->page_pos = 0;
+ }
+
+ break;
+ }
+
+ sgl->list_pos++;
+ sgl->page_pos = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * try_recover_peb - try to recover from write failure.
+ * @vol: volume description object
+ * @pnum: the physical eraseblock to recover
+ * @lnum: logical eraseblock number
+ * @buf: data which was not written because of the write failure
+ * @offset: offset of the failed write
+ * @len: how many bytes should have been written
+ * @vidb: VID buffer
+ * @retry: whether the caller should retry in case of failure
+ *
+ * This function is called in case of a write failure and moves all good data
+ * from the potentially bad physical eraseblock to a good physical eraseblock.
+ * This function also writes the data which was not written due to the failure.
+ * Returns 0 in case of success, and a negative error code in case of failure.
+ * In case of failure, the %retry parameter is set to false if this is a fatal
+ * error (retrying won't help), and true otherwise.
+ */
+static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
+ const void *buf, int offset, int len,
+ struct ubi_vid_io_buf *vidb, bool *retry)
+{
+ struct ubi_device *ubi = vol->ubi;
+ struct ubi_vid_hdr *vid_hdr;
+ int new_pnum, err, vol_id = vol->vol_id, data_size;
+ uint32_t crc;
+
+ *retry = false;
+
+ new_pnum = ubi_wl_get_peb(ubi);
+ if (new_pnum < 0) {
+ err = new_pnum;
+ goto out_put;
+ }
+
+ ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
+ pnum, new_pnum);
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
+ if (err && err != UBI_IO_BITFLIPS) {
+ if (err > 0)
+ err = -EIO;
+ goto out_put;
+ }
+
+ vid_hdr = ubi_get_vid_hdr(vidb);
+ ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
+
+ mutex_lock(&ubi->buf_mutex);
+ memset(ubi->peb_buf + offset, 0xFF, len);
+
+ /* Read everything before the area where the write failure happened */
+ if (offset > 0) {
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
+ if (err && err != UBI_IO_BITFLIPS)
+ goto out_unlock;
+ }
+
+ *retry = true;
+
+ memcpy(ubi->peb_buf + offset, buf, len);
+
+ data_size = offset + len;
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ vid_hdr->copy_flag = 1;
+ vid_hdr->data_size = cpu_to_be32(data_size);
+ vid_hdr->data_crc = cpu_to_be32(crc);
+ err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb);
+ if (err)
+ goto out_unlock;
+
+ err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
+
+out_unlock:
+ mutex_unlock(&ubi->buf_mutex);
+
+ if (!err)
+ vol->eba_tbl->entries[lnum].pnum = new_pnum;
+
+out_put:
+ up_read(&ubi->fm_eba_sem);
+
+ if (!err) {
+ ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+ ubi_msg(ubi, "data was successfully recovered");
+ } else if (new_pnum >= 0) {
+ /*
+ * Bad luck? This physical eraseblock is bad too? Crud. Let's
+ * try to get another one.
+ */
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
+ ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
+ }
+
+ return err;
+}
+
+/**
+ * recover_peb - recover from write failure.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to recover
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ * @buf: data which was not written because of the write failure
+ * @offset: offset of the failed write
+ * @len: how many bytes should have been written
+ *
+ * This function is called in case of a write failure and moves all good data
+ * from the potentially bad physical eraseblock to a good physical eraseblock.
+ * This function also writes the data which was not written due to the failure.
+ * Returns 0 in case of success, and a negative error code in case of failure.
+ * This function retries %UBI_IO_RETRIES times before giving up.
+ */
+static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
+ const void *buf, int offset, int len)
+{
+ int err, idx = vol_id2idx(ubi, vol_id), tries;
+ struct ubi_volume *vol = ubi->volumes[idx];
+ struct ubi_vid_io_buf *vidb;
+
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb)
+ return -ENOMEM;
+
+ for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
+ bool retry;
+
+ err = try_recover_peb(vol, pnum, lnum, buf, offset, len, vidb,
+ &retry);
+ if (!err || !retry)
+ break;
+
+ ubi_msg(ubi, "try again");
+ }
+
+ ubi_free_vid_buf(vidb);
+
+ return err;
+}
+
+/**
+ * try_write_vid_and_data - try to write VID header and data to a new PEB.
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @vidb: the VID buffer to write
+ * @buf: buffer containing the data
+ * @offset: where to start writing data
+ * @len: how many bytes should be written
+ *
+ * This function tries to write VID header and data belonging to logical
+ * eraseblock @lnum of volume @vol to a new physical eraseblock. Returns zero
+ * in case of success and a negative error code in case of failure.
+ * In case of error, it is possible that something was still written to the
+ * flash media, but it may be garbage.
+ */
+static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
+ struct ubi_vid_io_buf *vidb, const void *buf,
+ int offset, int len)
+{
+ struct ubi_device *ubi = vol->ubi;
+ int pnum, opnum, err, err2, vol_id = vol->vol_id;
+
+ pnum = ubi_wl_get_peb(ubi);
+ if (pnum < 0) {
+ err = pnum;
+ goto out_put;
+ }
+
+ opnum = vol->eba_tbl->entries[lnum].pnum;
+
+ dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+
+ err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
+ if (err) {
+ ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
+ vol_id, lnum, pnum);
+ goto out_put;
+ }
+
+ if (len) {
+ err = ubi_io_write_data(ubi, buf, pnum, offset, len);
+ if (err) {
+ ubi_warn(ubi,
+ "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+ goto out_put;
+ }
+ }
+
+ vol->eba_tbl->entries[lnum].pnum = pnum;
+
+out_put:
+ up_read(&ubi->fm_eba_sem);
+
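+	/*
+	 * On failure, give the freshly allocated PEB back for erasure with
+	 * torture enabled, since the write to it failed. On success, release
+	 * the old PEB instead: the LEB now maps to @pnum.
+	 */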
+ if (err && pnum >= 0) {
+ err2 = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+ if (err2) {
+ ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
+ pnum, err2);
+ }
+ } else if (!err && opnum >= 0) {
+ err2 = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
+ if (err2) {
+ ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
+ opnum, err2);
+ }
+ }
+
+ return err;
+}
+
+/**
+ * ubi_eba_write_leb - write data to dynamic volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: the data to write
+ * @offset: offset within the logical eraseblock where to write
+ * @len: how many bytes to write
+ *
+ * This function writes data to logical eraseblock @lnum of a dynamic volume
+ * @vol. Returns zero in case of success and a negative error code in case
+ * of failure. In case of error, it is possible that something was still
+ * written to the flash media, but it may be garbage.
+ * This function retries %UBI_IO_RETRIES times before giving up.
+ */
+int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ const void *buf, int offset, int len)
+{
+ int err, pnum, tries, vol_id = vol->vol_id;
+ struct ubi_vid_io_buf *vidb;
+ struct ubi_vid_hdr *vid_hdr;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err)
+ return err;
+
+ pnum = vol->eba_tbl->entries[lnum].pnum;
+ if (pnum >= 0) {
+ err = check_mapping(ubi, vol, lnum, &pnum);
+ if (err < 0)
+ goto out;
+ }
+
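+	/*
+	 * The LEB is already mapped: the new data can be written in place,
+	 * because UBI users may only write to areas of a LEB which are still
+	 * erased (0xFF) on the flash.
+	 */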
+ if (pnum >= 0) {
+ dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+
+ err = ubi_io_write_data(ubi, buf, pnum, offset, len);
+ if (err) {
+ ubi_warn(ubi, "failed to write data to PEB %d", pnum);
+ if (err == -EIO && ubi->bad_allowed)
+ err = recover_peb(ubi, pnum, vol_id, lnum, buf,
+ offset, len);
+ }
+
+ goto out;
+ }
+
+ /*
+ * The logical eraseblock is not mapped. We have to get a free physical
+ * eraseblock and write the volume identifier header there first.
+ */
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb) {
+ leb_write_unlock(ubi, vol_id, lnum);
+ return -ENOMEM;
+ }
+
+ vid_hdr = ubi_get_vid_hdr(vidb);
+
+ vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
+ vid_hdr->compat = ubi_get_compat(ubi, vol_id);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
+
+ for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
+ err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
+ if (err != -EIO || !ubi->bad_allowed)
+ break;
+
+ /*
+ * Fortunately, this is the first write operation to this
+ * physical eraseblock, so just put it and request a new one.
+ * We assume that if this physical eraseblock went bad, the
+ * erase code will handle that.
+ */
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ubi_msg(ubi, "try another PEB");
+ }
+
+ ubi_free_vid_buf(vidb);
+
+out:
+ if (err)
+ ubi_ro_mode(ubi);
+
+ leb_write_unlock(ubi, vol_id, lnum);
+
+ return err;
+}
+
+/**
+ * ubi_eba_write_leb_st - write data to static volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: data to write
+ * @len: how many bytes to write
+ * @used_ebs: how many logical eraseblocks will this volume contain
+ *
+ * This function writes data to logical eraseblock @lnum of static volume
+ * @vol. The @used_ebs argument should contain the total number of logical
+ * eraseblocks in this static volume.
+ *
+ * When writing to the last logical eraseblock, the @len argument doesn't have
+ * to be aligned to the minimal I/O unit size. Instead, it has to be equal to
+ * the real data size, although the @buf buffer still has to be padded out to
+ * the aligned length. In all other cases, @len has to be aligned.
+ *
+ * It is prohibited to write more than once to logical eraseblocks of static
+ * volumes. This function returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int used_ebs)
+{
+ int err, tries, data_size = len, vol_id = vol->vol_id;
+ struct ubi_vid_io_buf *vidb;
+ struct ubi_vid_hdr *vid_hdr;
+ uint32_t crc;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ if (lnum == used_ebs - 1)
+ /* If this is the last LEB @len may be unaligned */
+ len = ALIGN(data_size, ubi->min_io_size);
+ else
+ ubi_assert(!(len & (ubi->min_io_size - 1)));
+
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb)
+ return -ENOMEM;
+
+ vid_hdr = ubi_get_vid_hdr(vidb);
+
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err)
+ goto out;
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
+ vid_hdr->compat = ubi_get_compat(ubi, vol_id);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
+
+ crc = crc32(UBI_CRC32_INIT, buf, data_size);
+ vid_hdr->vol_type = UBI_VID_STATIC;
+ vid_hdr->data_size = cpu_to_be32(data_size);
+ vid_hdr->used_ebs = cpu_to_be32(used_ebs);
+ vid_hdr->data_crc = cpu_to_be32(crc);
+
+ ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0);
+
+ for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
+ err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
+ if (err != -EIO || !ubi->bad_allowed)
+ break;
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ubi_msg(ubi, "try another PEB");
+ }
+
+ if (err)
+ ubi_ro_mode(ubi);
+
+ leb_write_unlock(ubi, vol_id, lnum);
+
+out:
+ ubi_free_vid_buf(vidb);
+
+ return err;
+}
+
+/**
+ * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: data to write
+ * @len: how many bytes to write
+ *
+ * This function changes the contents of a logical eraseblock atomically. @buf
+ * has to contain new logical eraseblock data, and @len - the length of the
+ * data, which has to be aligned. This function guarantees that in case of an
+ * unclean reboot the old contents are preserved. Returns zero in case of
+ * success and a negative error code in case of failure.
+ *
+ * UBI reserves one LEB for the "atomic LEB change" operation, so only one
+ * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
+ */
+int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len)
+{
+ int err, tries, vol_id = vol->vol_id;
+ struct ubi_vid_io_buf *vidb;
+ struct ubi_vid_hdr *vid_hdr;
+ uint32_t crc;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ if (len == 0) {
+ /*
+ * Special case when data length is zero. In this case the LEB
+ * has to be unmapped and mapped somewhere else.
+ */
+ err = ubi_eba_unmap_leb(ubi, vol, lnum);
+ if (err)
+ return err;
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
+ }
+
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb)
+ return -ENOMEM;
+
+ vid_hdr = ubi_get_vid_hdr(vidb);
+
+ mutex_lock(&ubi->alc_mutex);
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err)
+ goto out_mutex;
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
+ vid_hdr->compat = ubi_get_compat(ubi, vol_id);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
+
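+	/*
+	 * @copy_flag and @data_crc are what make the change atomic: after an
+	 * unclean reboot both the old and the new PEB may carry this LEB, and
+	 * the attach code uses the sequence number and the data CRC to pick
+	 * the copy that was written out completely.
+	 */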
+ crc = crc32(UBI_CRC32_INIT, buf, len);
+ vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->data_size = cpu_to_be32(len);
+ vid_hdr->copy_flag = 1;
+ vid_hdr->data_crc = cpu_to_be32(crc);
+
+ dbg_eba("change LEB %d:%d", vol_id, lnum);
+
+ for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
+ err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
+ if (err != -EIO || !ubi->bad_allowed)
+ break;
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ubi_msg(ubi, "try another PEB");
+ }
+
+ /*
+	 * Either this flash device does not allow for bad eraseblocks, or
+	 * something nasty and unexpected happened. Switch to read-only
+	 * mode just in case.
+ */
+ if (err)
+ ubi_ro_mode(ubi);
+
+ leb_write_unlock(ubi, vol_id, lnum);
+
+out_mutex:
+ mutex_unlock(&ubi->alc_mutex);
+ ubi_free_vid_buf(vidb);
+ return err;
+}
+
+/**
+ * is_error_sane - check whether a read error is sane.
+ * @err: code of the error happened during reading
+ *
+ * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
+ * cannot read data from the target PEB (an error @err happened). If the error
+ * code is sane, then we treat this error as non-fatal. Otherwise the error is
+ * fatal and UBI will be switched to R/O mode later.
+ *
+ * The idea is that we try not to switch to R/O mode if the read error is
+ * something which suggests there was a real read problem. E.g., %-EIO. Or a
+ * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O
+ * mode, simply because we do not know what happened at the MTD level, and we
+ * cannot handle this. E.g., the underlying driver may have become crazy, and
+ * it is safer to switch to R/O mode to preserve the data.
+ *
+ * And bear in mind, this is about reading from the target PEB, i.e. the PEB
+ * which we have just written.
+ */
+static int is_error_sane(int err)
+{
+ if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
+ err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
+		return 1;
+	return 0;
+}
+
+/**
+ * ubi_eba_copy_leb - copy logical eraseblock.
+ * @ubi: UBI device description object
+ * @from: physical eraseblock number from where to copy
+ * @to: physical eraseblock number where to copy
+ * @vidb: data structure from where the VID header is derived
+ *
+ * This function copies logical eraseblock from physical eraseblock @from to
+ * physical eraseblock @to. The VID header stored in @vidb may be changed by
+ * this function. Returns:
+ * o %0 in case of success;
+ * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
+ * o a negative error code in case of failure.
+ */
+int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
+ struct ubi_vid_io_buf *vidb)
+{
+ int err, vol_id, lnum, data_size, aldata_size, idx;
+ struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
+ struct ubi_volume *vol;
+ uint32_t crc;
+
+ ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
+
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+
+ dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
+
+ if (vid_hdr->vol_type == UBI_VID_STATIC) {
+ data_size = be32_to_cpu(vid_hdr->data_size);
+ aldata_size = ALIGN(data_size, ubi->min_io_size);
+ } else
+ data_size = aldata_size =
+ ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
+
+ idx = vol_id2idx(ubi, vol_id);
+ spin_lock(&ubi->volumes_lock);
+ /*
+ * Note, we may race with volume deletion, which means that the volume
+ * this logical eraseblock belongs to might be being deleted. Since the
+ * volume deletion un-maps all the volume's logical eraseblocks, it will
+ * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
+ */
+ vol = ubi->volumes[idx];
+ spin_unlock(&ubi->volumes_lock);
+ if (!vol) {
+ /* No need to do further work, cancel */
+ dbg_wl("volume %d is being removed, cancel", vol_id);
+ return MOVE_CANCEL_RACE;
+ }
+
+ /*
+ * We do not want anybody to write to this logical eraseblock while we
+ * are moving it, so lock it.
+ *
+ * Note, we are using non-waiting locking here, because we cannot sleep
+ * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
+ * unmapping the LEB which is mapped to the PEB we are going to move
+ * (@from). This task locks the LEB and goes sleep in the
+ * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
+ * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
+ * LEB is already locked, we just do not move it and return
+ * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
+ * we do not know the reasons of the contention - it may be just a
+ * normal I/O on this LEB, so we want to re-try.
+ */
+ err = leb_write_trylock(ubi, vol_id, lnum);
+ if (err) {
+ dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
+ return MOVE_RETRY;
+ }
+
+ /*
+ * The LEB might have been put meanwhile, and the task which put it is
+ * probably waiting on @ubi->move_mutex. No need to continue the work,
+ * cancel it.
+ */
+ if (vol->eba_tbl->entries[lnum].pnum != from) {
+ dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
+ vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum);
+ err = MOVE_CANCEL_RACE;
+ goto out_unlock_leb;
+ }
+
+ /*
+ * OK, now the LEB is locked and we can safely start moving it. Since
+ * this function utilizes the @ubi->peb_buf buffer which is shared
+ * with some other functions - we lock the buffer by taking the
+ * @ubi->buf_mutex.
+ */
+ mutex_lock(&ubi->buf_mutex);
+ dbg_wl("read %d bytes of data", aldata_size);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_warn(ubi, "error %d while reading data from PEB %d",
+ err, from);
+ err = MOVE_SOURCE_RD_ERR;
+ goto out_unlock_buf;
+ }
+
+ /*
+ * Now we have got to calculate how much data we have to copy. In
+ * case of a static volume it is fairly easy - the VID header contains
+ * the data size. In case of a dynamic volume it is more difficult - we
+ * have to read the contents, cut 0xFF bytes from the end and copy only
+ * the first part. We must do this to avoid writing 0xFF bytes, as writing
+ * them may have side-effects. Moreover, it is important not to include
+ * those 0xFFs in the CRC, because they may later be filled by data.
+ */
+ if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
+ aldata_size = data_size =
+ ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
+
+ cond_resched();
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
+ cond_resched();
+
+ /*
+ * It may turn out to be that the whole @from physical eraseblock
+ * contains only 0xFF bytes. Then we have to only write the VID header
+ * and do not write any data. This also means we should not set
+ * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
+ */
+ if (data_size > 0) {
+ vid_hdr->copy_flag = 1;
+ vid_hdr->data_size = cpu_to_be32(data_size);
+ vid_hdr->data_crc = cpu_to_be32(crc);
+ }
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+
+ err = ubi_io_write_vid_hdr(ubi, to, vidb);
+ if (err) {
+ if (err == -EIO)
+ err = MOVE_TARGET_WR_ERR;
+ goto out_unlock_buf;
+ }
+
+ cond_resched();
+
+ /* Read the VID header back and check if it was written correctly */
+ err = ubi_io_read_vid_hdr(ubi, to, vidb, 1);
+ if (err) {
+ if (err != UBI_IO_BITFLIPS) {
+ ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
+ err, to);
+ if (is_error_sane(err))
+ err = MOVE_TARGET_RD_ERR;
+ } else
+ err = MOVE_TARGET_BITFLIPS;
+ goto out_unlock_buf;
+ }
+
+ if (data_size > 0) {
+ err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
+ if (err) {
+ if (err == -EIO)
+ err = MOVE_TARGET_WR_ERR;
+ goto out_unlock_buf;
+ }
+
+ cond_resched();
+ }
+
+ ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
+ vol->eba_tbl->entries[lnum].pnum = to;
+
+out_unlock_buf:
+ mutex_unlock(&ubi->buf_mutex);
+out_unlock_leb:
+ leb_write_unlock(ubi, vol_id, lnum);
+ return err;
+}
+
+/**
+ * print_rsvd_warning - warn about not having enough reserved PEBs.
+ * @ubi: UBI device description object
+ * @ai: UBI attach info object
+ *
+ * This is a helper function for 'ubi_eba_init()' which is called when UBI
+ * cannot reserve enough PEBs for bad block handling. This function makes a
+ * decision whether we have to print a warning or not. The algorithm is as
+ * follows:
+ * o if this is a new UBI image, then just print the warning
+ * o if this is an UBI image which has already been used for some time, print
+ *   a warning only if we can reserve less than 10% of the expected number of
+ *   reserved PEBs.
+ *
+ * The idea is that when UBI is used, PEBs become bad, and the reserved pool
+ * of PEBs becomes smaller, which is normal and we do not want to scare users
+ * with a warning every time they attach the MTD device. This was an issue
+ * reported by real users.
+ */
+static void print_rsvd_warning(struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
+{
+ /*
+	 * The 1 << 18 (262144) threshold is picked arbitrarily, just a reasonably
+ * large number to distinguish between newly flashed and used images.
+ */
+ if (ai->max_sqnum > (1 << 18)) {
+ int min = ubi->beb_rsvd_level / 10;
+
+ if (!min)
+ min = 1;
+ if (ubi->beb_rsvd_pebs > min)
+ return;
+ }
+
+ ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
+ ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
+ if (ubi->corr_peb_count)
+ ubi_warn(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+}
+
+/**
+ * self_check_eba - run a self check on the EBA table constructed by fastmap.
+ * @ubi: UBI device description object
+ * @ai_fastmap: UBI attach info object created by fastmap
+ * @ai_scan: UBI attach info object created by scanning
+ *
+ * Returns < 0 in case of an internal error, 0 otherwise.
+ * If a bad EBA table entry is found, it is printed out and
+ * ubi_assert() is triggered.
+ */
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ struct ubi_attach_info *ai_scan)
+{
+ int i, j, num_volumes, ret = 0;
+ int **scan_eba, **fm_eba;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *rb;
+
+ num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
+
+ scan_eba = kmalloc_array(num_volumes, sizeof(*scan_eba), GFP_KERNEL);
+ if (!scan_eba)
+ return -ENOMEM;
+
+ fm_eba = kmalloc_array(num_volumes, sizeof(*fm_eba), GFP_KERNEL);
+ if (!fm_eba) {
+ kfree(scan_eba);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_volumes; i++) {
+ vol = ubi->volumes[i];
+ if (!vol)
+ continue;
+
+ scan_eba[i] = kmalloc_array(vol->reserved_pebs,
+ sizeof(**scan_eba),
+ GFP_KERNEL);
+ if (!scan_eba[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ fm_eba[i] = kmalloc_array(vol->reserved_pebs,
+ sizeof(**fm_eba),
+ GFP_KERNEL);
+ if (!fm_eba[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
+
+ av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+ scan_eba[i][aeb->lnum] = aeb->pnum;
+
+ av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+ fm_eba[i][aeb->lnum] = aeb->pnum;
+
+ for (j = 0; j < vol->reserved_pebs; j++) {
+ if (scan_eba[i][j] != fm_eba[i][j]) {
+ if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
+ fm_eba[i][j] == UBI_LEB_UNMAPPED)
+ continue;
+
+ ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
+ vol->vol_id, j, fm_eba[i][j],
+ scan_eba[i][j]);
+ ubi_assert(0);
+ }
+ }
+ }
+
+out_free:
+ for (i = 0; i < num_volumes; i++) {
+ if (!ubi->volumes[i])
+ continue;
+
+ kfree(scan_eba[i]);
+ kfree(fm_eba[i]);
+ }
+
+ kfree(scan_eba);
+ kfree(fm_eba);
+ return ret;
+}
+
+/**
+ * ubi_eba_init - initialize the EBA sub-system using attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ int i, err, num_volumes;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *rb;
+
+ dbg_eba("initialize EBA sub-system");
+
+ spin_lock_init(&ubi->ltree_lock);
+ mutex_init(&ubi->alc_mutex);
+ ubi->ltree = RB_ROOT;
+
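+	/*
+	 * Continue global sequence numbering right after the highest
+	 * sequence number seen during attach.
+	 */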
+ ubi->global_sqnum = ai->max_sqnum + 1;
+ num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
+
+ for (i = 0; i < num_volumes; i++) {
+ struct ubi_eba_table *tbl;
+
+ vol = ubi->volumes[i];
+ if (!vol)
+ continue;
+
+ cond_resched();
+
+ tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
+ if (IS_ERR(tbl)) {
+ err = PTR_ERR(tbl);
+ goto out_free;
+ }
+
+ ubi_eba_replace_table(vol, tbl);
+
+ av = ubi_find_av(ai, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ if (aeb->lnum >= vol->reserved_pebs) {
+ /*
+ * This may happen in case of an unclean reboot
+ * during re-size.
+ */
+ ubi_move_aeb_to_list(av, aeb, &ai->erase);
+ } else {
+ struct ubi_eba_entry *entry;
+
+ entry = &vol->eba_tbl->entries[aeb->lnum];
+ entry->pnum = aeb->pnum;
+ }
+ }
+ }
+
+ if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
+		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
+ ubi->avail_pebs, EBA_RESERVED_PEBS);
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ err = -ENOSPC;
+ goto out_free;
+ }
+ ubi->avail_pebs -= EBA_RESERVED_PEBS;
+ ubi->rsvd_pebs += EBA_RESERVED_PEBS;
+
+ if (ubi->bad_allowed) {
+ ubi_calculate_reserved(ubi);
+
+ if (ubi->avail_pebs < ubi->beb_rsvd_level) {
+			/* Not enough free physical eraseblocks */
+ ubi->beb_rsvd_pebs = ubi->avail_pebs;
+ print_rsvd_warning(ubi, ai);
+ } else
+ ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
+
+ ubi->avail_pebs -= ubi->beb_rsvd_pebs;
+ ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
+ }
+
+ dbg_eba("EBA sub-system is initialized");
+ return 0;
+
+out_free:
+ for (i = 0; i < num_volumes; i++) {
+ if (!ubi->volumes[i])
+ continue;
+ ubi_eba_replace_table(ubi->volumes[i], NULL);
+ }
+ return err;
+}
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
new file mode 100644
index 0000000000..863f571f1a
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Copyright (c) 2014 sigma star gmbh
+ * Author: Richard Weinberger <richard@nod.at>
+ */
+
+/**
+ * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
+ * @wrk: the work description object
+ */
+static void update_fastmap_work_fn(struct work_struct *wrk)
+{
+ struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
+
+ ubi_update_fastmap(ubi);
+ spin_lock(&ubi->wl_lock);
+ ubi->fm_work_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * find_anchor_wl_entry - find a wear-leveling entry to be used as the anchor PEB.
+ * @root: the RB-tree where to look for
+ */
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e, *victim = NULL;
+ int max_ec = UBI_MAX_ERASECOUNTER;
+
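+	/*
+	 * The anchor has to lie within the first UBI_FM_MAX_START PEBs so
+	 * that attach can find the fastmap by scanning only the beginning of
+	 * the device. Among the candidates, pick the least-worn one; @max_ec
+	 * tracks the lowest erase counter found so far.
+	 */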
+ ubi_rb_for_each_entry(p, e, root, u.rb) {
+ if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
+ victim = e;
+ max_ec = e->ec;
+ }
+ }
+
+ return victim;
+}
+
+static inline void return_unused_peb(struct ubi_device *ubi,
+ struct ubi_wl_entry *e)
+{
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+}
+
+/**
+ * return_unused_pool_pebs - returns unused PEBs to the free tree.
+ * @ubi: UBI device description object
+ * @pool: fastmap pool description object
+ */
+static void return_unused_pool_pebs(struct ubi_device *ubi,
+ struct ubi_fm_pool *pool)
+{
+ int i;
+ struct ubi_wl_entry *e;
+
+ for (i = pool->used; i < pool->size; i++) {
+ e = ubi->lookuptbl[pool->pebs[i]];
+ return_unused_peb(ubi, e);
+ }
+}
+
+/**
+ * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+ * @ubi: UBI device description object
+ * @anchor: This PEB will be used as anchor PEB by fastmap
+ *
+ * The function returns a physical erase block with a given maximal number
+ * and removes it from the wl subsystem.
+ * Must be called with wl_lock held!
+ */
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
+{
+ struct ubi_wl_entry *e = NULL;
+
+ if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ goto out;
+
+ if (anchor)
+ e = find_anchor_wl_entry(&ubi->free);
+ else
+ e = find_mean_wl_entry(ubi, &ubi->free);
+
+ if (!e)
+ goto out;
+
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+
+	/* Remove it from the free list; the WL sub-system
+	 * no longer knows about this erase block. */
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+out:
+ return e;
+}
+
+/**
+ * has_enough_free_count - whether ubi has enough free pebs to fill fm pools.
+ * @ubi: UBI device description object
+ * @is_wl_pool: whether UBI is filling the wear-leveling pool
+ *
+ * This helper function checks whether there are enough free PEBs (not
+ * counting the PEBs consumed by fastmap itself) to fill fm_pool and
+ * fm_wl_pool; the fastmap deduction applies once at least one free PEB has
+ * been placed into fm_wl_pool. For the wear-leveling pool, UBI also has to
+ * reserve free PEBs for bad PEB handling, because new bad PEBs may leave too
+ * few free PEBs for the user volumes.
+ */
+static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
+{
+	int fm_used = 0;	/* fastmap non-anchor PEBs */
+ int beb_rsvd_pebs;
+
+ if (!ubi->free.rb_node)
+ return false;
+
+ beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
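+	/*
+	 * A fastmap consumes fm_size / leb_size PEBs in total; one of them is
+	 * the anchor, hence the non-anchor count computed below must remain
+	 * available on top of the bad-PEB reserve.
+	 */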
+ if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
+ fm_used = ubi->fm_size / ubi->leb_size - 1;
+
+ return ubi->free_count - beb_rsvd_pebs > fm_used;
+}
+
+/**
+ * ubi_refill_pools - refills all fastmap PEB pools.
+ * @ubi: UBI device description object
+ */
+void ubi_refill_pools(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ struct ubi_wl_entry *e;
+ int enough;
+
+ spin_lock(&ubi->wl_lock);
+
+ return_unused_pool_pebs(ubi, wl_pool);
+ return_unused_pool_pebs(ubi, pool);
+
+ wl_pool->size = 0;
+ pool->size = 0;
+
+ if (ubi->fm_anchor) {
+ wl_tree_add(ubi->fm_anchor, &ubi->free);
+ ubi->free_count++;
+ ubi->fm_anchor = NULL;
+ }
+
+ if (!ubi->fm_disabled)
+ /*
+ * All available PEBs are in ubi->free, now is the time to get
+ * the best anchor PEBs.
+ */
+ ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
+
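+	/*
+	 * Fill both pools in lockstep, at most one PEB per pool and
+	 * iteration, until each pool is full or no free PEBs can be spared;
+	 * @enough counts the pools which have already reached max_size.
+	 */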
+ for (;;) {
+ enough = 0;
+ if (pool->size < pool->max_size) {
+ if (!has_enough_free_count(ubi, false))
+ break;
+
+ e = wl_get_wle(ubi);
+ if (!e)
+ break;
+
+ pool->pebs[pool->size] = e->pnum;
+ pool->size++;
+ } else
+ enough++;
+
+ if (wl_pool->size < wl_pool->max_size) {
+ if (!has_enough_free_count(ubi, true))
+ break;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+
+ wl_pool->pebs[wl_pool->size] = e->pnum;
+ wl_pool->size++;
+ } else
+ enough++;
+
+ if (enough == 2)
+ break;
+ }
+
+ wl_pool->used = 0;
+ pool->used = 0;
+
+ spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * produce_free_peb - produce a free physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function tries to make a free PEB by means of synchronous execution of
+ * pending works. This may be needed if, for example, the background thread is
+ * disabled. Returns zero in case of success and a negative error code in case
+ * of failure.
+ */
+static int produce_free_peb(struct ubi_device *ubi)
+{
+ int err;
+
+ while (!ubi->free.rb_node && ubi->works_count) {
+ dbg_wl("do one work synchronously");
+ err = do_work(ubi);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ * Returns with ubi->fm_eba_sem held in read mode!
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int ret, attempts = 0;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+
+again:
+ down_read(&ubi->fm_eba_sem);
+ spin_lock(&ubi->wl_lock);
+
+	/* We also check the WL pool here because at this point we can
+	 * refill it synchronously. */
+ if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_eba_sem);
+ ret = ubi_update_fastmap(ubi);
+ if (ret) {
+ ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
+ down_read(&ubi->fm_eba_sem);
+ return -ENOSPC;
+ }
+ down_read(&ubi->fm_eba_sem);
+ spin_lock(&ubi->wl_lock);
+ }
+
+ if (pool->used == pool->size) {
+ spin_unlock(&ubi->wl_lock);
+ attempts++;
+ if (attempts == 10) {
+ ubi_err(ubi, "Unable to get a free PEB from user WL pool");
+ ret = -ENOSPC;
+ goto out;
+ }
+ up_read(&ubi->fm_eba_sem);
+ ret = produce_free_peb(ubi);
+ if (ret < 0) {
+ down_read(&ubi->fm_eba_sem);
+ goto out;
+ }
+ goto again;
+ }
+
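+	/*
+	 * The pool now holds at least one free PEB: hand it out and put it on
+	 * the protection queue so it is not immediately picked up by
+	 * wear-leveling.
+	 */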
+ ubi_assert(pool->used < pool->size);
+ ret = pool->pebs[pool->used++];
+ prot_queue_add(ubi, ubi->lookuptbl[ret]);
+ spin_unlock(&ubi->wl_lock);
+out:
+ return ret;
+}
+
+/**
+ * next_peb_for_wl - returns next PEB to be used internally by the
+ * WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int pnum;
+
+ if (pool->used == pool->size)
+ return NULL;
+
+ pnum = pool->pebs[pool->used];
+ return ubi->lookuptbl[pnum];
+}
+
+/**
+ * need_wear_leveling - checks whether wear-leveling work should be triggered.
+ * Although UBI fetches free PEBs from 'wl_pool', we check free PEBs from both
+ * 'wl_pool' and 'ubi->free', because a free PEB in the 'ubi->free' tree may
+ * be moved into 'wl_pool' by ubi_refill_pools().
+ *
+ * @ubi: UBI device description object
+ */
+static bool need_wear_leveling(struct ubi_device *ubi)
+{
+ int ec;
+ struct ubi_wl_entry *e;
+
+ if (!ubi->used.rb_node)
+ return false;
+
+ e = next_peb_for_wl(ubi);
+ if (!e) {
+ if (!ubi->free.rb_node)
+ return false;
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ ec = e->ec;
+ } else {
+ ec = e->ec;
+ if (ubi->free.rb_node) {
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ ec = max(ec, e->ec);
+ }
+ }
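+	/* The used tree is sorted by erase counter, so rb_first() yields the
+	 * least-worn used PEB; trigger wear-leveling once the gap between it
+	 * and the candidate free PEB reaches UBI_WL_THRESHOLD.
+	 */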
+ e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+
+ return ec - e->ec >= UBI_WL_THRESHOLD;
+}
+
+/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int pnum;
+
+ ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
+
+ if (pool->used == pool->size) {
+ /* We cannot update the fastmap here because this
+ * function is called in atomic context.
+ * Let's fail here and refill/update it as soon as possible. */
+ if (!ubi->fm_work_scheduled) {
+ ubi->fm_work_scheduled = 1;
+ schedule_work(&ubi->fm_work);
+ }
+ return NULL;
+ }
+
+ pnum = pool->pebs[pool->used++];
+ return ubi->lookuptbl[pnum];
+}
+
+/**
+ * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
+ * @ubi: UBI device description object
+ */
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+{
+ struct ubi_work *wrk;
+ struct ubi_wl_entry *anchor;
+
+ spin_lock(&ubi->wl_lock);
+
+ /* Do we already have an anchor? */
+ if (ubi->fm_anchor) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ /* See if we can find an anchor PEB on the list of free PEBs */
+ anchor = ubi_wl_get_fm_peb(ubi, 1);
+ if (anchor) {
+ ubi->fm_anchor = anchor;
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ ubi->fm_do_produce_anchor = 1;
+ /* No luck, trigger wear leveling to produce a new anchor PEB. */
+ if (ubi->wl_scheduled) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+ ubi->wl_scheduled = 1;
+ spin_unlock(&ubi->wl_lock);
+
+ wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wrk) {
+ spin_lock(&ubi->wl_lock);
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ return -ENOMEM;
+ }
+
+ wrk->func = &wear_leveling_worker;
+ __schedule_ubi_work(ubi, wrk);
+ return 0;
+}
+
+/**
+ * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
+ * sub-system.
+ * see: ubi_wl_put_peb()
+ *
+ * @ubi: UBI device description object
+ * @fm_e: physical eraseblock to return
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if this physical eraseblock has to be tortured
+ */
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
+ int lnum, int torture)
+{
+ struct ubi_wl_entry *e;
+ int vol_id, pnum = fm_e->pnum;
+
+ dbg_wl("PEB %d", pnum);
+
+ ubi_assert(pnum >= 0);
+ ubi_assert(pnum < ubi->peb_count);
+
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+
+	/* This can happen if we recovered from a fastmap for the very
+	 * first time and are now writing a new one. In this case the WL
+	 * sub-system has never seen any PEB used by the original fastmap.
+	 */
+ if (!e) {
+ e = fm_e;
+ ubi_assert(e->ec >= 0);
+ ubi->lookuptbl[pnum] = e;
+ }
+
+ spin_unlock(&ubi->wl_lock);
+
+ vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
+ return schedule_erase(ubi, e, vol_id, lnum, torture, true);
+}
+
+/**
+ * ubi_is_erase_work - checks whether a work is erase work.
+ * @wrk: The work object to be checked
+ */
+int ubi_is_erase_work(struct ubi_work *wrk)
+{
+ return wrk->func == erase_worker;
+}
+
+static void ubi_fastmap_close(struct ubi_device *ubi)
+{
+ int i;
+
+ return_unused_pool_pebs(ubi, &ubi->fm_pool);
+ return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+
+ if (ubi->fm_anchor) {
+ return_unused_peb(ubi, ubi->fm_anchor);
+ ubi->fm_anchor = NULL;
+ }
+
+ if (ubi->fm) {
+ for (i = 0; i < ubi->fm->used_blocks; i++)
+ kfree(ubi->fm->e[i]);
+ }
+ kfree(ubi->fm);
+}
+
+/**
+ * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
+ * See find_mean_wl_entry()
+ *
+ * @ubi: UBI device description object
+ * @e: physical eraseblock to return
+ * @root: RB tree to test against.
+ */
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+ struct ubi_wl_entry *e,
+					       struct rb_root *root)
+{
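+	/*
+	 * If the candidate lies in the anchor area (the first
+	 * UBI_FM_MAX_START PEBs) and no fastmap has been written yet, skip it
+	 * and take the next free entry instead, so the anchor candidate stays
+	 * available for the fastmap.
+	 */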
+ if (e && !ubi->fm_disabled && !ubi->fm &&
+ e->pnum < UBI_FM_MAX_START)
+ e = rb_entry(rb_next(root->rb_node),
+ struct ubi_wl_entry, u.rb);
+
+ return e;
+}
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
new file mode 100644
index 0000000000..28c8151a07
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap.c
@@ -0,0 +1,1697 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Copyright (c) 2014 sigma star gmbh
+ * Author: Richard Weinberger <richard@nod.at>
+ */
+
+#include <linux/crc32.h>
+#include <linux/bitmap.h>
+#include "ubi.h"
+
+/**
+ * init_seen - allocate the bitmap used for fastmap self-check debugging.
+ * @ubi: UBI device description object
+ */
+static inline unsigned long *init_seen(struct ubi_device *ubi)
+{
+ unsigned long *ret;
+
+ if (!ubi_dbg_chk_fastmap(ubi))
+ return NULL;
+
+ ret = bitmap_zalloc(ubi->peb_count, GFP_KERNEL);
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+
+ return ret;
+}
+
+/**
+ * free_seen - free the seen logic bitmap.
+ * @seen: bitmap of @ubi->peb_count bits
+ */
+static inline void free_seen(unsigned long *seen)
+{
+ bitmap_free(seen);
+}
+
+/**
+ * set_seen - mark a PEB as seen.
+ * @ubi: UBI device description object
+ * @pnum: The PEB to be marked as seen
+ * @seen: bitmap of @ubi->peb_count bits
+ */
+static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
+{
+ if (!ubi_dbg_chk_fastmap(ubi) || !seen)
+ return;
+
+ set_bit(pnum, seen);
+}
+
+/**
+ * self_check_seen - check whether all PEBs have been seen by fastmap.
+ * @ubi: UBI device description object
+ * @seen: bitmap of @ubi->peb_count bits
+ */
+static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
+{
+ int pnum, ret = 0;
+
+ if (!ubi_dbg_chk_fastmap(ubi) || !seen)
+ return 0;
+
+ for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+ if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
+ ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
+ * @ubi: UBI device description object
+ */
+size_t ubi_calc_fm_size(struct ubi_device *ubi)
+{
+ size_t size;
+
+ size = sizeof(struct ubi_fm_sb) +
+ sizeof(struct ubi_fm_hdr) +
+ sizeof(struct ubi_fm_scan_pool) +
+ sizeof(struct ubi_fm_scan_pool) +
+ (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
+ (sizeof(struct ubi_fm_eba) +
+ (ubi->peb_count * sizeof(__be32))) +
+ sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+ return roundup(size, ubi->leb_size);
+}
+
+
+/**
+ * new_fm_vbuf() - allocate a new volume header for fastmap usage.
+ * @ubi: UBI device description object
+ * @vol_id: the VID of the new header
+ *
+ * Returns a new struct ubi_vid_io_buf on success.
+ * NULL indicates out of memory.
+ */
+static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
+{
+ struct ubi_vid_io_buf *new;
+ struct ubi_vid_hdr *vh;
+
+ new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ if (!new)
+ goto out;
+
+ vh = ubi_get_vid_hdr(new);
+ vh->vol_type = UBI_VID_DYNAMIC;
+ vh->vol_id = cpu_to_be32(vol_id);
+
+ /* UBI implementations without fastmap support have to delete the
+ * fastmap.
+ */
+ vh->compat = UBI_COMPAT_DELETE;
+
+out:
+ return new;
+}
+
+/**
+ * add_aeb - create and add an attach erase block to a given list.
+ * @ai: UBI attach info object
+ * @list: the target list
+ * @pnum: PEB number of the new attach erase block
+ * @ec: erase counter of the new PEB
+ * @scrub: scrub this PEB after attaching
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
+ int pnum, int ec, int scrub)
+{
+ struct ubi_ainf_peb *aeb;
+
+ aeb = ubi_alloc_aeb(ai, pnum, ec);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->lnum = -1;
+ aeb->scrub = scrub;
+ aeb->copy_flag = aeb->sqnum = 0;
+
+ ai->ec_sum += aeb->ec;
+ ai->ec_count++;
+
+ if (ai->max_ec < aeb->ec)
+ ai->max_ec = aeb->ec;
+
+ if (ai->min_ec > aeb->ec)
+ ai->min_ec = aeb->ec;
+
+ list_add_tail(&aeb->u.list, list);
+
+ return 0;
+}
+
+/**
+ * add_vol - create and add a new volume to ubi_attach_info.
+ * @ai: ubi_attach_info object
+ * @vol_id: VID of the new volume
+ * @used_ebs: number of used EBs
+ * @data_pad: data padding value of the new volume
+ * @vol_type: volume type
+ * @last_eb_bytes: number of bytes in the last LEB
+ *
+ * Returns the new struct ubi_ainf_volume on success.
+ * NULL indicates an error.
+ */
+static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
+ int used_ebs, int data_pad, u8 vol_type,
+ int last_eb_bytes)
+{
+ struct ubi_ainf_volume *av;
+
+ av = ubi_add_av(ai, vol_id);
+ if (IS_ERR(av))
+ return av;
+
+ av->data_pad = data_pad;
+ av->last_data_size = last_eb_bytes;
+ av->compat = 0;
+ av->vol_type = vol_type;
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av->used_ebs = used_ebs;
+
+ dbg_bld("found volume (ID %i)", vol_id);
+ return av;
+}
+
+/**
+ * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
+ * from its original list.
+ * @ai: ubi_attach_info object
+ * @aeb: the to be assigned SEB
+ * @av: target scan volume
+ */
+static void assign_aeb_to_av(struct ubi_attach_info *ai,
+ struct ubi_ainf_peb *aeb,
+ struct ubi_ainf_volume *av)
+{
+ struct ubi_ainf_peb *tmp_aeb;
+ struct rb_node **p = &av->root.rb_node, *parent = NULL;
+
+ while (*p) {
+ parent = *p;
+
+ tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+ if (aeb->lnum != tmp_aeb->lnum) {
+ if (aeb->lnum < tmp_aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+
+ continue;
+ } else
+ break;
+ }
+
+ list_del(&aeb->u.list);
+ av->leb_count++;
+
+ rb_link_node(&aeb->u.rb, parent, p);
+ rb_insert_color(&aeb->u.rb, &av->root);
+}
+
+/**
+ * update_vol - inserts or updates a LEB which was found in a pool.
+ * @ubi: the UBI device object
+ * @ai: attach info object
+ * @av: the volume this LEB belongs to
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
+ struct ubi_ainf_peb *new_aeb)
+{
+ struct rb_node **p = &av->root.rb_node, *parent = NULL;
+ struct ubi_ainf_peb *aeb, *victim;
+ int cmp_res;
+
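+	/*
+	 * @av->root is an RB-tree keyed by LEB number. Descend it to look for
+	 * an existing entry for this LEB; if one is found,
+	 * ubi_compare_lebs() decides which physical copy is newer (bit 0 set
+	 * in the result means @new_aeb wins).
+	 */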
+ while (*p) {
+ parent = *p;
+ aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+
+ if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
+ if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+
+ continue;
+ }
+
+ /* This case can happen if the fastmap gets written
+ * because of a volume change (creation, deletion, ..).
+ * Then a PEB can be within the persistent EBA and the pool.
+ */
+ if (aeb->pnum == new_aeb->pnum) {
+ ubi_assert(aeb->lnum == new_aeb->lnum);
+ ubi_free_aeb(ai, new_aeb);
+
+ return 0;
+ }
+
+ cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
+ if (cmp_res < 0)
+ return cmp_res;
+
+ /* new_aeb is newer */
+ if (cmp_res & 1) {
+ victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
+ if (!victim)
+ return -ENOMEM;
+
+ list_add_tail(&victim->u.list, &ai->erase);
+
+ if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
+ av->last_data_size =
+ be32_to_cpu(new_vh->data_size);
+
+ dbg_bld("vol %i: AEB %i's PEB %i is the newer",
+ av->vol_id, aeb->lnum, new_aeb->pnum);
+
+ aeb->ec = new_aeb->ec;
+ aeb->pnum = new_aeb->pnum;
+ aeb->copy_flag = new_vh->copy_flag;
+ aeb->scrub = new_aeb->scrub;
+ aeb->sqnum = new_aeb->sqnum;
+ ubi_free_aeb(ai, new_aeb);
+
+ /* new_aeb is older */
+ } else {
+ dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
+ av->vol_id, aeb->lnum, new_aeb->pnum);
+ list_add_tail(&new_aeb->u.list, &ai->erase);
+ }
+
+ return 0;
+ }
+ /* This LEB is new, let's add it to the volume */
+
+ if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
+ av->highest_lnum = be32_to_cpu(new_vh->lnum);
+ av->last_data_size = be32_to_cpu(new_vh->data_size);
+ }
+
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av->used_ebs = be32_to_cpu(new_vh->used_ebs);
+
+ av->leb_count++;
+
+ rb_link_node(&new_aeb->u.rb, parent, p);
+ rb_insert_color(&new_aeb->u.rb, &av->root);
+
+ return 0;
+}
+
+/**
+ * process_pool_aeb - we found a non-empty PEB in a pool.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_vid_hdr *new_vh,
+ struct ubi_ainf_peb *new_aeb)
+{
+ int vol_id = be32_to_cpu(new_vh->vol_id);
+ struct ubi_ainf_volume *av;
+
+ if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
+ ubi_free_aeb(ai, new_aeb);
+
+ return 0;
+ }
+
+ /* Find the volume this SEB belongs to */
+ av = ubi_find_av(ai, vol_id);
+ if (!av) {
+ ubi_err(ubi, "orphaned volume in fastmap pool!");
+ ubi_free_aeb(ai, new_aeb);
+ return UBI_BAD_FASTMAP;
+ }
+
+ ubi_assert(vol_id == av->vol_id);
+
+ return update_vol(ubi, ai, av, new_vh, new_aeb);
+}
+
+/**
+ * unmap_peb - unmap a PEB.
+ * If fastmap detects a free PEB in the pool it has to check whether
+ * this PEB has been unmapped after writing the fastmap.
+ *
+ * @ai: UBI attach info object
+ * @pnum: The PEB to be unmapped
+ */
+static void unmap_peb(struct ubi_attach_info *ai, int pnum)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node *node, *node2;
+ struct ubi_ainf_peb *aeb;
+
+ ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
+ if (aeb->pnum == pnum) {
+ rb_erase(&aeb->u.rb, &av->root);
+ av->leb_count--;
+ ubi_free_aeb(ai, aeb);
+ return;
+ }
+ }
+ }
+}
+
+/**
+ * scan_pool - scans a pool for changed (no longer empty) PEBs.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @pebs: an array of all PEB numbers in the pool to be scanned
+ * @pool_size: size of the pool (number of entries in @pebs)
+ * @max_sqnum: pointer to the maximal sequence number
+ * @free: list of PEBs which are most likely free (and go into @ai->free)
+ *
+ * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
+ * < 0 indicates an internal error.
+ */
+static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
+ struct list_head *free)
+{
+ struct ubi_vid_io_buf *vb;
+ struct ubi_vid_hdr *vh;
+ struct ubi_ec_hdr *ech;
+ struct ubi_ainf_peb *new_aeb;
+ int i, pnum, err, ret = 0;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ return -ENOMEM;
+
+ vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ if (!vb) {
+ kfree(ech);
+ return -ENOMEM;
+ }
+
+ vh = ubi_get_vid_hdr(vb);
+
+ dbg_bld("scanning fastmap pool: size = %i", pool_size);
+
+ /*
+ * Now scan all PEBs in the pool to find changes which have been made
+ * after the creation of the fastmap
+ */
+ for (i = 0; i < pool_size; i++) {
+ int scrub = 0;
+ int image_seq;
+
+ pnum = be32_to_cpu(pebs[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ubi_err(ubi, "bad PEB in fastmap pool!");
+ ret = UBI_BAD_FASTMAP;
+ goto out;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
+ pnum, err);
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ } else if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ /*
+ * Older UBI implementations have image_seq set to zero, so
+ * we shouldn't fail if image_seq == 0.
+ */
+ image_seq = be32_to_cpu(ech->image_seq);
+
+ if (image_seq && (image_seq != ubi->image_seq)) {
+ ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
+ be32_to_cpu(ech->image_seq), ubi->image_seq);
+ ret = UBI_BAD_FASTMAP;
+ goto out;
+ }
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
+ if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
+ unsigned long long ec = be64_to_cpu(ech->ec);
+ unmap_peb(ai, pnum);
+ dbg_bld("Adding PEB to free: %i", pnum);
+
+ if (err == UBI_IO_FF_BITFLIPS)
+ scrub = 1;
+
+ ret = add_aeb(ai, free, pnum, ec, scrub);
+ if (ret)
+ goto out;
+ continue;
+ } else if (err == 0 || err == UBI_IO_BITFLIPS) {
+ dbg_bld("Found non empty PEB:%i in pool", pnum);
+
+ if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
+ if (!new_aeb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ new_aeb->lnum = be32_to_cpu(vh->lnum);
+ new_aeb->sqnum = be64_to_cpu(vh->sqnum);
+ new_aeb->copy_flag = vh->copy_flag;
+ new_aeb->scrub = scrub;
+
+ if (*max_sqnum < new_aeb->sqnum)
+ *max_sqnum = new_aeb->sqnum;
+
+ err = process_pool_aeb(ubi, ai, vh, new_aeb);
+ if (err) {
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ }
+ } else {
+ /* We are paranoid and fall back to scanning mode */
+			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ }
+
+ }
+
+out:
+ ubi_free_vid_buf(vb);
+ kfree(ech);
+ return ret;
+}
+
+/**
+ * count_fastmap_pebs - Counts the PEBs found by fastmap.
+ * @ai: The UBI attach info object
+ */
+static int count_fastmap_pebs(struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb;
+ struct ubi_ainf_volume *av;
+ struct rb_node *rb1, *rb2;
+ int n = 0;
+
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ n++;
+
+ list_for_each_entry(aeb, &ai->free, u.list)
+ n++;
+
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ n++;
+
+ return n;
+}
+
+/**
+ * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info object
+ * @fm: the fastmap to be attached
+ *
+ * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
+ * < 0 indicates an internal error.
+ */
+static int ubi_attach_fastmap(struct ubi_device *ubi,
+ struct ubi_attach_info *ai,
+ struct ubi_fastmap_layout *fm)
+{
+ struct list_head used, free;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
+ struct ubi_fm_sb *fmsb;
+ struct ubi_fm_hdr *fmhdr;
+ struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
+ struct ubi_fm_ec *fmec;
+ struct ubi_fm_volhdr *fmvhdr;
+ struct ubi_fm_eba *fm_eba;
+ int ret, i, j, pool_size, wl_pool_size;
+ size_t fm_pos = 0, fm_size = ubi->fm_size;
+ unsigned long long max_sqnum = 0;
+ void *fm_raw = ubi->fm_buf;
+
+ INIT_LIST_HEAD(&used);
+ INIT_LIST_HEAD(&free);
+ ai->min_ec = UBI_MAX_ERASECOUNTER;
+
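+	/*
+	 * The fastmap payload is parsed linearly from @fm_raw: the
+	 * superblock, the fastmap header, the two scan pools, the
+	 * free/used/scrub/erase EC lists and finally one volume header plus
+	 * EBA table per volume. @fm_pos tracks the current offset and is
+	 * bounds-checked against @fm_size after every step.
+	 */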
+ fmsb = (struct ubi_fm_sb *)(fm_raw);
+ ai->max_sqnum = fmsb->sqnum;
+ fm_pos += sizeof(struct ubi_fm_sb);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
+ ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl_wl);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ pool_size = be16_to_cpu(fmpl->size);
+ wl_pool_size = be16_to_cpu(fmpl_wl->size);
+ fm->max_pool_size = be16_to_cpu(fmpl->max_size);
+ fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
+
+ if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
+ ubi_err(ubi, "bad pool size: %i", pool_size);
+ goto fail_bad;
+ }
+
+ if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
+ ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
+ goto fail_bad;
+ }
+
+
+ if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_pool_size < 0) {
+ ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
+ goto fail_bad;
+ }
+
+ if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_wl_pool_size < 0) {
+ ubi_err(ubi, "bad maximal WL pool size: %i",
+ fm->max_wl_pool_size);
+ goto fail_bad;
+ }
+
+ /* read EC values from free list */
+ for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ if (ret)
+ goto fail;
+ }
+
+ /* read EC values from used list */
+ for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ if (ret)
+ goto fail;
+ }
+
+ /* read EC values from scrub list */
+ for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ if (ret)
+ goto fail;
+ }
+
+ /* read EC values from erase list */
+ for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ if (ret)
+ goto fail;
+ }
+
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
+ ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
+
+ /* Iterate over all volumes and read their EBA table */
+ for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
+ fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmvhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
+ ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
+ goto fail_bad;
+ }
+
+ av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
+ be32_to_cpu(fmvhdr->used_ebs),
+ be32_to_cpu(fmvhdr->data_pad),
+ fmvhdr->vol_type,
+ be32_to_cpu(fmvhdr->last_eb_bytes));
+
+ if (IS_ERR(av)) {
+ if (PTR_ERR(av) == -EEXIST)
+ ubi_err(ubi, "volume (ID %i) already exists",
+ fmvhdr->vol_id);
+
+ goto fail_bad;
+ }
+
+ ai->vols_found++;
+ if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
+ ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
+
+ fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fm_eba);
+ fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
+ ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
+ goto fail_bad;
+ }
+
+ for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
+ int pnum = be32_to_cpu(fm_eba->pnum[j]);
+
+ if (pnum < 0)
+ continue;
+
+ aeb = NULL;
+ list_for_each_entry(tmp_aeb, &used, u.list) {
+ if (tmp_aeb->pnum == pnum) {
+ aeb = tmp_aeb;
+ break;
+ }
+ }
+
+ if (!aeb) {
+ ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
+ goto fail_bad;
+ }
+
+ aeb->lnum = j;
+
+ if (av->highest_lnum <= aeb->lnum)
+ av->highest_lnum = aeb->lnum;
+
+ assign_aeb_to_av(ai, aeb, av);
+
+ dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
+ aeb->pnum, aeb->lnum, av->vol_id);
+ }
+ }
+
+ ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
+ if (ret)
+ goto fail;
+
+ ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
+ if (ret)
+ goto fail;
+
+ if (max_sqnum > ai->max_sqnum)
+ ai->max_sqnum = max_sqnum;
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
+ list_move_tail(&tmp_aeb->u.list, &ai->free);
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
+ list_move_tail(&tmp_aeb->u.list, &ai->erase);
+
+ ubi_assert(list_empty(&free));
+
+ /*
+ * If fastmap is leaking PEBs (must not happen), raise a
+ * fat warning and fall back to scanning mode.
+ * We do this here because in ubi_wl_init() it's too late
+ * and we cannot fall back to scanning.
+ */
+ if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
+ ai->bad_peb_count - fm->used_blocks))
+ goto fail_bad;
+
+ return 0;
+
+fail_bad:
+ ret = UBI_BAD_FASTMAP;
+fail:
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
+ list_del(&tmp_aeb->u.list);
+ ubi_free_aeb(ai, tmp_aeb);
+ }
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
+ list_del(&tmp_aeb->u.list);
+ ubi_free_aeb(ai, tmp_aeb);
+ }
+
+ return ret;
+}
+
+/**
+ * find_fm_anchor - find the most recent Fastmap superblock (anchor)
+ * @ai: UBI attach info to be filled
+ */
+static int find_fm_anchor(struct ubi_attach_info *ai)
+{
+ int ret = -1;
+ struct ubi_ainf_peb *aeb;
+ unsigned long long max_sqnum = 0;
+
+ list_for_each_entry(aeb, &ai->fastmap, u.list) {
+ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
+ max_sqnum = aeb->sqnum;
+ ret = aeb->pnum;
+ }
+ }
+
+ return ret;
+}
+
+static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
+ struct ubi_ainf_peb *old)
+{
+ struct ubi_ainf_peb *new;
+
+ new = ubi_alloc_aeb(ai, old->pnum, old->ec);
+ if (!new)
+ return NULL;
+
+ new->vol_id = old->vol_id;
+ new->sqnum = old->sqnum;
+ new->lnum = old->lnum;
+ new->scrub = old->scrub;
+ new->copy_flag = old->copy_flag;
+
+ return new;
+}
+
+/**
+ * ubi_scan_fastmap - scan the fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info to be filled
+ * @scan_ai: UBI attach info from the first 64 PEBs,
+ * used to find the most recent Fastmap data structure
+ *
+ * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
+ * UBI_BAD_FASTMAP if one was found but is not usable.
+ * < 0 indicates an internal error.
+ */
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_attach_info *scan_ai)
+{
+ struct ubi_fm_sb *fmsb, *fmsb2;
+ struct ubi_vid_io_buf *vb;
+ struct ubi_vid_hdr *vh;
+ struct ubi_ec_hdr *ech;
+ struct ubi_fastmap_layout *fm;
+ struct ubi_ainf_peb *aeb;
+ int i, used_blocks, pnum, fm_anchor, ret = 0;
+ size_t fm_size;
+ __be32 crc, tmp_crc;
+ unsigned long long sqnum = 0;
+
+ fm_anchor = find_fm_anchor(scan_ai);
+ if (fm_anchor < 0)
+ return UBI_NO_FASTMAP;
+
+ /* Copy all (possible) fastmap blocks into our new attach structure. */
+ list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
+ struct ubi_ainf_peb *new;
+
+ new = clone_aeb(ai, aeb);
+ if (!new)
+ return -ENOMEM;
+
+ list_add(&new->u.list, &ai->fastmap);
+ }
+
+ down_write(&ubi->fm_protect);
+ memset(ubi->fm_buf, 0, ubi->fm_size);
+
+ fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
+ if (!fmsb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+ if (!fm) {
+ ret = -ENOMEM;
+ kfree(fmsb);
+ goto out;
+ }
+
+ ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
+ if (ret && ret != UBI_IO_BITFLIPS)
+ goto free_fm_sb;
+ else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[0] = 1;
+
+ if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
+ ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ if (fmsb->version != UBI_FM_FMT_VERSION) {
+ ubi_err(ubi, "bad fastmap version: %i, expected: %i",
+ fmsb->version, UBI_FM_FMT_VERSION);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ used_blocks = be32_to_cpu(fmsb->used_blocks);
+ if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
+ ubi_err(ubi, "number of fastmap blocks is invalid: %i",
+ used_blocks);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ fm_size = ubi->leb_size * used_blocks;
+ if (fm_size != ubi->fm_size) {
+ ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
+ fm_size, ubi->fm_size);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
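+
+ /*
+ * Illustrative numbers: with 124 KiB usable LEBs (128 KiB PEBs on
+ * 2 KiB page NAND), a fastmap spanning two blocks has
+ * fm_size = 2 * 124 KiB = 248 KiB.
+ */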
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech) {
+ ret = -ENOMEM;
+ goto free_fm_sb;
+ }
+
+ vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ if (!vb) {
+ ret = -ENOMEM;
+ goto free_hdr;
+ }
+
+ vh = ubi_get_vid_hdr(vb);
+
+ for (i = 0; i < used_blocks; i++) {
+ int image_seq;
+
+ pnum = be32_to_cpu(fmsb->block_loc[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ if (i == 0 && pnum != fm_anchor) {
+ ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
+ pnum, fm_anchor);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
+ i, pnum);
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ } else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[i] = 1;
+
+ image_seq = be32_to_cpu(ech->image_seq);
+ if (!ubi->image_seq)
+ ubi->image_seq = image_seq;
+
+ /*
+ * Older UBI implementations have image_seq set to zero, so
+ * we shouldn't fail if image_seq == 0.
+ */
+ if (image_seq && (image_seq != ubi->image_seq)) {
+ ubi_err(ubi, "wrong image seq:%d instead of %d",
+ be32_to_cpu(ech->image_seq), ubi->image_seq);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
+ i, pnum);
+ goto free_hdr;
+ }
+
+ if (i == 0) {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
+ ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_SB_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ } else {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
+ ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_DATA_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ }
+
+ if (sqnum < be64_to_cpu(vh->sqnum))
+ sqnum = be64_to_cpu(vh->sqnum);
+
+ ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
+ pnum, 0, ubi->leb_size);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
+ "err: %i)", i, pnum, ret);
+ goto free_hdr;
+ }
+ }
+
+ kfree(fmsb);
+ fmsb = NULL;
+
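+ /*
+ * The CRC covers the whole fastmap with the data_crc field itself
+ * zeroed, matching how ubi_write_fastmap() computes it before the
+ * fastmap is written out.
+ */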
+ fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
+ tmp_crc = be32_to_cpu(fmsb2->data_crc);
+ fmsb2->data_crc = 0;
+ crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
+ if (crc != tmp_crc) {
+ ubi_err(ubi, "fastmap data CRC is invalid");
+ ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
+ tmp_crc, crc);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ fmsb2->sqnum = sqnum;
+
+ fm->used_blocks = used_blocks;
+
+ ret = ubi_attach_fastmap(ubi, ai, fm);
+ if (ret) {
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ for (i = 0; i < used_blocks; i++) {
+ struct ubi_wl_entry *e;
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e) {
+ while (i--)
+ kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
+
+ ret = -ENOMEM;
+ goto free_hdr;
+ }
+
+ e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
+ e->ec = be32_to_cpu(fmsb2->block_ec[i]);
+ fm->e[i] = e;
+ }
+
+ ubi->fm = fm;
+ ubi->fm_pool.max_size = ubi->fm->max_pool_size;
+ ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
+ ubi_msg(ubi, "attached by fastmap");
+ ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg(ubi, "fastmap WL pool size: %d",
+ ubi->fm_wl_pool.max_size);
+ ubi->fm_disabled = 0;
+ ubi->fast_attach = 1;
+
+ ubi_free_vid_buf(vb);
+ kfree(ech);
+out:
+ up_write(&ubi->fm_protect);
+ if (ret == UBI_BAD_FASTMAP)
+ ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
+ return ret;
+
+free_hdr:
+ ubi_free_vid_buf(vb);
+ kfree(ech);
+free_fm_sb:
+ kfree(fmsb);
+ kfree(fm);
+ goto out;
+}
+
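+/**
+ * ubi_fastmap_init_checkmap - allocate a volume's LEB check bitmap.
+ * @vol: UBI volume description object
+ * @leb_count: number of LEBs the bitmap has to cover
+ *
+ * A fastmap attach takes the EBA tables on trust instead of reading every
+ * VID header; the bitmap records which LEB mappings have since been
+ * verified, so each one needs to be checked at most once. Nothing is
+ * allocated when the device was attached by a full scan.
+ */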
+int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
+{
+ struct ubi_device *ubi = vol->ubi;
+
+ if (!ubi->fast_attach)
+ return 0;
+
+ vol->checkmap = bitmap_zalloc(leb_count, GFP_KERNEL);
+ if (!vol->checkmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
+{
+ bitmap_free(vol->checkmap);
+}
+
+/**
+ * ubi_write_fastmap - writes a fastmap.
+ * @ubi: UBI device object
+ * @new_fm: the fastmap to be written
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int ubi_write_fastmap(struct ubi_device *ubi,
+ struct ubi_fastmap_layout *new_fm)
+{
+ size_t fm_pos = 0;
+ void *fm_raw;
+ struct ubi_fm_sb *fmsb;
+ struct ubi_fm_hdr *fmh;
+ struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
+ struct ubi_fm_ec *fec;
+ struct ubi_fm_volhdr *fvh;
+ struct ubi_fm_eba *feba;
+ struct ubi_wl_entry *wl_e;
+ struct ubi_volume *vol;
+ struct ubi_vid_io_buf *avbuf, *dvbuf;
+ struct ubi_vid_hdr *avhdr, *dvhdr;
+ struct ubi_work *ubi_wrk;
+ struct rb_node *tmp_rb;
+ int ret, i, j, free_peb_count, used_peb_count, vol_count;
+ int scrub_peb_count, erase_peb_count;
+ unsigned long *seen_pebs;
+
+ fm_raw = ubi->fm_buf;
+ memset(ubi->fm_buf, 0, ubi->fm_size);
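+
+ /*
+ * The writer serializes the same layout the attach code parses:
+ * superblock, fastmap header, the two pools, the four EC lists and
+ * finally a volume header plus EBA table per volume.
+ */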
+
+ avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!avbuf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
+ if (!dvbuf) {
+ ret = -ENOMEM;
+ goto out_free_avbuf;
+ }
+
+ avhdr = ubi_get_vid_hdr(avbuf);
+ dvhdr = ubi_get_vid_hdr(dvbuf);
+
+ seen_pebs = init_seen(ubi);
+ if (IS_ERR(seen_pebs)) {
+ ret = PTR_ERR(seen_pebs);
+ goto out_free_dvbuf;
+ }
+
+ spin_lock(&ubi->volumes_lock);
+ spin_lock(&ubi->wl_lock);
+
+ fmsb = (struct ubi_fm_sb *)fm_raw;
+ fm_pos += sizeof(*fmsb);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmh);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
+ fmsb->version = UBI_FM_FMT_VERSION;
+ fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
+ /* the max sqnum will be filled in while *reading* the fastmap */
+ fmsb->sqnum = 0;
+
+ fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
+ free_peb_count = 0;
+ used_peb_count = 0;
+ scrub_peb_count = 0;
+ erase_peb_count = 0;
+ vol_count = 0;
+
+ fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl);
+ fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl->size = cpu_to_be16(ubi->fm_pool.size);
+ fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
+
+ for (i = 0; i < ubi->fm_pool.size; i++) {
+ fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
+ set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
+ }
+
+ fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl_wl);
+ fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
+ fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
+
+ for (i = 0; i < ubi->fm_wl_pool.size; i++) {
+ fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
+ set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
+ }
+
+ ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ free_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->free_peb_count = cpu_to_be32(free_peb_count);
+
+ ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ used_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+
+ ubi_for_each_protected_peb(ubi, i, wl_e) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ used_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->used_peb_count = cpu_to_be32(used_peb_count);
+
+ ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ scrub_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
+
+ list_for_each_entry(ubi_wrk, &ubi->works, list) {
+ if (ubi_is_erase_work(ubi_wrk)) {
+ wl_e = ubi_wrk->e;
+ ubi_assert(wl_e);
+
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ erase_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ }
+ fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
+
+ for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
+ vol = ubi->volumes[i];
+
+ if (!vol)
+ continue;
+
+ vol_count++;
+
+ fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fvh);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
+ fvh->vol_id = cpu_to_be32(vol->vol_id);
+ fvh->vol_type = vol->vol_type;
+ fvh->used_ebs = cpu_to_be32(vol->used_ebs);
+ fvh->data_pad = cpu_to_be32(vol->data_pad);
+ fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
+
+ ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
+ vol->vol_type == UBI_STATIC_VOLUME);
+
+ feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ for (j = 0; j < vol->reserved_pebs; j++) {
+ struct ubi_eba_leb_desc ldesc;
+
+ ubi_eba_get_ldesc(vol, j, &ldesc);
+ feba->pnum[j] = cpu_to_be32(ldesc.pnum);
+ }
+
+ feba->reserved_pebs = cpu_to_be32(j);
+ feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
+ }
+ fmh->vol_count = cpu_to_be32(vol_count);
+ fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
+
+ avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ avhdr->lnum = 0;
+
+ spin_unlock(&ubi->wl_lock);
+ spin_unlock(&ubi->volumes_lock);
+
+ dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
+ if (ret) {
+ ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
+ goto out_free_seen;
+ }
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
+ set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
+ fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
+ }
+
+ fmsb->data_crc = 0;
+ fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
+ ubi->fm_size));
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ dvhdr->lnum = cpu_to_be32(i);
+ dbg_bld("writing fastmap data to PEB %i sqnum %llu",
+ new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
+ if (ret) {
+ ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
+ new_fm->e[i]->pnum);
+ goto out_free_seen;
+ }
+ }
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
+ new_fm->e[i]->pnum, 0, ubi->leb_size);
+ if (ret) {
+ ubi_err(ubi, "unable to write fastmap to PEB %i!",
+ new_fm->e[i]->pnum);
+ goto out_free_seen;
+ }
+ }
+
+ ubi_assert(new_fm);
+ ubi->fm = new_fm;
+
+ ret = self_check_seen(ubi, seen_pebs);
+ dbg_bld("fastmap written!");
+
+out_free_seen:
+ free_seen(seen_pebs);
+out_free_dvbuf:
+ ubi_free_vid_buf(dvbuf);
+out_free_avbuf:
+ ubi_free_vid_buf(avbuf);
+
+out:
+ return ret;
+}
+
+/**
+ * erase_block - Manually erase a PEB.
+ * @ubi: UBI device object
+ * @pnum: PEB to be erased
+ *
+ * Returns the new EC value on success, < 0 indicates an internal error.
+ */
+static int erase_block(struct ubi_device *ubi, int pnum)
+{
+ int ret;
+ struct ubi_ec_hdr *ec_hdr;
+ long long ec;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
+ if (ret < 0)
+ goto out;
+ else if (ret && ret != UBI_IO_BITFLIPS) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ubi_io_sync_erase(ubi, pnum, 0);
+ if (ret < 0)
+ goto out;
+
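+ /*
+ * ubi_io_sync_erase() returned the number of erase operations
+ * performed (1 here, since torturing is disabled), so the stored
+ * erase counter grows by that amount.
+ */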
+ ec = be64_to_cpu(ec_hdr->ec);
+ ec += ret;
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ec_hdr->ec = cpu_to_be64(ec);
+ ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+ if (ret < 0)
+ goto out;
+
+ ret = ec;
+out:
+ kfree(ec_hdr);
+ return ret;
+}
+
+/**
+ * invalidate_fastmap - destroys a fastmap.
+ * @ubi: UBI device object
+ *
+ * This function ensures that upon the next UBI attach a full scan
+ * is issued. We need this if UBI is about to write a new fastmap
+ * but is unable to do so. In this case we have two options:
+ * a) Make sure that the current fastmap will not be used at
+ * attach time and continue, or b) fall back to RO mode to keep the
+ * current fastmap in a valid state.
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int invalidate_fastmap(struct ubi_device *ubi)
+{
+ int ret;
+ struct ubi_fastmap_layout *fm;
+ struct ubi_wl_entry *e;
+ struct ubi_vid_io_buf *vb = NULL;
+ struct ubi_vid_hdr *vh;
+
+ if (!ubi->fm)
+ return 0;
+
+ ubi->fm = NULL;
+
+ ret = -ENOMEM;
+ fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+ if (!fm)
+ goto out;
+
+ vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!vb)
+ goto out_free_fm;
+
+ vh = ubi_get_vid_hdr(vb);
+
+ ret = -ENOSPC;
+ e = ubi_wl_get_fm_peb(ubi, 1);
+ if (!e)
+ goto out_free_fm;
+
+ /*
+ * Create fake fastmap such that UBI will fall back
+ * to scanning mode.
+ */
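+ /*
+ * Only an anchor VID header with a fresh sequence number is written,
+ * with no fastmap payload behind it. The next attach picks this
+ * newest anchor, fails to parse a valid fastmap and falls back to a
+ * full scan.
+ */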
+ vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
+ if (ret < 0) {
+ ubi_wl_put_fm_peb(ubi, e, 0, 0);
+ goto out_free_fm;
+ }
+
+ fm->used_blocks = 1;
+ fm->e[0] = e;
+
+ ubi->fm = fm;
+
+out:
+ ubi_free_vid_buf(vb);
+ return ret;
+
+out_free_fm:
+ kfree(fm);
+ goto out;
+}
+
+/**
+ * return_fm_pebs - returns all PEBs used by a fastmap back to the
+ * WL sub-system.
+ * @ubi: UBI device object
+ * @fm: fastmap layout object
+ */
+static void return_fm_pebs(struct ubi_device *ubi,
+ struct ubi_fastmap_layout *fm)
+{
+ int i;
+
+ if (!fm)
+ return;
+
+ for (i = 0; i < fm->used_blocks; i++) {
+ if (fm->e[i]) {
+ ubi_wl_put_fm_peb(ubi, fm->e[i], i,
+ fm->to_be_tortured[i]);
+ fm->e[i] = NULL;
+ }
+ }
+}
+
+/**
+ * ubi_update_fastmap - will be called by UBI if a volume changes or
+ * a fastmap pool becomes full.
+ * @ubi: UBI device object
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+int ubi_update_fastmap(struct ubi_device *ubi)
+{
+ int ret, i, j;
+ struct ubi_fastmap_layout *new_fm, *old_fm;
+ struct ubi_wl_entry *tmp_e;
+
+ down_write(&ubi->fm_protect);
+ down_write(&ubi->work_sem);
+ down_write(&ubi->fm_eba_sem);
+
+ ubi_refill_pools(ubi);
+
+ if (ubi->ro_mode || ubi->fm_disabled) {
+ up_write(&ubi->fm_eba_sem);
+ up_write(&ubi->work_sem);
+ up_write(&ubi->fm_protect);
+ return 0;
+ }
+
+ new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+ if (!new_fm) {
+ up_write(&ubi->fm_eba_sem);
+ up_write(&ubi->work_sem);
+ up_write(&ubi->fm_protect);
+ return -ENOMEM;
+ }
+
+ new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
+ old_fm = ubi->fm;
+ ubi->fm = NULL;
+
+ if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
+ ubi_err(ubi, "fastmap too large");
+ ret = -ENOSPC;
+ goto err;
+ }
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ spin_lock(&ubi->wl_lock);
+ tmp_e = ubi_wl_get_fm_peb(ubi, 0);
+ spin_unlock(&ubi->wl_lock);
+
+ if (!tmp_e) {
+ if (old_fm && old_fm->e[i]) {
+ ret = erase_block(ubi, old_fm->e[i]->pnum);
+ if (ret < 0) {
+ ubi_err(ubi, "could not erase old fastmap PEB");
+
+ for (j = 1; j < i; j++) {
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j],
+ j, 0);
+ new_fm->e[j] = NULL;
+ }
+ goto err;
+ }
+ new_fm->e[i] = old_fm->e[i];
+ old_fm->e[i] = NULL;
+ } else {
+ ubi_err(ubi, "could not get any free erase block");
+
+ for (j = 1; j < i; j++) {
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
+ new_fm->e[j] = NULL;
+ }
+
+ ret = -ENOSPC;
+ goto err;
+ }
+ } else {
+ new_fm->e[i] = tmp_e;
+
+ if (old_fm && old_fm->e[i]) {
+ ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
+ old_fm->to_be_tortured[i]);
+ old_fm->e[i] = NULL;
+ }
+ }
+ }
+
+ /* Old fastmap is larger than the new one */
+ if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
+ for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
+ ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
+ old_fm->to_be_tortured[i]);
+ old_fm->e[i] = NULL;
+ }
+ }
+
+ spin_lock(&ubi->wl_lock);
+ tmp_e = ubi->fm_anchor;
+ ubi->fm_anchor = NULL;
+ spin_unlock(&ubi->wl_lock);
+
+ if (old_fm) {
+ /* no fresh anchor PEB was found, reuse the old one */
+ if (!tmp_e) {
+ ret = erase_block(ubi, old_fm->e[0]->pnum);
+ if (ret < 0) {
+ ubi_err(ubi, "could not erase old anchor PEB");
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ ubi_wl_put_fm_peb(ubi, new_fm->e[i],
+ i, 0);
+ new_fm->e[i] = NULL;
+ }
+ goto err;
+ }
+ new_fm->e[0] = old_fm->e[0];
+ new_fm->e[0]->ec = ret;
+ old_fm->e[0] = NULL;
+ } else {
+ /* we've got a new anchor PEB, return the old one */
+ ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
+ old_fm->to_be_tortured[0]);
+ new_fm->e[0] = tmp_e;
+ old_fm->e[0] = NULL;
+ }
+ } else {
+ if (!tmp_e) {
+ ubi_err(ubi, "could not find any anchor PEB");
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
+ new_fm->e[i] = NULL;
+ }
+
+ ret = -ENOSPC;
+ goto err;
+ }
+ new_fm->e[0] = tmp_e;
+ }
+
+ ret = ubi_write_fastmap(ubi, new_fm);
+
+ if (ret)
+ goto err;
+
+out_unlock:
+ up_write(&ubi->fm_eba_sem);
+ up_write(&ubi->work_sem);
+ up_write(&ubi->fm_protect);
+ kfree(old_fm);
+
+ ubi_ensure_anchor_pebs(ubi);
+
+ return ret;
+
+err:
+ ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
+
+ ret = invalidate_fastmap(ubi);
+ if (ret < 0) {
+ ubi_err(ubi, "Unable to invalidate current fastmap!");
+ ubi_ro_mode(ubi);
+ } else {
+ return_fm_pebs(ubi, old_fm);
+ return_fm_pebs(ubi, new_fm);
+ ret = 0;
+ }
+
+ kfree(new_fm);
+ goto out_unlock;
+}
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
new file mode 100644
index 0000000000..1b980d15d9
--- /dev/null
+++ b/drivers/mtd/ubi/gluebi.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём), Joern Engel
+ */
+
+/*
+ * This is a small driver which implements fake MTD devices on top of UBI
+ * volumes. This sounds strange, but it is in fact quite useful to make
+ * MTD-oriented software (including all the legacy software) work on top of
+ * UBI.
+ *
+ * Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit
+ * size (@mtd->writesize) is equivalent to the UBI minimal I/O unit. The
+ * eraseblock size is equivalent to the logical eraseblock size of the volume.
+ */
+
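+/*
+ * Illustrative example: for a volume on NAND with 2 KiB pages and 128 KiB
+ * PEBs, the emulated MTD device would typically report writesize = 2 KiB
+ * and erasesize = 124 KiB, since UBI reserves two pages of each PEB for
+ * the EC and VID headers.
+ */
+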
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mtd/ubi.h>
+#include <linux/mtd/mtd.h>
+#include "ubi-media.h"
+
+#define err_msg(fmt, ...) \
+ pr_err("gluebi (pid %d): %s: " fmt "\n", \
+ current->pid, __func__, ##__VA_ARGS__)
+
+/**
+ * struct gluebi_device - a gluebi device description data structure.
+ * @mtd: emulated MTD device description object
+ * @refcnt: gluebi device reference count
+ * @desc: UBI volume descriptor
+ * @ubi_num: UBI device number this gluebi device works on
+ * @vol_id: ID of UBI volume this gluebi device works on
+ * @list: link in a list of gluebi devices
+ */
+struct gluebi_device {
+ struct mtd_info mtd;
+ int refcnt;
+ struct ubi_volume_desc *desc;
+ int ubi_num;
+ int vol_id;
+ struct list_head list;
+};
+
+/* List of all gluebi devices */
+static LIST_HEAD(gluebi_devices);
+static DEFINE_MUTEX(devices_mutex);
+
+/**
+ * find_gluebi_nolock - find a gluebi device.
+ * @ubi_num: UBI device number
+ * @vol_id: volume ID
+ *
+ * This function searches for the gluebi device corresponding to UBI device
+ * @ubi_num and UBI volume @vol_id. Returns the gluebi device description
+ * object in case of success and %NULL in case of failure. The caller has to
+ * have the &devices_mutex locked.
+ */
+static struct gluebi_device *find_gluebi_nolock(int ubi_num, int vol_id)
+{
+ struct gluebi_device *gluebi;
+
+ list_for_each_entry(gluebi, &gluebi_devices, list)
+ if (gluebi->ubi_num == ubi_num && gluebi->vol_id == vol_id)
+ return gluebi;
+ return NULL;
+}
+
+/**
+ * gluebi_get_device - get MTD device reference.
+ * @mtd: the MTD device description object
+ *
+ * This function is called every time the MTD device is being opened and
+ * implements the MTD get_device() operation. Returns zero in case of success
+ * and a negative error code in case of failure.
+ */
+static int gluebi_get_device(struct mtd_info *mtd)
+{
+ struct gluebi_device *gluebi;
+ int ubi_mode = UBI_READONLY;
+
+ if (mtd->flags & MTD_WRITEABLE)
+ ubi_mode = UBI_READWRITE;
+
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
+ mutex_lock(&devices_mutex);
+ if (gluebi->refcnt > 0) {
+ /*
+ * The MTD device is already referenced and this is just one
+ * more reference. MTD allows many users to open the same
+ * volume simultaneously and does not distinguish between
+ * readers/writers/exclusive/meta openers as UBI does. So we do
+ * not open the UBI volume again - just increase the reference
+ * counter and return.
+ */
+ gluebi->refcnt += 1;
+ mutex_unlock(&devices_mutex);
+ return 0;
+ }
+
+ /*
+ * This is the first reference to this UBI volume via the MTD device
+ * interface. Open the corresponding volume in the requested mode.
+ */
+ gluebi->desc = ubi_open_volume(gluebi->ubi_num, gluebi->vol_id,
+ ubi_mode);
+ if (IS_ERR(gluebi->desc)) {
+ mutex_unlock(&devices_mutex);
+ return PTR_ERR(gluebi->desc);
+ }
+ gluebi->refcnt += 1;
+ mutex_unlock(&devices_mutex);
+ return 0;
+}
+
+/**
+ * gluebi_put_device - put MTD device reference.
+ * @mtd: the MTD device description object
+ *
+ * This function is called every time the MTD device is being put and
+ * drops the volume reference, closing the UBI volume when the last
+ * reference goes away.
+ */
+static void gluebi_put_device(struct mtd_info *mtd)
+{
+ struct gluebi_device *gluebi;
+
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
+ mutex_lock(&devices_mutex);
+ gluebi->refcnt -= 1;
+ if (gluebi->refcnt == 0)
+ ubi_close_volume(gluebi->desc);
+ mutex_unlock(&devices_mutex);
+}
+
+/**
+ * gluebi_read - read operation of emulated MTD devices.
+ * @mtd: MTD device description object
+ * @from: absolute offset from where to read
+ * @len: how many bytes to read
+ * @retlen: count of read bytes is returned here
+ * @buf: buffer to store the read data
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, unsigned char *buf)
+{
+ int err = 0, lnum, offs, bytes_left;
+ struct gluebi_device *gluebi;
+
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
+ lnum = div_u64_rem(from, mtd->erasesize, &offs);
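+ /*
+ * Example: with a 128 KiB erasesize, a read at from = 300 KiB maps
+ * to lnum = 2, offs = 44 KiB; the loop below reads the remaining
+ * 84 KiB of LEB 2 and continues at offset 0 of LEB 3.
+ */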
+ bytes_left = len;
+ while (bytes_left) {
+ size_t to_read = mtd->erasesize - offs;
+
+ if (to_read > bytes_left)
+ to_read = bytes_left;
+
+ err = ubi_read(gluebi->desc, lnum, buf, offs, to_read);
+ if (err)
+ break;
+
+ lnum += 1;
+ offs = 0;
+ bytes_left -= to_read;
+ buf += to_read;
+ }
+
+ *retlen = len - bytes_left;
+ return err;
+}
+
+/**
+ * gluebi_write - write operation of emulated MTD devices.
+ * @mtd: MTD device description object
+ * @to: absolute offset where to write
+ * @len: how many bytes to write
+ * @retlen: count of written bytes is returned here
+ * @buf: buffer with data to write
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ int err = 0, lnum, offs, bytes_left;
+ struct gluebi_device *gluebi;
+
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
+ lnum = div_u64_rem(to, mtd->erasesize, &offs);
+
+ if (len % mtd->writesize || offs % mtd->writesize)
+ return -EINVAL;
+
+ bytes_left = len;
+ while (bytes_left) {
+ size_t to_write = mtd->erasesize - offs;
+
+ if (to_write > bytes_left)
+ to_write = bytes_left;
+
+ err = ubi_leb_write(gluebi->desc, lnum, buf, offs, to_write);
+ if (err)
+ break;
+
+ lnum += 1;
+ offs = 0;
+ bytes_left -= to_write;
+ buf += to_write;
+ }
+
+ *retlen = len - bytes_left;
+ return err;
+}
+
+/**
+ * gluebi_erase - erase operation of emulated MTD devices.
+ * @mtd: the MTD device description object
+ * @instr: the erase operation description
+ *
+ * This function unmaps all but the last LEB in the range and synchronously
+ * erases the last one. Returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ int err, i, lnum, count;
+ struct gluebi_device *gluebi;
+
+ if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
+ return -EINVAL;
+
+ lnum = mtd_div_by_eb(instr->addr, mtd);
+ count = mtd_div_by_eb(instr->len, mtd);
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
+
+ for (i = 0; i < count - 1; i++) {
+ err = ubi_leb_unmap(gluebi->desc, lnum + i);
+ if (err)
+ goto out_err;
+ }
+ /*
+ * MTD erase operations are synchronous, so we have to make sure the
+ * physical eraseblock is wiped out.
+ *
+ * Thus, perform leb_erase instead of leb_unmap operation - leb_erase
+ * will wait for the end of operations
+ */
+ err = ubi_leb_erase(gluebi->desc, lnum + i);
+ if (err)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ instr->fail_addr = (long long)lnum * mtd->erasesize;
+ return err;
+}
+
+/**
+ * gluebi_create - create a gluebi device for an UBI volume.
+ * @di: UBI device description object
+ * @vi: UBI volume description object
+ *
+ * This function is called when a new UBI volume is created in order to create
+ * a corresponding fake MTD device. Returns zero in case of success and a
+ * negative error code in case of failure.
+ */
+static int gluebi_create(struct ubi_device_info *di,
+ struct ubi_volume_info *vi)
+{
+ struct gluebi_device *gluebi, *g;
+ struct mtd_info *mtd;
+
+ gluebi = kzalloc(sizeof(struct gluebi_device), GFP_KERNEL);
+ if (!gluebi)
+ return -ENOMEM;
+
+ mtd = &gluebi->mtd;
+ mtd->name = kmemdup(vi->name, vi->name_len + 1, GFP_KERNEL);
+ if (!mtd->name) {
+ kfree(gluebi);
+ return -ENOMEM;
+ }
+
+ gluebi->vol_id = vi->vol_id;
+ gluebi->ubi_num = vi->ubi_num;
+ mtd->type = MTD_UBIVOLUME;
+ if (!di->ro_mode)
+ mtd->flags = MTD_WRITEABLE;
+ mtd->owner = THIS_MODULE;
+ mtd->writesize = di->min_io_size;
+ mtd->erasesize = vi->usable_leb_size;
+ mtd->_read = gluebi_read;
+ mtd->_write = gluebi_write;
+ mtd->_erase = gluebi_erase;
+ mtd->_get_device = gluebi_get_device;
+ mtd->_put_device = gluebi_put_device;
+
+ /*
+ * In case of a dynamic volume, MTD device size is just the volume size. In
+ * case of a static volume the size is equivalent to the amount of data
+ * bytes.
+ */
+ if (vi->vol_type == UBI_DYNAMIC_VOLUME)
+ mtd->size = (unsigned long long)vi->usable_leb_size * vi->size;
+ else
+ mtd->size = vi->used_bytes;
+
+ /* Just a sanity check - make sure this gluebi device does not exist */
+ mutex_lock(&devices_mutex);
+ g = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (g)
+ err_msg("gluebi MTD device %d form UBI device %d volume %d already exists",
+ g->mtd.index, vi->ubi_num, vi->vol_id);
+ mutex_unlock(&devices_mutex);
+
+ if (mtd_device_register(mtd, NULL, 0)) {
+ err_msg("cannot add MTD device");
+ kfree(mtd->name);
+ kfree(gluebi);
+ return -ENFILE;
+ }
+
+ mutex_lock(&devices_mutex);
+ list_add_tail(&gluebi->list, &gluebi_devices);
+ mutex_unlock(&devices_mutex);
+ return 0;
+}
+
+/**
+ * gluebi_remove - remove a gluebi device.
+ * @vi: UBI volume description object
+ *
+ * This function is called when an UBI volume is removed and it removes
+ * the corresponding fake MTD device. Returns zero in case of success and a
+ * negative error code in case of failure.
+ */
+static int gluebi_remove(struct ubi_volume_info *vi)
+{
+ int err = 0;
+ struct mtd_info *mtd;
+ struct gluebi_device *gluebi;
+
+ mutex_lock(&devices_mutex);
+ gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (!gluebi) {
+ err_msg("got remove notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
+ err = -ENOENT;
+ } else if (gluebi->refcnt)
+ err = -EBUSY;
+ else
+ list_del(&gluebi->list);
+ mutex_unlock(&devices_mutex);
+ if (err)
+ return err;
+
+ mtd = &gluebi->mtd;
+ err = mtd_device_unregister(mtd);
+ if (err) {
+ err_msg("cannot remove fake MTD device %d, UBI device %d, volume %d, error %d",
+ mtd->index, gluebi->ubi_num, gluebi->vol_id, err);
+ mutex_lock(&devices_mutex);
+ list_add_tail(&gluebi->list, &gluebi_devices);
+ mutex_unlock(&devices_mutex);
+ return err;
+ }
+
+ kfree(mtd->name);
+ kfree(gluebi);
+ return 0;
+}
+
+/**
+ * gluebi_updated - UBI volume was updated notifier.
+ * @vi: volume info structure
+ *
+ * This function is called every time an UBI volume is updated. It does nothing
+ * if the volume @vi is dynamic, and changes the MTD device size if the
+ * volume is static. This is needed because static volumes cannot be read past
+ * data they contain. This function returns zero in case of success and a
+ * negative error code in case of error.
+ */
+static int gluebi_updated(struct ubi_volume_info *vi)
+{
+ struct gluebi_device *gluebi;
+
+ mutex_lock(&devices_mutex);
+ gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (!gluebi) {
+ mutex_unlock(&devices_mutex);
+ err_msg("got update notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
+ return -ENOENT;
+ }
+
+ if (vi->vol_type == UBI_STATIC_VOLUME)
+ gluebi->mtd.size = vi->used_bytes;
+ mutex_unlock(&devices_mutex);
+ return 0;
+}
+
+/**
+ * gluebi_resized - UBI volume was re-sized notifier.
+ * @vi: volume info structure
+ *
+ * This function is called every time an UBI volume is re-sized. It changes the
+ * corresponding fake MTD device size. This function returns zero in case of
+ * success and a negative error code in case of error.
+ */
+static int gluebi_resized(struct ubi_volume_info *vi)
+{
+ struct gluebi_device *gluebi;
+
+ mutex_lock(&devices_mutex);
+ gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (!gluebi) {
+ mutex_unlock(&devices_mutex);
+ err_msg("got update notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
+ return -ENOENT;
+ }
+ gluebi->mtd.size = vi->used_bytes;
+ mutex_unlock(&devices_mutex);
+ return 0;
+}
+
+/**
+ * gluebi_notify - UBI notification handler.
+ * @nb: registered notifier block
+ * @l: notification type
+ * @ns_ptr: pointer to the &struct ubi_notification object
+ */
+static int gluebi_notify(struct notifier_block *nb, unsigned long l,
+ void *ns_ptr)
+{
+ struct ubi_notification *nt = ns_ptr;
+
+ switch (l) {
+ case UBI_VOLUME_ADDED:
+ gluebi_create(&nt->di, &nt->vi);
+ break;
+ case UBI_VOLUME_REMOVED:
+ gluebi_remove(&nt->vi);
+ break;
+ case UBI_VOLUME_RESIZED:
+ gluebi_resized(&nt->vi);
+ break;
+ case UBI_VOLUME_UPDATED:
+ gluebi_updated(&nt->vi);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gluebi_notifier = {
+ .notifier_call = gluebi_notify,
+};
+
+static int __init ubi_gluebi_init(void)
+{
+ return ubi_register_volume_notifier(&gluebi_notifier, 0);
+}
+
+static void __exit ubi_gluebi_exit(void)
+{
+ struct gluebi_device *gluebi, *g;
+
+ list_for_each_entry_safe(gluebi, g, &gluebi_devices, list) {
+ int err;
+ struct mtd_info *mtd = &gluebi->mtd;
+
+ err = mtd_device_unregister(mtd);
+ if (err)
+ err_msg("error %d while removing gluebi MTD device %d, UBI device %d, volume %d - ignoring",
+ err, mtd->index, gluebi->ubi_num,
+ gluebi->vol_id);
+ kfree(mtd->name);
+ kfree(gluebi);
+ }
+ ubi_unregister_volume_notifier(&gluebi_notifier);
+}
+
+module_init(ubi_gluebi_init);
+module_exit(ubi_gluebi_exit);
+MODULE_DESCRIPTION("MTD emulation layer over UBI volumes");
+MODULE_AUTHOR("Artem Bityutskiy, Joern Engel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
new file mode 100644
index 0000000000..01b6448612
--- /dev/null
+++ b/drivers/mtd/ubi/io.c
@@ -0,0 +1,1398 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006, 2007
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * UBI input/output sub-system.
+ *
+ * This sub-system provides a uniform way to work with all kinds of
+ * underlying MTD devices. It also implements handy functions for reading and
+ * writing UBI headers.
+ *
+ * We are trying to have a paranoid mindset and not to trust what we read
+ * from the flash media in order to be more secure and robust. So this
+ * sub-system validates every single header it reads from the flash media.
+ *
+ * Some words about how the eraseblock headers are stored.
+ *
+ * The erase counter header is always stored at offset zero. By default, the
+ * VID header is stored after the EC header at the closest aligned offset
+ * (i.e. aligned to the minimum I/O unit size). Data starts next to the VID
+ * header at the closest aligned offset. But this default layout may be
+ * changed. For example, for different reasons (e.g., optimization) UBI may be
+ * asked to put the VID header at a further offset, and even at an unaligned
+ * offset. Of course, if the offset of the VID header is unaligned, UBI adds
+ * proper padding in front of it. Data offset may also be changed but it has to
+ * be aligned.
+ *
+ * About minimal I/O units. In general, UBI assumes flash device model where
+ * there is only one minimal I/O unit size. E.g., in case of NOR flash it is 1,
+ * in case of NAND flash it is a NAND page, etc. This is reported by MTD in the
+ * @ubi->mtd->writesize field. But as an exception, UBI admits use of another
+ * (smaller) minimal I/O unit size for EC and VID headers to make it possible
+ * to do different optimizations.
+ *
+ * This is extremely useful in case of NAND flashes which admit of several
+ * write operations to one NAND page. In this case UBI can fit EC and VID
+ * headers at one NAND page. Thus, UBI may use "sub-page" size as the minimal
+ * I/O unit for the headers (the @ubi->hdrs_min_io_size field). But it still
+ * reports NAND page size (@ubi->min_io_size) as a minimal I/O unit for the UBI
+ * users.
+ *
+ * Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so
+ * although the minimal I/O unit is 2K, UBI uses 512 bytes for EC and VID
+ * headers.
+ *
+ * Q: why not just treat the sub-page as the minimal I/O unit of this flash
+ * device, e.g., make @ubi->min_io_size = 512 in the example above?
+ *
+ * A: because when writing a sub-page, MTD still writes a full 2K page but the
+ * bytes which are not relevant to the sub-page are 0xFF. So, basically,
+ * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page.
+ * Thus, we prefer to use sub-pages only for EC and VID headers.
+ *
+ * As it was noted above, the VID header may start at a non-aligned offset.
+ * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page,
+ * the VID header may reside at offset 1984 which is the last 64 bytes of the
+ * last sub-page (EC header is always at offset zero). This causes some
+ * difficulties when reading and writing VID headers.
+ *
+ * Suppose we have a 64-byte buffer and we read a VID header into it. We change
+ * the data and want to write this VID header out. As we can only write in
+ * 512-byte chunks, we have to allocate one more buffer and copy our VID header
+ * to offset 448 of this buffer.
+ *
+ * The I/O sub-system does the following trick in order to avoid this extra
+ * copy. It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID
+ * header and returns a pointer to offset @ubi->vid_hdr_shift of this buffer.
+ * When the VID header is being written out, it shifts the VID header pointer
+ * back and writes the whole sub-page.
+ */
+
+#include <linux/crc32.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include "ubi.h"
+
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr);
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr);
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len);
+
+/**
+ * ubi_io_read - read data from a physical eraseblock.
+ * @ubi: UBI device description object
+ * @buf: buffer where to store the read data
+ * @pnum: physical eraseblock number to read from
+ * @offset: offset within the physical eraseblock from where to read
+ * @len: how many bytes to read
+ *
+ * This function reads data from offset @offset of physical eraseblock @pnum
+ * and stores the read data in the @buf buffer. The following return codes are
+ * possible:
+ *
+ * o %0 if all the requested data were successfully read;
+ * o %UBI_IO_BITFLIPS if all the requested data were successfully read, but
+ * correctable bit-flips were detected; this is harmless but may indicate
+ *   that this eraseblock may become bad soon (but does not have to);
+ * o %-EBADMSG if the MTD subsystem reported data integrity problems, for
+ * example it can be an ECC error in case of NAND; this most probably means
+ * that the data is corrupted;
+ * o %-EIO if some I/O error occurred;
+ * o other negative error codes in case of other errors.
+ */
+int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
+ int len)
+{
+ int err, retries = 0;
+ size_t read;
+ loff_t addr;
+
+ dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+ ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
+ ubi_assert(len > 0);
+
+ err = self_check_not_bad(ubi, pnum);
+ if (err)
+ return err;
+
+ /*
+ * Deliberately corrupt the buffer to improve robustness. Indeed, if we
+ * do not do this, the following may happen:
+ * 1. The buffer contains data from previous operation, e.g., read from
+ * another PEB previously. The data looks like expected, e.g., if we
+ * just do not read anything and return - the caller would not
+ * notice this. E.g., if we are reading a VID header, the buffer may
+ * contain a valid VID header from another PEB.
+ * 2. The driver is buggy and returns us success or -EBADMSG or
+ * -EUCLEAN, but it does not actually put any data to the buffer.
+ *
+ * This may confuse UBI or upper layers - they may think the buffer
+ * contains valid data while in fact it is just old data. This is
+ * especially possible because UBI (and UBIFS) relies on CRC, and
+ * treats data as correct even in case of ECC errors if the CRC is
+ * correct.
+ *
+ * Try to prevent this situation by changing the first byte of the
+ * buffer.
+ */
+ *((uint8_t *)buf) ^= 0xFF;
+
+ addr = (loff_t)pnum * ubi->peb_size + offset;
+retry:
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
+ if (err) {
+ const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
+
+ if (mtd_is_bitflip(err)) {
+ /*
+ * -EUCLEAN is reported if there was a bit-flip which
+ * was corrected, so this is harmless.
+ *
+			 * We do not report it here unless debugging is
+			 * enabled. A corresponding message will be printed
+			 * later, when the PEB has been scrubbed.
+ */
+ ubi_msg(ubi, "fixable bit-flip detected at PEB %d",
+ pnum);
+ ubi_assert(len == read);
+ return UBI_IO_BITFLIPS;
+ }
+
+ if (retries++ < UBI_IO_RETRIES) {
+ ubi_warn(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
+ err, errstr, len, pnum, offset, read);
+ yield();
+ goto retry;
+ }
+
+ ubi_err(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, errstr, len, pnum, offset, read);
+ dump_stack();
+
+ /*
+ * The driver should never return -EBADMSG if it failed to read
+ * all the requested data. But some buggy drivers might do
+ * this, so we change it to -EIO.
+ */
+ if (read != len && mtd_is_eccerr(err)) {
+ ubi_assert(0);
+ err = -EIO;
+ }
+ } else {
+ ubi_assert(len == read);
+
+ if (ubi_dbg_is_bitflip(ubi)) {
+ dbg_gen("bit-flip (emulated)");
+ err = UBI_IO_BITFLIPS;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * ubi_io_write - write data to a physical eraseblock.
+ * @ubi: UBI device description object
+ * @buf: buffer with the data to write
+ * @pnum: physical eraseblock number to write to
+ * @offset: offset within the physical eraseblock where to write
+ * @len: how many bytes to write
+ *
+ * This function writes @len bytes of data from buffer @buf to offset @offset
+ * of physical eraseblock @pnum. If all the data were successfully written,
+ * zero is returned. If an error occurred, this function returns a negative
+ * error code. If %-EIO is returned, the physical eraseblock most probably went
+ * bad.
+ *
+ * Note, in case of an error, it is possible that something was still written
+ * to the flash media, but it may be garbage.
+ */
+int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
+ int len)
+{
+ int err;
+ size_t written;
+ loff_t addr;
+
+ dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+ ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
+ ubi_assert(offset % ubi->hdrs_min_io_size == 0);
+ ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);
+
+ if (ubi->ro_mode) {
+ ubi_err(ubi, "read-only mode");
+ return -EROFS;
+ }
+
+ err = self_check_not_bad(ubi, pnum);
+ if (err)
+ return err;
+
+ /* The area we are writing to has to contain all 0xFF bytes */
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
+ if (err)
+ return err;
+
+ if (offset >= ubi->leb_start) {
+ /*
+ * We write to the data area of the physical eraseblock. Make
+ * sure it has valid EC and VID headers.
+ */
+ err = self_check_peb_ec_hdr(ubi, pnum);
+ if (err)
+ return err;
+ err = self_check_peb_vid_hdr(ubi, pnum);
+ if (err)
+ return err;
+ }
+
+ if (ubi_dbg_is_write_failure(ubi)) {
+ ubi_err(ubi, "cannot write %d bytes to PEB %d:%d (emulated)",
+ len, pnum, offset);
+ dump_stack();
+ return -EIO;
+ }
+
+ addr = (loff_t)pnum * ubi->peb_size + offset;
+ err = mtd_write(ubi->mtd, addr, len, &written, buf);
+ if (err) {
+ ubi_err(ubi, "error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
+ err, len, pnum, offset, written);
+ dump_stack();
+ ubi_dump_flash(ubi, pnum, offset, len);
+ } else
+ ubi_assert(written == len);
+
+ if (!err) {
+ err = self_check_write(ubi, buf, pnum, offset, len);
+ if (err)
+ return err;
+
+ /*
+ * Since we always write sequentially, the rest of the PEB has
+ * to contain only 0xFF bytes.
+ */
+ offset += len;
+ len = ubi->peb_size - offset;
+ if (len)
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
+ }
+
+ return err;
+}
+
+/**
+ * do_sync_erase - synchronously erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to erase
+ *
+ * This function synchronously erases physical eraseblock @pnum and returns
+ * zero in case of success and a negative error code in case of failure. If
+ * %-EIO is returned, the physical eraseblock most probably went bad.
+ */
+static int do_sync_erase(struct ubi_device *ubi, int pnum)
+{
+ int err, retries = 0;
+ struct erase_info ei;
+
+ dbg_io("erase PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ if (ubi->ro_mode) {
+ ubi_err(ubi, "read-only mode");
+ return -EROFS;
+ }
+
+retry:
+ memset(&ei, 0, sizeof(struct erase_info));
+
+ ei.addr = (loff_t)pnum * ubi->peb_size;
+ ei.len = ubi->peb_size;
+
+ err = mtd_erase(ubi->mtd, &ei);
+ if (err) {
+ if (retries++ < UBI_IO_RETRIES) {
+ ubi_warn(ubi, "error %d while erasing PEB %d, retry",
+ err, pnum);
+ yield();
+ goto retry;
+ }
+ ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err);
+ dump_stack();
+ return err;
+ }
+
+ err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
+ if (err)
+ return err;
+
+ if (ubi_dbg_is_erase_failure(ubi)) {
+ ubi_err(ubi, "cannot erase PEB %d (emulated)", pnum);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Patterns to write to a physical eraseblock when torturing it */
+static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
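+/*
+ * 0xa5 (10100101) and 0x5a (01011010) are bit-complementary, so together
+ * with the all-zeroes pattern every bit cell gets driven to both states.
+ */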
+
+/**
+ * torture_peb - test a supposedly bad physical eraseblock.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to test
+ *
+ * This function returns %-EIO if the physical eraseblock did not pass the
+ * test, a positive number of erase operations done if the test was
+ * successfully passed, and other negative error codes in case of other errors.
+ */
+static int torture_peb(struct ubi_device *ubi, int pnum)
+{
+ int err, i, patt_count;
+
+ ubi_msg(ubi, "run torture test for PEB %d", pnum);
+ patt_count = ARRAY_SIZE(patterns);
+ ubi_assert(patt_count > 0);
+
+ mutex_lock(&ubi->buf_mutex);
+ for (i = 0; i < patt_count; i++) {
+ err = do_sync_erase(ubi, pnum);
+ if (err)
+ goto out;
+
+ /* Make sure the PEB contains only 0xFF bytes */
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+ if (err)
+ goto out;
+
+ err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
+ if (err == 0) {
+ ubi_err(ubi, "erased PEB %d, but a non-0xFF byte found",
+ pnum);
+ err = -EIO;
+ goto out;
+ }
+
+ /* Write a pattern and check it */
+ memset(ubi->peb_buf, patterns[i], ubi->peb_size);
+ err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+ if (err)
+ goto out;
+
+ memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+ if (err)
+ goto out;
+
+ err = ubi_check_pattern(ubi->peb_buf, patterns[i],
+ ubi->peb_size);
+ if (err == 0) {
+ ubi_err(ubi, "pattern %x checking failed for PEB %d",
+ patterns[i], pnum);
+ err = -EIO;
+ goto out;
+ }
+ }
+
+ err = patt_count;
+ ubi_msg(ubi, "PEB %d passed torture test, do not mark it as bad", pnum);
+
+out:
+ mutex_unlock(&ubi->buf_mutex);
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
+ /*
+ * If a bit-flip or data integrity error was detected, the test
+ * has not passed because it happened on a freshly erased
+ * physical eraseblock which means something is wrong with it.
+ */
+ ubi_err(ubi, "read problems on freshly erased PEB %d, must be bad",
+ pnum);
+ err = -EIO;
+ }
+ return err;
+}
+
+/**
+ * nor_erase_prepare - prepare a NOR flash PEB for erasure.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to prepare
+ *
+ * NOR flashes, or at least some of them, have a peculiar embedded PEB erasure
+ * algorithm: the PEB is first filled with zeroes, then it is erased. And
+ * filling with zeroes starts from the end of the PEB. This was observed with
+ * Spansion S29GL512N NOR flash.
+ *
+ * This means that in case of a power cut we may end up with intact data at the
+ * beginning of the PEB, and all zeroes at the end of PEB. In other words, the
+ * EC and VID headers are OK, but a large chunk of data at the end of PEB is
+ * zeroed. This makes UBI mistakenly treat this PEB as used and associate it
+ * with an LEB, which leads to subsequent failures (e.g., UBIFS fails).
+ *
+ * This function is called before erasing NOR PEBs and it zeroes out EC and VID
+ * magic numbers in order to invalidate them and prevent the failures. Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
+{
+ int err;
+ size_t written;
+ loff_t addr;
+ uint32_t data = 0;
+ struct ubi_ec_hdr ec_hdr;
+ struct ubi_vid_io_buf vidb;
+
+ /*
+ * Note, we cannot generally define VID header buffers on stack,
+ * because of the way we deal with these buffers (see the header
+ * comment in this file). But we know this is a NOR-specific piece of
+ * code, so we can do this. But yes, this is error-prone and we should
+ * (pre-)allocate VID header buffer instead.
+ */
+ struct ubi_vid_hdr vid_hdr;
+
+ /*
+ * If VID or EC is valid, we have to corrupt them before erasing.
+ * It is important to first invalidate the EC header, and then the VID
+ * header. Otherwise a power cut may lead to valid EC header and
+ * invalid VID header, in which case UBI will treat this PEB as
+ * corrupted and will try to preserve it, and print scary warnings.
+ */
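+ /*
+ * Overwriting just the 4-byte magic with zeroes is sufficient here:
+ * NOR flash can clear bits in place without an erase, and a header
+ * whose magic fails validation is rejected on the next attach.
+ */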
+ addr = (loff_t)pnum * ubi->peb_size;
+ err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
+ if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+ err != UBI_IO_FF) {
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
+ if (err)
+ goto error;
+ }
+
+ ubi_init_vid_buf(ubi, &vidb, &vid_hdr);
+ ubi_assert(&vid_hdr == ubi_get_vid_hdr(&vidb));
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, &vidb, 0);
+ if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+ err != UBI_IO_FF) {
+ addr += ubi->vid_hdr_aloffset;
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
+ if (err)
+ goto error;
+ }
+ return 0;
+
+error:
+ /*
+ * The PEB contains a valid VID or EC header, but we cannot invalidate
+ * it. Supposedly the flash media or the driver is screwed up, so
+ * return an error.
+ */
+ ubi_err(ubi, "cannot invalidate PEB %d, write returned %d", pnum, err);
+ ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
+ return -EIO;
+}
+
+/**
+ * ubi_io_sync_erase - synchronously erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to erase
+ * @torture: if this physical eraseblock has to be tortured
+ *
+ * This function synchronously erases physical eraseblock @pnum. If @torture
+ * flag is not zero, the physical eraseblock is checked by means of writing
+ * different patterns to it and reading them back. If the torturing is enabled,
+ * the physical eraseblock is erased more than once.
+ *
+ * This function returns the number of erasures made in case of success, %-EIO
+ * if the erasure failed or the torturing test failed, and other negative error
+ * codes in case of other errors. Note, %-EIO means that the physical
+ * eraseblock is bad.
+ */
+int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
+{
+ int err, ret = 0;
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ err = self_check_not_bad(ubi, pnum);
+ if (err != 0)
+ return err;
+
+ if (ubi->ro_mode) {
+ ubi_err(ubi, "read-only mode");
+ return -EROFS;
+ }
+
+ /*
+ * If the flash is ECC-ed then we have to erase the ECC block before we
+ * can write to it. But the write is in preparation for an erase in the
+ * first place. This means we cannot zero out EC and VID before the
+ * erase and we just have to hope the flash starts erasing from the
+ * start of the page.
+ */
+ if (ubi->nor_flash && ubi->mtd->writesize == 1) {
+ err = nor_erase_prepare(ubi, pnum);
+ if (err)
+ return err;
+ }
+
+ if (torture) {
+ ret = torture_peb(ubi, pnum);
+ if (ret < 0)
+ return ret;
+ }
+
+ err = do_sync_erase(ubi, pnum);
+ if (err)
+ return err;
+
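+ /*
+ * torture_peb() already performed 'ret' erasures; the final
+ * do_sync_erase() adds one more.
+ */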
+ return ret + 1;
+}
+
+/**
+ * ubi_io_is_bad - check if a physical eraseblock is bad.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ *
+ * This function returns a positive number if the physical eraseblock is bad,
+ * zero if not, and a negative error code if an error occurred.
+ */
+int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
+{
+ struct mtd_info *mtd = ubi->mtd;
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ if (ubi->bad_allowed) {
+ int ret;
+
+ ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
+ if (ret < 0)
+ ubi_err(ubi, "error %d while checking if PEB %d is bad",
+ ret, pnum);
+ else if (ret)
+ dbg_io("PEB %d is bad", pnum);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_io_mark_bad - mark a physical eraseblock as bad.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to mark
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+ struct mtd_info *mtd = ubi->mtd;
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ if (ubi->ro_mode) {
+ ubi_err(ubi, "read-only mode");
+ return -EROFS;
+ }
+
+ if (!ubi->bad_allowed)
+ return 0;
+
+ err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
+ if (err)
+ ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err);
+ return err;
+}
+
+/**
+ * validate_ec_hdr - validate an erase counter header.
+ * @ubi: UBI device description object
+ * @ec_hdr: the erase counter header to check
+ *
+ * This function returns zero if the erase counter header is OK, and %1 if
+ * not.
+ */
+static int validate_ec_hdr(const struct ubi_device *ubi,
+ const struct ubi_ec_hdr *ec_hdr)
+{
+ long long ec;
+ int vid_hdr_offset, leb_start;
+
+ ec = be64_to_cpu(ec_hdr->ec);
+ vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
+ leb_start = be32_to_cpu(ec_hdr->data_offset);
+
+ if (ec_hdr->version != UBI_VERSION) {
+ ubi_err(ubi, "node with incompatible UBI version found: this UBI version is %d, image version is %d",
+ UBI_VERSION, (int)ec_hdr->version);
+ goto bad;
+ }
+
+ if (vid_hdr_offset != ubi->vid_hdr_offset) {
+ ubi_err(ubi, "bad VID header offset %d, expected %d",
+ vid_hdr_offset, ubi->vid_hdr_offset);
+ goto bad;
+ }
+
+ if (leb_start != ubi->leb_start) {
+ ubi_err(ubi, "bad data offset %d, expected %d",
+ leb_start, ubi->leb_start);
+ goto bad;
+ }
+
+ if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
+ ubi_err(ubi, "bad erase counter %lld", ec);
+ goto bad;
+ }
+
+ return 0;
+
+bad:
+ ubi_err(ubi, "bad EC header");
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
+ return 1;
+}
+
+/**
+ * ubi_io_read_ec_hdr - read and check an erase counter header.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock to read from
+ * @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter
+ * header
+ * @verbose: be verbose if the header is corrupted or was not found
+ *
+ * This function reads erase counter header from physical eraseblock @pnum and
+ * stores it in @ec_hdr. This function also checks CRC checksum of the read
+ * erase counter header. The following codes may be returned:
+ *
+ * o %0 if the CRC checksum is correct and the header was successfully read;
+ * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
+ * and corrected by the flash driver; this is harmless but may indicate that
+ * this eraseblock may become bad soon (though not necessarily);
+ * o %UBI_IO_BAD_HDR if the erase counter header is corrupted (a CRC error);
+ * o %UBI_IO_BAD_HDR_EBADMSG is the same as %UBI_IO_BAD_HDR, but there also was
+ * a data integrity error (uncorrectable ECC error in case of NAND);
+ * o %UBI_IO_FF if only 0xFF bytes were read (the PEB is supposedly empty);
+ * o a negative error code in case of failure.
+ */
+int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr, int verbose)
+{
+ int err, read_err;
+ uint32_t crc, magic, hdr_crc;
+
+ dbg_io("read EC header from PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
+ if (read_err) {
+ if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
+ return read_err;
+
+ /*
+ * We read all the data, but either a correctable bit-flip
+ * occurred, or MTD reported a data integrity error
+ * (uncorrectable ECC error in case of NAND). The former is
+ * harmless, the latter may mean that the read data is
+ * corrupted. But we have a CRC check-sum and we will detect
+ * this. If the EC header is still OK, we just report this as
+ * there was a bit-flip, to force scrubbing.
+ */
+ }
+
+ magic = be32_to_cpu(ec_hdr->magic);
+ if (magic != UBI_EC_HDR_MAGIC) {
+ if (mtd_is_eccerr(read_err))
+ return UBI_IO_BAD_HDR_EBADMSG;
+
+ /*
+ * The magic field is wrong. Let's check if we have read all
+ * 0xFF. If yes, this physical eraseblock is assumed to be
+ * empty.
+ */
+ if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
+ /* The physical eraseblock is supposedly empty */
+ if (verbose)
+ ubi_warn(ubi, "no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
+ if (!read_err)
+ return UBI_IO_FF;
+ else
+ return UBI_IO_FF_BITFLIPS;
+ }
+
+ /*
+ * This is not a valid erase counter header, and these are not
+ * 0xFF bytes. Report that the header is corrupted.
+ */
+ if (verbose) {
+ ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
+ ubi_dump_ec_hdr(ec_hdr);
+ }
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
+ return UBI_IO_BAD_HDR;
+ }
+
+ crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
+
+ if (hdr_crc != crc) {
+ if (verbose) {
+ ubi_warn(ubi, "bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_dump_ec_hdr(ec_hdr);
+ }
+ dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+
+ if (!read_err)
+ return UBI_IO_BAD_HDR;
+ else
+ return UBI_IO_BAD_HDR_EBADMSG;
+ }
+
+ /* And of course validate what has just been read from the media */
+ err = validate_ec_hdr(ubi, ec_hdr);
+ if (err) {
+ ubi_err(ubi, "validation failed for PEB %d", pnum);
+ return -EINVAL;
+ }
+
+ /*
+ * If there was %-EBADMSG, but the header CRC is still OK, report a
+ * bit-flip to force scrubbing on this PEB.
+ */
+ return read_err ? UBI_IO_BITFLIPS : 0;
+}
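+
+/*
+ * Editor's sketch, not part of the original patch: the return codes listed
+ * above form a small decision table for attach-time scanning. A hypothetical
+ * classifier could look like this; example_classify_peb() and its strings
+ * are illustrative only.
+ */
+static __maybe_unused const char *example_classify_peb(struct ubi_device *ubi,
+ int pnum, struct ubi_ec_hdr *hdr)
+{
+ switch (ubi_io_read_ec_hdr(ubi, pnum, hdr, 0)) {
+ case 0:
+ return "valid EC header";
+ case UBI_IO_BITFLIPS:
+ return "valid EC header, schedule the PEB for scrubbing";
+ case UBI_IO_FF:
+ case UBI_IO_FF_BITFLIPS:
+ return "empty PEB, may be used right away";
+ case UBI_IO_BAD_HDR:
+ case UBI_IO_BAD_HDR_EBADMSG:
+ return "corrupted EC header, the PEB needs recovery";
+ default:
+ return "I/O error";
+ }
+}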
+
+/**
+ * ubi_io_write_ec_hdr - write an erase counter header.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock to write to
+ * @ec_hdr: the erase counter header to write
+ *
+ * This function writes erase counter header described by @ec_hdr to physical
+ * eraseblock @pnum. It also fills most fields of @ec_hdr before writing, so
+ * callers do not have to fill them. Callers must only fill the @ec_hdr->ec
+ * field.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure. If %-EIO is returned, the physical eraseblock most probably
+ * went bad.
+ */
+int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr)
+{
+ int err;
+ uint32_t crc;
+
+ dbg_io("write EC header to PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
+ ec_hdr->version = UBI_VERSION;
+ ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
+ ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
+ ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
+ crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
+ ec_hdr->hdr_crc = cpu_to_be32(crc);
+
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
+ if (err)
+ return err;
+
+ if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
+ return -EROFS;
+
+ err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
+ return err;
+}
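+
+/*
+ * Editor's sketch, not part of the original patch: per the contract above,
+ * a caller only sets the erase counter itself before writing, e.g.:
+ *
+ *	ec_hdr->ec = cpu_to_be64(new_ec);
+ *	err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+ *
+ * where new_ec is a hypothetical updated erase counter; magic, version,
+ * offsets, image_seq and the CRC are all filled in by this function.
+ */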
+
+/**
+ * validate_vid_hdr - validate a volume identifier header.
+ * @ubi: UBI device description object
+ * @vid_hdr: the volume identifier header to check
+ *
+ * This function validates the data stored in the volume identifier header
+ * @vid_hdr. Returns zero if the VID header is OK and %1 if not.
+ */
+static int validate_vid_hdr(const struct ubi_device *ubi,
+ const struct ubi_vid_hdr *vid_hdr)
+{
+ int vol_type = vid_hdr->vol_type;
+ int copy_flag = vid_hdr->copy_flag;
+ int vol_id = be32_to_cpu(vid_hdr->vol_id);
+ int lnum = be32_to_cpu(vid_hdr->lnum);
+ int compat = vid_hdr->compat;
+ int data_size = be32_to_cpu(vid_hdr->data_size);
+ int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ int data_pad = be32_to_cpu(vid_hdr->data_pad);
+ int data_crc = be32_to_cpu(vid_hdr->data_crc);
+ int usable_leb_size = ubi->leb_size - data_pad;
+
+ if (copy_flag != 0 && copy_flag != 1) {
+ ubi_err(ubi, "bad copy_flag");
+ goto bad;
+ }
+
+ if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
+ data_pad < 0) {
+ ubi_err(ubi, "negative values");
+ goto bad;
+ }
+
+ if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err(ubi, "bad vol_id");
+ goto bad;
+ }
+
+ if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
+ ubi_err(ubi, "bad compat");
+ goto bad;
+ }
+
+ if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
+ compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
+ compat != UBI_COMPAT_REJECT) {
+ ubi_err(ubi, "bad compat");
+ goto bad;
+ }
+
+ if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
+ ubi_err(ubi, "bad vol_type");
+ goto bad;
+ }
+
+ if (data_pad >= ubi->leb_size / 2) {
+ ubi_err(ubi, "bad data_pad");
+ goto bad;
+ }
+
+ if (data_size > ubi->leb_size) {
+ ubi_err(ubi, "bad data_size");
+ goto bad;
+ }
+
+ if (vol_type == UBI_VID_STATIC) {
+ /*
+ * Although from a high-level point of view static volumes may
+ * contain zero bytes of data, no VID header can contain zero
+ * in these fields, because empty volumes do not have mapped
+ * logical eraseblocks.
+ */
+ if (used_ebs == 0) {
+ ubi_err(ubi, "zero used_ebs");
+ goto bad;
+ }
+ if (data_size == 0) {
+ ubi_err(ubi, "zero data_size");
+ goto bad;
+ }
+ if (lnum < used_ebs - 1) {
+ if (data_size != usable_leb_size) {
+ ubi_err(ubi, "bad data_size");
+ goto bad;
+ }
+ } else if (lnum > used_ebs - 1) {
+ ubi_err(ubi, "too high lnum");
+ goto bad;
+ }
+ } else {
+ if (copy_flag == 0) {
+ if (data_crc != 0) {
+ ubi_err(ubi, "non-zero data CRC");
+ goto bad;
+ }
+ if (data_size != 0) {
+ ubi_err(ubi, "non-zero data_size");
+ goto bad;
+ }
+ } else {
+ if (data_size == 0) {
+ ubi_err(ubi, "zero data_size of copy");
+ goto bad;
+ }
+ }
+ if (used_ebs != 0) {
+ ubi_err(ubi, "bad used_ebs");
+ goto bad;
+ }
+ }
+
+ return 0;
+
+bad:
+ ubi_err(ubi, "bad VID header");
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
+ return 1;
+}
+
+/**
+ * ubi_io_read_vid_hdr - read and check a volume identifier header.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to read from
+ * @vidb: the volume identifier buffer to store data in
+ * @verbose: be verbose if the header is corrupted or wasn't found
+ *
+ * This function reads the volume identifier header from physical eraseblock
+ * @pnum and stores it in @vidb. It also checks CRC checksum of the read
+ * volume identifier header. The error codes are the same as in
+ * 'ubi_io_read_ec_hdr()'.
+ *
+ * Note, the implementation of this function is also very similar to
+ * 'ubi_io_read_ec_hdr()', so refer to the commentaries in 'ubi_io_read_ec_hdr()'.
+ */
+int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_io_buf *vidb, int verbose)
+{
+ int err, read_err;
+ uint32_t crc, magic, hdr_crc;
+ struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
+ void *p = vidb->buffer;
+
+ dbg_io("read VID header from PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
+ ubi->vid_hdr_shift + UBI_VID_HDR_SIZE);
+ if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
+ return read_err;
+
+ magic = be32_to_cpu(vid_hdr->magic);
+ if (magic != UBI_VID_HDR_MAGIC) {
+ if (mtd_is_eccerr(read_err))
+ return UBI_IO_BAD_HDR_EBADMSG;
+
+ if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
+ if (verbose)
+ ubi_warn(ubi, "no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
+ if (!read_err)
+ return UBI_IO_FF;
+ else
+ return UBI_IO_FF_BITFLIPS;
+ }
+
+ if (verbose) {
+ ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
+ ubi_dump_vid_hdr(vid_hdr);
+ }
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
+ return UBI_IO_BAD_HDR;
+ }
+
+ crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
+
+ if (hdr_crc != crc) {
+ if (verbose) {
+ ubi_warn(ubi, "bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_dump_vid_hdr(vid_hdr);
+ }
+ dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ if (!read_err)
+ return UBI_IO_BAD_HDR;
+ else
+ return UBI_IO_BAD_HDR_EBADMSG;
+ }
+
+ err = validate_vid_hdr(ubi, vid_hdr);
+ if (err) {
+ ubi_err(ubi, "validation failed for PEB %d", pnum);
+ return -EINVAL;
+ }
+
+ return read_err ? UBI_IO_BITFLIPS : 0;
+}
+
+/**
+ * ubi_io_write_vid_hdr - write a volume identifier header.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to write to
+ * @vidb: the volume identifier buffer to write
+ *
+ * This function writes the volume identifier header described by @vidb to
+ * physical eraseblock @pnum. This function automatically fills the
+ * @vidb->hdr->magic and the @vidb->hdr->version fields, as well as calculates
+ * header CRC checksum and stores it at vidb->hdr->hdr_crc.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure. If %-EIO is returned, the physical eraseblock probably went
+ * bad.
+ */
+int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_io_buf *vidb)
+{
+ struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
+ int err;
+ uint32_t crc;
+ void *p = vidb->buffer;
+
+ dbg_io("write VID header to PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ err = self_check_peb_ec_hdr(ubi, pnum);
+ if (err)
+ return err;
+
+ vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
+ vid_hdr->version = UBI_VERSION;
+ crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
+ vid_hdr->hdr_crc = cpu_to_be32(crc);
+
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
+ if (err)
+ return err;
+
+ if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
+ return -EROFS;
+
+ err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
+ ubi->vid_hdr_alsize);
+ return err;
+}
+
+/**
+ * self_check_not_bad - ensure that a physical eraseblock is not bad.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to check
+ *
+ * This function returns zero if the physical eraseblock is good, %-EINVAL if
+ * it is bad and a negative error code if an error occurred.
+ */
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ err = ubi_io_is_bad(ubi, pnum);
+ if (!err)
+ return err;
+
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ dump_stack();
+ return err > 0 ? -EINVAL : err;
+}
+
+/**
+ * self_check_ec_hdr - check if an erase counter header is all right.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number the erase counter header belongs to
+ * @ec_hdr: the erase counter header to check
+ *
+ * This function returns zero if the erase counter header contains valid
+ * values, and %-EINVAL if not.
+ */
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr)
+{
+ int err;
+ uint32_t magic;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ magic = be32_to_cpu(ec_hdr->magic);
+ if (magic != UBI_EC_HDR_MAGIC) {
+ ubi_err(ubi, "bad magic %#08x, must be %#08x",
+ magic, UBI_EC_HDR_MAGIC);
+ goto fail;
+ }
+
+ err = validate_ec_hdr(ubi, ec_hdr);
+ if (err) {
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
+ return -EINVAL;
+}
+
+/**
+ * self_check_peb_ec_hdr - check erase counter header.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ *
+ * This function returns zero if the erase counter header is all right and
+ * a negative error code if not or if an error occurred.
+ */
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+ uint32_t crc, hdr_crc;
+ struct ubi_ec_hdr *ec_hdr;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
+ goto exit;
+
+ crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
+ if (hdr_crc != crc) {
+ ubi_err(ubi, "bad CRC, calculated %#08x, read %#08x",
+ crc, hdr_crc);
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
+ err = -EINVAL;
+ goto exit;
+ }
+
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
+
+exit:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * self_check_vid_hdr - check that a volume identifier header is all right.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number the volume identifier header belongs to
+ * @vid_hdr: the volume identifier header to check
+ *
+ * This function returns zero if the volume identifier header is all right, and
+ * %-EINVAL if not.
+ */
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr)
+{
+ int err;
+ uint32_t magic;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ magic = be32_to_cpu(vid_hdr->magic);
+ if (magic != UBI_VID_HDR_MAGIC) {
+ ubi_err(ubi, "bad VID header magic %#08x at PEB %d, must be %#08x",
+ magic, pnum, UBI_VID_HDR_MAGIC);
+ goto fail;
+ }
+
+ err = validate_vid_hdr(ubi, vid_hdr);
+ if (err) {
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ goto fail;
+ }
+
+ return err;
+
+fail:
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
+ return -EINVAL;
+
+}
+
+/**
+ * self_check_peb_vid_hdr - check volume identifier header.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ *
+ * This function returns zero if the volume identifier header is all right,
+ * and a negative error code if not or if an error occurred.
+ */
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+ uint32_t crc, hdr_crc;
+ struct ubi_vid_io_buf *vidb;
+ struct ubi_vid_hdr *vid_hdr;
+ void *p;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb)
+ return -ENOMEM;
+
+ vid_hdr = ubi_get_vid_hdr(vidb);
+ p = vidb->buffer;
+ err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
+ ubi->vid_hdr_alsize);
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
+ goto exit;
+
+ crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
+ if (hdr_crc != crc) {
+ ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
+ err = -EINVAL;
+ goto exit;
+ }
+
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
+
+exit:
+ ubi_free_vid_buf(vidb);
+ return err;
+}
+
+/**
+ * self_check_write - make sure write succeeded.
+ * @ubi: UBI device description object
+ * @buf: buffer with data which were written
+ * @pnum: physical eraseblock number the data were written to
+ * @offset: offset within the physical eraseblock the data were written to
+ * @len: how many bytes were written
+ *
+ * This function reads data which were recently written and compares it with
+ * the original data buffer - the data have to match. Returns zero if the data
+ * match and a negative error code if not or in case of failure.
+ */
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len)
+{
+ int err, i;
+ size_t read;
+ void *buf1;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ buf1 = __vmalloc(len, GFP_NOFS);
+ if (!buf1) {
+ ubi_err(ubi, "cannot allocate memory to check writes");
+ return 0;
+ }
+
+ err = mtd_read(ubi->mtd, addr, len, &read, buf1);
+ if (err && !mtd_is_bitflip(err))
+ goto out_free;
+
+ for (i = 0; i < len; i++) {
+ uint8_t c = ((uint8_t *)buf)[i];
+ uint8_t c1 = ((uint8_t *)buf1)[i];
+ int dump_len;
+
+ if (c == c1)
+ continue;
+
+ ubi_err(ubi, "self-check failed for PEB %d:%d, len %d",
+ pnum, offset, len);
+ ubi_msg(ubi, "data differ at position %d", i);
+ /* Dump at most 128 bytes, and never past the end of the buffers */
+ dump_len = min_t(int, 128, len - i);
+ ubi_msg(ubi, "hex dump of the original buffer from %d to %d",
+ i, i + dump_len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ buf + i, dump_len, 1);
+ ubi_msg(ubi, "hex dump of the read buffer from %d to %d",
+ i, i + dump_len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ buf1 + i, dump_len, 1);
+ dump_stack();
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ vfree(buf1);
+ return 0;
+
+out_free:
+ vfree(buf1);
+ return err;
+}
+
+/**
+ * ubi_self_check_all_ff - check that a region of flash is empty.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ * @offset: the starting offset within the physical eraseblock to check
+ * @len: the length of the region to check
+ *
+ * This function returns zero if only 0xFF bytes are present at offset
+ * @offset of the physical eraseblock @pnum, and a negative error code if not
+ * or if an error occurred.
+ */
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
+{
+ size_t read;
+ int err;
+ void *buf;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ buf = __vmalloc(len, GFP_NOFS);
+ if (!buf) {
+ ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
+ return 0;
+ }
+
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
+ if (err && !mtd_is_bitflip(err)) {
+ ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
+ goto error;
+ }
+
+ err = ubi_check_pattern(buf, 0xFF, len);
+ if (err == 0) {
+ ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
+ pnum, offset, len);
+ goto fail;
+ }
+
+ vfree(buf);
+ return 0;
+
+fail:
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_msg(ubi, "hex dump of the %d-%d region", offset, offset + len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+ err = -EINVAL;
+error:
+ dump_stack();
+ vfree(buf);
+ return err;
+}
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
new file mode 100644
index 0000000000..5db653eacb
--- /dev/null
+++ b/drivers/mtd/ubi/kapi.c
@@ -0,0 +1,855 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/* This file mostly implements UBI kernel API functions */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
+#include <asm/div64.h>
+#include "ubi.h"
+
+/**
+ * ubi_do_get_device_info - get information about UBI device.
+ * @ubi: UBI device description object
+ * @di: the information is stored here
+ *
+ * This function is the same as 'ubi_get_device_info()', but it assumes the UBI
+ * device is locked and cannot disappear.
+ */
+void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di)
+{
+ di->ubi_num = ubi->ubi_num;
+ di->leb_size = ubi->leb_size;
+ di->leb_start = ubi->leb_start;
+ di->min_io_size = ubi->min_io_size;
+ di->max_write_size = ubi->max_write_size;
+ di->ro_mode = ubi->ro_mode;
+ di->cdev = ubi->cdev.dev;
+}
+EXPORT_SYMBOL_GPL(ubi_do_get_device_info);
+
+/**
+ * ubi_get_device_info - get information about UBI device.
+ * @ubi_num: UBI device number
+ * @di: the information is stored here
+ *
+ * This function returns %0 in case of success, %-EINVAL if the UBI device
+ * number is invalid, and %-ENODEV if there is no such UBI device.
+ */
+int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
+{
+ struct ubi_device *ubi;
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+ ubi_do_get_device_info(ubi, di);
+ ubi_put_device(ubi);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ubi_get_device_info);
+
+/**
+ * ubi_do_get_volume_info - get information about UBI volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @vi: the information is stored here
+ */
+void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_volume_info *vi)
+{
+ vi->vol_id = vol->vol_id;
+ vi->ubi_num = ubi->ubi_num;
+ vi->size = vol->reserved_pebs;
+ vi->used_bytes = vol->used_bytes;
+ vi->vol_type = vol->vol_type;
+ vi->corrupted = vol->corrupted;
+ vi->upd_marker = vol->upd_marker;
+ vi->alignment = vol->alignment;
+ vi->usable_leb_size = vol->usable_leb_size;
+ vi->name_len = vol->name_len;
+ vi->name = vol->name;
+ vi->cdev = vol->cdev.dev;
+ vi->dev = &vol->dev;
+}
+
+/**
+ * ubi_get_volume_info - get information about UBI volume.
+ * @desc: volume descriptor
+ * @vi: the information is stored here
+ */
+void ubi_get_volume_info(struct ubi_volume_desc *desc,
+ struct ubi_volume_info *vi)
+{
+ ubi_do_get_volume_info(desc->vol->ubi, desc->vol, vi);
+}
+EXPORT_SYMBOL_GPL(ubi_get_volume_info);
+
+/**
+ * ubi_open_volume - open UBI volume.
+ * @ubi_num: UBI device number
+ * @vol_id: volume ID
+ * @mode: open mode
+ *
+ * The @mode parameter specifies if the volume should be opened in read-only
+ * mode, read-write mode, or exclusive mode. The exclusive mode guarantees that
+ * nobody else will be able to open this volume. UBI allows many readers of a
+ * volume and only one writer at a time.
+ *
+ * If a static volume is being opened for the first time since boot, it will be
+ * checked by this function, which means it will be fully read and the CRC
+ * checksum of each logical eraseblock will be checked.
+ *
+ * This function returns volume descriptor in case of success and a negative
+ * error code in case of failure.
+ */
+struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
+{
+ int err;
+ struct ubi_volume_desc *desc;
+ struct ubi_device *ubi;
+ struct ubi_volume *vol;
+
+ dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode);
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return ERR_PTR(-EINVAL);
+
+ if (mode != UBI_READONLY && mode != UBI_READWRITE &&
+ mode != UBI_EXCLUSIVE && mode != UBI_METAONLY)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * First of all, we have to get the UBI device to prevent its removal.
+ */
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return ERR_PTR(-ENODEV);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
+ err = -EINVAL;
+ goto out_put_ubi;
+ }
+
+ desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL);
+ if (!desc) {
+ err = -ENOMEM;
+ goto out_put_ubi;
+ }
+
+ err = -ENODEV;
+ if (!try_module_get(THIS_MODULE))
+ goto out_free;
+
+ spin_lock(&ubi->volumes_lock);
+ vol = ubi->volumes[vol_id];
+ if (!vol)
+ goto out_unlock;
+
+ err = -EBUSY;
+ switch (mode) {
+ case UBI_READONLY:
+ if (vol->exclusive)
+ goto out_unlock;
+ vol->readers += 1;
+ break;
+
+ case UBI_READWRITE:
+ if (vol->exclusive || vol->writers > 0)
+ goto out_unlock;
+ vol->writers += 1;
+ break;
+
+ case UBI_EXCLUSIVE:
+ if (vol->exclusive || vol->writers || vol->readers ||
+ vol->metaonly)
+ goto out_unlock;
+ vol->exclusive = 1;
+ break;
+
+ case UBI_METAONLY:
+ if (vol->metaonly || vol->exclusive)
+ goto out_unlock;
+ vol->metaonly = 1;
+ break;
+ }
+ get_device(&vol->dev);
+ vol->ref_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ desc->vol = vol;
+ desc->mode = mode;
+
+ mutex_lock(&ubi->ckvol_mutex);
+ if (!vol->checked && !vol->skip_check) {
+ /* This is the first open - check the volume */
+ err = ubi_check_volume(ubi, vol_id);
+ if (err < 0) {
+ mutex_unlock(&ubi->ckvol_mutex);
+ ubi_close_volume(desc);
+ return ERR_PTR(err);
+ }
+ if (err == 1) {
+ ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
+ vol_id, ubi->ubi_num);
+ vol->corrupted = 1;
+ }
+ vol->checked = 1;
+ }
+ mutex_unlock(&ubi->ckvol_mutex);
+
+ return desc;
+
+out_unlock:
+ spin_unlock(&ubi->volumes_lock);
+ module_put(THIS_MODULE);
+out_free:
+ kfree(desc);
+out_put_ubi:
+ ubi_err(ubi, "cannot open device %d, volume %d, error %d",
+ ubi_num, vol_id, err);
+ ubi_put_device(ubi);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(ubi_open_volume);
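+
+/*
+ * Editor's sketch, not part of the original patch: typical open/close usage
+ * from a kernel client. The device number 0 and volume ID 3 are made up for
+ * illustration; example_open_and_close() is hypothetical.
+ */
+static int __maybe_unused example_open_and_close(void)
+{
+ struct ubi_volume_desc *desc;
+
+ /* Many readers and one writer may coexist; UBI_EXCLUSIVE would
+ * lock out every other opener instead. */
+ desc = ubi_open_volume(0, 3, UBI_READWRITE);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ /* ... ubi_leb_read()/ubi_leb_write() calls go here ... */
+
+ ubi_close_volume(desc);
+ return 0;
+}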
+
+/**
+ * ubi_open_volume_nm - open UBI volume by name.
+ * @ubi_num: UBI device number
+ * @name: volume name
+ * @mode: open mode
+ *
+ * This function is similar to 'ubi_open_volume()', but opens a volume by name.
+ */
+struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
+ int mode)
+{
+ int i, vol_id = -1, len;
+ struct ubi_device *ubi;
+ struct ubi_volume_desc *ret;
+
+ dbg_gen("open device %d, volume %s, mode %d", ubi_num, name, mode);
+
+ if (!name)
+ return ERR_PTR(-EINVAL);
+
+ len = strnlen(name, UBI_VOL_NAME_MAX + 1);
+ if (len > UBI_VOL_NAME_MAX)
+ return ERR_PTR(-EINVAL);
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return ERR_PTR(-EINVAL);
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return ERR_PTR(-ENODEV);
+
+ spin_lock(&ubi->volumes_lock);
+ /* Walk all volumes of this UBI device */
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ struct ubi_volume *vol = ubi->volumes[i];
+
+ if (vol && len == vol->name_len && !strcmp(name, vol->name)) {
+ vol_id = i;
+ break;
+ }
+ }
+ spin_unlock(&ubi->volumes_lock);
+
+ if (vol_id >= 0)
+ ret = ubi_open_volume(ubi_num, vol_id, mode);
+ else
+ ret = ERR_PTR(-ENODEV);
+
+ /*
+ * We should put the UBI device even in case of success, because
+ * 'ubi_open_volume()' took a reference as well.
+ */
+ ubi_put_device(ubi);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
+
+/**
+ * ubi_open_volume_path - open UBI volume by its character device node path.
+ * @pathname: volume character device node path
+ * @mode: open mode
+ *
+ * This function is similar to 'ubi_open_volume()', but opens a volume by the path
+ * to its character device node.
+ */
+struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
+{
+ int error, ubi_num, vol_id;
+ struct path path;
+ struct kstat stat;
+
+ dbg_gen("open volume %s, mode %d", pathname, mode);
+
+ if (!pathname || !*pathname)
+ return ERR_PTR(-EINVAL);
+
+ error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+ if (error)
+ return ERR_PTR(error);
+
+ error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
+ path_put(&path);
+ if (error)
+ return ERR_PTR(error);
+
+ if (!S_ISCHR(stat.mode))
+ return ERR_PTR(-EINVAL);
+
+ ubi_num = ubi_major2num(MAJOR(stat.rdev));
+ vol_id = MINOR(stat.rdev) - 1;
+
+ if (vol_id >= 0 && ubi_num >= 0)
+ return ubi_open_volume(ubi_num, vol_id, mode);
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(ubi_open_volume_path);
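+
+/*
+ * Editor's sketch, not part of the original patch: UBI volume character
+ * devices conventionally appear as /dev/ubiX_Y, where Y is the volume ID,
+ * and minor number 0 belongs to the UBI device node itself - hence the
+ * "MINOR - 1" conversion above. A typical call would be:
+ *
+ *	desc = ubi_open_volume_path("/dev/ubi0_3", UBI_READONLY);
+ */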
+
+/**
+ * ubi_close_volume - close UBI volume.
+ * @desc: volume descriptor
+ */
+void ubi_close_volume(struct ubi_volume_desc *desc)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ dbg_gen("close device %d, volume %d, mode %d",
+ ubi->ubi_num, vol->vol_id, desc->mode);
+
+ spin_lock(&ubi->volumes_lock);
+ switch (desc->mode) {
+ case UBI_READONLY:
+ vol->readers -= 1;
+ break;
+ case UBI_READWRITE:
+ vol->writers -= 1;
+ break;
+ case UBI_EXCLUSIVE:
+ vol->exclusive = 0;
+ break;
+ case UBI_METAONLY:
+ vol->metaonly = 0;
+ break;
+ }
+ vol->ref_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ kfree(desc);
+ put_device(&vol->dev);
+ ubi_put_device(ubi);
+ module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(ubi_close_volume);
+
+/**
+ * leb_read_sanity_check - does sanity checks on read requests.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to read from
+ * @offset: offset within the logical eraseblock to read from
+ * @len: how many bytes to read
+ *
+ * This function is used by ubi_leb_read() and ubi_leb_read_sg()
+ * to perform sanity checks.
+ */
+static int leb_read_sanity_check(struct ubi_volume_desc *desc, int lnum,
+ int offset, int len)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int vol_id = vol->vol_id;
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
+ lnum >= vol->used_ebs || offset < 0 || len < 0 ||
+ offset + len > vol->usable_leb_size)
+ return -EINVAL;
+
+ if (vol->vol_type == UBI_STATIC_VOLUME) {
+ if (vol->used_ebs == 0)
+ /* Empty static UBI volume */
+ return 0;
+ if (lnum == vol->used_ebs - 1 &&
+ offset + len > vol->last_eb_bytes)
+ return -EINVAL;
+ }
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ return 0;
+}
+
+/**
+ * ubi_leb_read - read data.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to read from
+ * @buf: buffer where to store the read data
+ * @offset: offset within the logical eraseblock to read from
+ * @len: how many bytes to read
+ * @check: whether UBI has to check the read data's CRC or not.
+ *
+ * This function reads data from offset @offset of logical eraseblock @lnum and
+ * stores the data at @buf. When reading from static volumes, @check specifies
+ * whether the data has to be checked or not. If yes, the whole logical
+ * eraseblock will be read and its CRC checksum will be checked (i.e., the CRC
+ * checksum is per-eraseblock). So checking may substantially slow down the
+ * read speed. The @check argument is ignored for dynamic volumes.
+ *
+ * In case of success, this function returns zero. In case of failure, this
+ * function returns a negative error code.
+ *
+ * %-EBADMSG error code is returned:
+ * o for both static and dynamic volumes if the MTD driver has detected a data
+ * integrity problem (unrecoverable ECC checksum mismatch in case of NAND);
+ * o for static volumes in case of data CRC mismatch.
+ *
+ * If the volume is damaged because of an interrupted update this function just
+ * returns immediately with %-EBADF error code.
+ */
+int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
+ int len, int check)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int err, vol_id = vol->vol_id;
+
+ dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
+
+ err = leb_read_sanity_check(desc, lnum, offset, len);
+ if (err < 0)
+ return err;
+
+ if (len == 0)
+ return 0;
+
+ err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
+ if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
+ ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
+ vol->corrupted = 1;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_leb_read);
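+
+/*
+ * Editor's sketch, not part of the original patch: reading the first LEB of
+ * a static volume with CRC checking enabled. The descriptor is assumed to
+ * come from ubi_open_volume(); example_read_first_leb() is hypothetical.
+ */
+static int __maybe_unused example_read_first_leb(struct ubi_volume_desc *desc,
+ char *buf, int len)
+{
+ /* check=1: the whole LEB is read and its CRC verified, which is
+ * slow but catches corruption; @check is ignored for dynamic volumes. */
+ int err = ubi_leb_read(desc, 0, buf, 0, len, 1);
+
+ if (err == -EBADMSG)
+ pr_err("LEB 0 contains corrupted data\n");
+ return err;
+}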
+
+/**
+ * ubi_leb_read_sg - read data into a scatter gather list.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to read from
+ * @sgl: UBI scatter gather list to store the read data
+ * @offset: offset within the logical eraseblock to read from
+ * @len: how many bytes to read
+ * @check: whether UBI has to check the read data's CRC or not.
+ *
+ * This function works exactly like ubi_leb_read(), but instead of
+ * storing the read data into a buffer it writes them to an UBI scatter
+ * gather list.
+ */
+int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
+ int offset, int len, int check)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int err, vol_id = vol->vol_id;
+
+ dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
+
+ err = leb_read_sanity_check(desc, lnum, offset, len);
+ if (err < 0)
+ return err;
+
+ if (len == 0)
+ return 0;
+
+ err = ubi_eba_read_leb_sg(ubi, vol, sgl, lnum, offset, len, check);
+ if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
+ ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
+ vol->corrupted = 1;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_leb_read_sg);
+
+/**
+ * ubi_leb_write - write data.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to write to
+ * @buf: data to write
+ * @offset: offset within the logical eraseblock where to write
+ * @len: how many bytes to write
+ *
+ * This function writes @len bytes of data from @buf to offset @offset of
+ * logical eraseblock @lnum.
+ *
+ * This function takes care of physical eraseblock write failures. If the
+ * write to the physical eraseblock fails, the logical eraseblock is
+ * re-mapped to another physical eraseblock, the data is recovered, and the
+ * write finishes. UBI has a pool of reserved physical eraseblocks for this.
+ *
+ * If all the data were successfully written, zero is returned. If an error
+ * occurred and UBI has not been able to recover from it, this function returns
+ * a negative error code. Note, in case of an error, it is possible that
+ * something was still written to the flash media, but that may be some
+ * garbage.
+ *
+ * If the volume is damaged because of an interrupted update this function just
+ * returns immediately with %-EBADF code.
+ */
+int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
+ int offset, int len)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int vol_id = vol->vol_id;
+
+ dbg_gen("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
+ return -EINVAL;
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (!ubi_leb_valid(vol, lnum) || offset < 0 || len < 0 ||
+ offset + len > vol->usable_leb_size ||
+ offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (len == 0)
+ return 0;
+
+ return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_write);
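+
+/*
+ * Editor's sketch, not part of the original patch: both @offset and @len
+ * must be multiples of the minimal I/O unit, which callers can query via
+ * ubi_get_device_info(). example_aligned_write() is hypothetical.
+ */
+static int __maybe_unused example_aligned_write(struct ubi_volume_desc *desc,
+ int lnum, const void *buf, int len)
+{
+ struct ubi_volume_info vi;
+ struct ubi_device_info di;
+ int err;
+
+ ubi_get_volume_info(desc, &vi);
+ err = ubi_get_device_info(vi.ubi_num, &di);
+ if (err)
+ return err;
+
+ /* Reject unaligned lengths instead of writing garbage padding */
+ if (len & (di.min_io_size - 1))
+ return -EINVAL;
+
+ return ubi_leb_write(desc, lnum, buf, 0, len);
+}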
+
+/**
+ * ubi_leb_change - change logical eraseblock atomically.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to change
+ * @buf: data to write
+ * @len: how many bytes to write
+ *
+ * This function changes the contents of a logical eraseblock atomically. @buf
+ * has to contain new logical eraseblock data, and @len - the length of the
+ * data, which has to be aligned. The length may be shorter than the logical
+ * eraseblock size, and the logical eraseblock may be appended to more times
+ * later on. This function guarantees that in case of an unclean reboot the old
+ * contents is preserved. Returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
+ int len)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int vol_id = vol->vol_id;
+
+ dbg_gen("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
+ return -EINVAL;
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (!ubi_leb_valid(vol, lnum) || len < 0 ||
+ len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (len == 0)
+ return 0;
+
+ return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_change);
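+
+/*
+ * Editor's sketch, not part of the original patch: the classic use of the
+ * atomic change primitive is re-writing a small "superblock"-style LEB in
+ * place, so an unclean reboot leaves either the old or the new contents,
+ * never a mix:
+ *
+ *	err = ubi_leb_change(desc, 0, new_sb, sb_len);
+ *
+ * where LEB 0, new_sb and sb_len are hypothetical; sb_len must be aligned
+ * to the minimal I/O unit, as for ubi_leb_write().
+ */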
+
+/**
+ * ubi_leb_erase - erase logical eraseblock.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ *
+ * This function un-maps logical eraseblock @lnum and synchronously erases the
+ * correspondent physical eraseblock. Returns zero in case of success and a
+ * negative error code in case of failure.
+ *
+ * If the volume is damaged because of an interrupted update this function just
+ * returns immediately with %-EBADF code.
+ */
+int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int err;
+
+ dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (!ubi_leb_valid(vol, lnum))
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ err = ubi_eba_unmap_leb(ubi, vol, lnum);
+ if (err)
+ return err;
+
+ return ubi_wl_flush(ubi, vol->vol_id, lnum);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_erase);
+
+/**
+ * ubi_leb_unmap - un-map logical eraseblock.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ *
+ * This function un-maps logical eraseblock @lnum and schedules the
+ * corresponding physical eraseblock for erasure, so that it will eventually be
+ * physically erased in background. This operation is much faster than the
+ * erase operation.
+ *
+ * Unlike erase, the un-map operation does not guarantee that the logical
+ * eraseblock will contain all 0xFF bytes when UBI is initialized again. For
+ * example, if several logical eraseblocks are un-mapped, and an unclean reboot
+ * happens after this, the logical eraseblocks will not necessarily be
+ * un-mapped again when this MTD device is attached. They may actually be
+ * mapped to the same physical eraseblocks again. So, this function has to be
+ * used with care.
+ *
+ * In other words, when un-mapping a logical eraseblock, UBI does not store
+ * any information about this on the flash media, it just marks the logical
+ * eraseblock as "un-mapped" in RAM. If UBI is detached before the physical
+ * eraseblock is physically erased, it will be mapped again to the same logical
+ * eraseblock when the MTD device is attached again.
+ *
+ * The main and obvious use-case of this function is when the contents of a
+ * logical eraseblock has to be re-written. Then it is much more efficient to
+ * first un-map it, then write new data, rather than first erase it, then write
+ * new data. Note, once new data has been written to the logical eraseblock,
+ * UBI guarantees that the old contents has gone forever. In other words, if an
+ * unclean reboot happens after the logical eraseblock has been un-mapped and
+ * then written to, it will contain the last written data.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure. If the volume is damaged because of an interrupted update
+ * this function just returns immediately with %-EBADF code.
+ */
+int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum);
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (!ubi_leb_valid(vol, lnum))
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ return ubi_eba_unmap_leb(ubi, vol, lnum);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_unmap);
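+
+/*
+ * Editor's sketch, not part of the original patch: the re-write pattern
+ * described above - un-map first, then write - which is cheaper than
+ * ubi_leb_erase() because the old PEB is erased in background.
+ * example_rewrite_leb() is hypothetical.
+ */
+static int __maybe_unused example_rewrite_leb(struct ubi_volume_desc *desc,
+ int lnum, const void *buf, int len)
+{
+ /* Until the write below succeeds, an unclean reboot may bring the
+ * old contents back; afterwards it is gone forever. */
+ int err = ubi_leb_unmap(desc, lnum);
+
+ if (err)
+ return err;
+ return ubi_leb_write(desc, lnum, buf, 0, len);
+}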
+
+/**
+ * ubi_leb_map - map logical eraseblock to a physical eraseblock.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ *
+ * This function maps an un-mapped logical eraseblock @lnum to a physical
+ * eraseblock. This means, that after a successful invocation of this
+ * function the logical eraseblock @lnum will be empty (contain only %0xFF
+ * bytes) and be mapped to a physical eraseblock, even if an unclean reboot
+ * happens.
+ *
+ * This function returns zero in case of success, %-EBADF if the volume is
+ * damaged because of an interrupted update, %-EBADMSG if the logical
+ * eraseblock is already mapped, and other negative error codes in case of
+ * other failures.
+ */
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ dbg_gen("map LEB %d:%d", vol->vol_id, lnum);
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (!ubi_leb_valid(vol, lnum))
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (ubi_eba_is_mapped(vol, lnum))
+ return -EBADMSG;
+
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_map);
+
+/**
+ * ubi_is_mapped - check if logical eraseblock is mapped.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ *
+ * This function checks if logical eraseblock @lnum is mapped to a physical
+ * eraseblock. If a logical eraseblock is un-mapped, this does not necessarily
+ * mean it will still be un-mapped after the UBI device is re-attached. The
+ * logical eraseblock may become mapped to the physical eraseblock it was last
+ * mapped to.
+ *
+ * This function returns %1 if the LEB is mapped, %0 if not, and a negative
+ * error code in case of failure. If the volume is damaged because of an
+ * interrupted update this function just returns immediately with %-EBADF error
+ * code.
+ */
+int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum)
+{
+ struct ubi_volume *vol = desc->vol;
+
+ dbg_gen("test LEB %d:%d", vol->vol_id, lnum);
+
+ if (!ubi_leb_valid(vol, lnum))
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ return ubi_eba_is_mapped(vol, lnum);
+}
+EXPORT_SYMBOL_GPL(ubi_is_mapped);
+
+/**
+ * ubi_sync - synchronize UBI device buffers.
+ * @ubi_num: UBI device to synchronize
+ *
+ * The underlying MTD device may cache data in hardware or in software. This
+ * function ensures the caches are flushed. Returns zero in case of success and
+ * a negative error code in case of failure.
+ */
+int ubi_sync(int ubi_num)
+{
+ struct ubi_device *ubi;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ mtd_sync(ubi->mtd);
+ ubi_put_device(ubi);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ubi_sync);
+
+/**
+ * ubi_flush - flush UBI work queue.
+ * @ubi_num: UBI device to flush work queue
+ * @vol_id: volume id to flush for
+ * @lnum: logical eraseblock number to flush for
+ *
+ * This function executes all pending works for a particular volume id / logical
+ * eraseblock number pair. If either value is set to %UBI_ALL, then it acts as
+ * a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_flush(int ubi_num, int vol_id, int lnum)
+{
+ struct ubi_device *ubi;
+ int err = 0;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ err = ubi_wl_flush(ubi, vol_id, lnum);
+ ubi_put_device(ubi);
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_flush);
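+
+/*
+ * Editor's sketch, not part of the original patch: a caller which just
+ * un-mapped several LEBs of volume 2 on device 0 and wants the erasures
+ * completed before, say, a suspend, could flush only that volume's work:
+ *
+ *	err = ubi_flush(0, 2, UBI_ALL);
+ */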
+
+BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
+
+/**
+ * ubi_register_volume_notifier - register a volume notifier.
+ * @nb: the notifier description object
+ * @ignore_existing: if non-zero, do not send "added" notification for all
+ * already existing volumes
+ *
+ * This function registers a volume notifier, which means that
+ * 'nb->notifier_call()' will be invoked when an UBI volume is created,
+ * removed, re-sized, re-named, or updated. The first argument of the function
+ * is the notification type. The second argument is pointer to a
+ * &struct ubi_notification object which describes the notification event.
+ * Using UBI API from the volume notifier is prohibited.
+ *
+ * This function returns zero in case of success and a negative error code
+ * in case of failure.
+ */
+int ubi_register_volume_notifier(struct notifier_block *nb,
+ int ignore_existing)
+{
+ int err;
+
+ err = blocking_notifier_chain_register(&ubi_notifiers, nb);
+ if (err != 0)
+ return err;
+ if (ignore_existing)
+ return 0;
+
+ /*
+ * We are going to walk all UBI devices and all volumes, and
+ * notify the user about existing volumes by the %UBI_VOLUME_ADDED
+ * event. We have to lock the @ubi_devices_mutex to make sure UBI
+ * devices do not disappear.
+ */
+ mutex_lock(&ubi_devices_mutex);
+ ubi_enumerate_volumes(nb);
+ mutex_unlock(&ubi_devices_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_register_volume_notifier);
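+
+/*
+ * Editor's sketch, not part of the original patch: a minimal notifier that
+ * logs volume events. The callback receives the notification type and a
+ * &struct ubi_notification pointer, as described above; example_vol_notify()
+ * is hypothetical.
+ */
+static int __maybe_unused example_vol_notify(struct notifier_block *nb,
+ unsigned long type, void *data)
+{
+ struct ubi_notification *nt = data;
+
+ pr_info("UBI event %lu: device %d, volume %d\n",
+ type, nt->di.ubi_num, nt->vi.vol_id);
+ return NOTIFY_OK;
+}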
+
+/**
+ * ubi_unregister_volume_notifier - unregister the volume notifier.
+ * @nb: the notifier description object
+ *
+ * This function unregisters volume notifier @nb and returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_unregister_volume_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&ubi_notifiers, nb);
+}
+EXPORT_SYMBOL_GPL(ubi_unregister_volume_notifier);
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
new file mode 100644
index 0000000000..1794d66b6e
--- /dev/null
+++ b/drivers/mtd/ubi/misc.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/* Here we keep miscellaneous functions which are used all over the UBI code */
+
+#include "ubi.h"
+
+/**
+ * ubi_calc_data_len - calculate how much real data is stored in a buffer.
+ * @ubi: UBI device description object
+ * @buf: a buffer with the contents of the physical eraseblock
+ * @length: the buffer length
+ *
+ * This function calculates how much "real data" is stored in @buf and returns
+ * the length. Continuous 0xFF bytes at the end of the buffer are not
+ * considered as "real data".
+ */
+int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
+ int length)
+{
+ int i;
+
+ ubi_assert(!(length & (ubi->min_io_size - 1)));
+
+ for (i = length - 1; i >= 0; i--)
+ if (((const uint8_t *)buf)[i] != 0xFF)
+ break;
+
+ /* The resulting length must be aligned to the minimum flash I/O size */
+ length = ALIGN(i + 1, ubi->min_io_size);
+ return length;
+}
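+
+/*
+ * Editor's sketch, not part of the original patch: with a min_io_size of
+ * 512, a 2048-byte buffer whose last non-0xFF byte sits at offset 700
+ * yields ALIGN(701, 512) = 1024 - the two trailing all-0xFF pages do not
+ * count as data and need not be written.
+ */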
+
+/**
+ * ubi_check_volume - check the contents of a static volume.
+ * @ubi: UBI device description object
+ * @vol_id: ID of the volume to check
+ *
+ * This function checks if static volume @vol_id is corrupted by fully reading
+ * it and checking data CRC. This function returns %0 if the volume is not
+ * corrupted, %1 if it is corrupted and a negative error code in case of
+ * failure. Dynamic volumes are not checked and zero is returned immediately.
+ */
+int ubi_check_volume(struct ubi_device *ubi, int vol_id)
+{
+ void *buf;
+ int err = 0, i;
+ struct ubi_volume *vol = ubi->volumes[vol_id];
+
+ if (vol->vol_type != UBI_STATIC_VOLUME)
+ return 0;
+
+ buf = vmalloc(vol->usable_leb_size);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < vol->used_ebs; i++) {
+ int size;
+
+ cond_resched();
+
+ if (i == vol->used_ebs - 1)
+ size = vol->last_eb_bytes;
+ else
+ size = vol->usable_leb_size;
+
+ err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
+ if (err) {
+ if (mtd_is_eccerr(err))
+ err = 1;
+ break;
+ }
+ }
+
+ vfree(buf);
+ return err;
+}
+
+/**
+ * ubi_update_reserved - update bad eraseblock handling accounting data.
+ * @ubi: UBI device description object
+ *
+ * This function calculates the gap between current number of PEBs reserved for
+ * bad eraseblock handling and the required level of PEBs that must be
+ * reserved, and if necessary, reserves more PEBs to fill that gap, according
+ * to availability. Should be called with ubi->volumes_lock held.
+ */
+void ubi_update_reserved(struct ubi_device *ubi)
+{
+ int need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
+
+ if (need <= 0 || ubi->avail_pebs == 0)
+ return;
+
+ need = min_t(int, need, ubi->avail_pebs);
+ ubi->avail_pebs -= need;
+ ubi->rsvd_pebs += need;
+ ubi->beb_rsvd_pebs += need;
+ ubi_msg(ubi, "reserved more %d PEBs for bad PEB handling", need);
+}
+
+/**
+ * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad
+ * eraseblock handling.
+ * @ubi: UBI device description object
+ */
+void ubi_calculate_reserved(struct ubi_device *ubi)
+{
+ /*
+ * Calculate the actual number of PEBs currently needed to be reserved
+ * for future bad eraseblock handling.
+ */
+ ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
+ if (ubi->beb_rsvd_level < 0) {
+ ubi->beb_rsvd_level = 0;
+ ubi_warn(ubi, "number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
+ ubi->bad_peb_count, ubi->bad_peb_limit);
+ }
+}
+
+/**
+ * ubi_check_pattern - check if buffer contains only a certain byte pattern.
+ * @buf: buffer to check
+ * @patt: the pattern to check
+ * @size: buffer size in bytes
+ *
+ * This function returns %1 if there are only @patt bytes in @buf, and %0 if
+ * something else was also found.
+ */
+int ubi_check_pattern(const void *buf, uint8_t patt, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ if (((const uint8_t *)buf)[i] != patt)
+ return 0;
+ return 1;
+}
+
+/* Normal UBI messages */
+void ubi_msg(const struct ubi_device *ubi, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_notice(UBI_NAME_STR "%d: %pV\n", ubi->ubi_num, &vaf);
+
+ va_end(args);
+}
+
+/* UBI warning messages */
+void ubi_warn(const struct ubi_device *ubi, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_warn(UBI_NAME_STR "%d warning: %ps: %pV\n",
+ ubi->ubi_num, __builtin_return_address(0), &vaf);
+
+ va_end(args);
+}
+
+/* UBI error messages */
+void ubi_err(const struct ubi_device *ubi, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_err(UBI_NAME_STR "%d error: %ps: %pV\n",
+ ubi->ubi_num, __builtin_return_address(0), &vaf);
+ va_end(args);
+}
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
new file mode 100644
index 0000000000..2c9cd3b643
--- /dev/null
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (C) International Business Machines Corp., 2006
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Thomas Gleixner
+ * Frank Haverkamp
+ * Oliver Lohmann
+ * Andreas Arnez
+ *
+ * This file defines the layout of UBI headers and all the other UBI on-flash
+ * data structures.
+ */
+
+#ifndef __UBI_MEDIA_H__
+#define __UBI_MEDIA_H__
+
+#include <asm/byteorder.h>
+
+/* The version of UBI images supported by this implementation */
+#define UBI_VERSION 1
+
+/* The highest erase counter value supported by this implementation */
+#define UBI_MAX_ERASECOUNTER 0x7FFFFFFF
+
+/* The initial CRC32 value used when calculating CRC checksums */
+#define UBI_CRC32_INIT 0xFFFFFFFFU
+
+/* Erase counter header magic number (ASCII "UBI#") */
+#define UBI_EC_HDR_MAGIC 0x55424923
+/* Volume identifier header magic number (ASCII "UBI!") */
+#define UBI_VID_HDR_MAGIC 0x55424921
+
+/*
+ * Volume type constants used in the volume identifier header.
+ *
+ * @UBI_VID_DYNAMIC: dynamic volume
+ * @UBI_VID_STATIC: static volume
+ */
+enum {
+ UBI_VID_DYNAMIC = 1,
+ UBI_VID_STATIC = 2
+};
+
+/*
+ * Volume flags used in the volume table record.
+ *
+ * @UBI_VTBL_AUTORESIZE_FLG: auto-resize this volume
+ * @UBI_VTBL_SKIP_CRC_CHECK_FLG: skip the CRC check done on a static volume at
+ * open time. Should only be set on volumes that
+ * are used by upper layers doing this kind of
+ * check. Main use-case for this flag is
+ * boot-time reduction
+ *
+ * %UBI_VTBL_AUTORESIZE_FLG flag can be set only for one volume in the volume
+ * table. UBI automatically re-sizes the volume which has this flag set and
+ * grows it to the largest possible size. This means that if after the
+ * initialization UBI finds out that there are available physical eraseblocks
+ * present on the device, it automatically appends all of them to the volume
+ * (the physical eraseblocks reserved for bad eraseblocks handling and other
+ * reserved physical eraseblocks are not taken). So, if there is a volume with
+ * the %UBI_VTBL_AUTORESIZE_FLG flag set, the amount of available logical
+ * eraseblocks will be zero after UBI is loaded, because all of them will be
+ * reserved for this volume. Note, the %UBI_VTBL_AUTORESIZE_FLG bit is cleared
+ * after the volume has been initialized.
+ *
+ * The auto-resize feature is useful for device production purposes. For
+ * example, different NAND flash chips may have different amounts of initial
+ * bad eraseblocks, depending on the particular chip instance. Manufacturers
+ * of NAND chips usually guarantee that the amount of initial bad eraseblocks
+ * does not exceed a certain percentage, e.g. 2%. When one creates an UBI
+ * image which will be flashed to the end devices in production, one does not
+ * know the exact amount of good physical eraseblocks the NAND chip on the
+ * device will have, but this number is required to calculate the volume sizes
+ * and put them into the volume table of the UBI image. In this case, one of
+ * the volumes (e.g., the one
+ * which will store the root file system) is marked as "auto-resizable", and
+ * UBI will adjust its size on the first boot if needed.
+ *
+ * Note, first UBI reserves some amount of physical eraseblocks for bad
+ * eraseblock handling, and then re-sizes the volume, not vice-versa. This
+ * means that the pool of reserved physical eraseblocks will always be present.
+ */
+enum {
+ UBI_VTBL_AUTORESIZE_FLG = 0x01,
+ UBI_VTBL_SKIP_CRC_CHECK_FLG = 0x02,
+};
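+
+/*
+ * Editor's sketch, not part of the original patch: the auto-resize flag is
+ * typically requested at image creation time. Assuming the standard ubinize
+ * ini syntax, a root file system volume would be marked like:
+ *
+ *	[rootfs]
+ *	mode=ubi
+ *	image=rootfs.img
+ *	vol_id=0
+ *	vol_type=dynamic
+ *	vol_name=rootfs
+ *	vol_flags=autoresize
+ *
+ * UBI then grows "rootfs" onto all remaining good PEBs on first attach and
+ * clears %UBI_VTBL_AUTORESIZE_FLG in the volume table.
+ */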
+
+/*
+ * Compatibility constants used by internal volumes.
+ *
+ * @UBI_COMPAT_DELETE: delete this internal volume before anything is written
+ * to the flash
+ * @UBI_COMPAT_RO: attach this device in read-only mode
+ * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its
+ * physical eraseblocks, don't allow the wear-leveling
+ * sub-system to move them
+ * @UBI_COMPAT_REJECT: reject this UBI image
+ */
+enum {
+ UBI_COMPAT_DELETE = 1,
+ UBI_COMPAT_RO = 2,
+ UBI_COMPAT_PRESERVE = 4,
+ UBI_COMPAT_REJECT = 5
+};
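+
+/*
+ * A minimal sketch of how an attaching implementation could act on the
+ * compatibility value of an internal volume it does not recognize, following
+ * the semantics documented above. Illustrative only: ubi_compat_action_sketch()
+ * is not a UBI symbol, and the returned action codes are made up for the
+ * example.
+ */
+static inline int ubi_compat_action_sketch(int compat)
+{
+	switch (compat) {
+	case UBI_COMPAT_DELETE:
+		return 0;	/* delete the volume, then keep attaching */
+	case UBI_COMPAT_RO:
+		return 1;	/* attach the whole device read-only */
+	case UBI_COMPAT_PRESERVE:
+		return 2;	/* keep the PEBs untouched, hide them from WL */
+	case UBI_COMPAT_REJECT:
+	default:
+		return -1;	/* refuse to attach this UBI image */
+	}
+}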
+
+/* Sizes of UBI headers */
+#define UBI_EC_HDR_SIZE sizeof(struct ubi_ec_hdr)
+#define UBI_VID_HDR_SIZE sizeof(struct ubi_vid_hdr)
+
+/* Sizes of UBI headers without the ending CRC */
+#define UBI_EC_HDR_SIZE_CRC (UBI_EC_HDR_SIZE - sizeof(__be32))
+#define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(__be32))
+
+/**
+ * struct ubi_ec_hdr - UBI erase counter header.
+ * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC)
+ * @version: version of UBI implementation which is supposed to accept this
+ * UBI image
+ * @padding1: reserved for future, zeroes
+ * @ec: the erase counter
+ * @vid_hdr_offset: where the VID header starts
+ * @data_offset: where the user data start
+ * @image_seq: image sequence number
+ * @padding2: reserved for future, zeroes
+ * @hdr_crc: erase counter header CRC checksum
+ *
+ * The erase counter header takes 64 bytes and has plenty of unused space for
+ * future use. The unused fields are zeroed. The @version field is used to
+ * indicate the version of the UBI implementation which is supposed to be able
+ * to work with this UBI image. If @version is greater than the current UBI
+ * version, the image is rejected (see the validation sketch after the
+ * structure definition). This may be useful in the future if something is
+ * changed radically. This field is duplicated in the volume identifier
+ * header.
+ *
+ * The @vid_hdr_offset and @data_offset fields contain the offset of the
+ * volume identifier header and user data, relative to the beginning of the
+ * physical eraseblock. These values have to be the same for all physical
+ * eraseblocks.
+ *
+ * The @image_seq field is used to validate a UBI image that has been prepared
+ * for a UBI device. The @image_seq value can be any value, but it must be the
+ * same on all eraseblocks. UBI will ensure that all new erase counter headers
+ * also contain this value, and will check the value when attaching the flash.
+ * One way to make use of @image_seq is to increase its value by one every time
+ * an image is flashed over an existing image, then, if the flashing does not
+ * complete, UBI will detect the error when attaching the media.
+ */
+struct ubi_ec_hdr {
+ __be32 magic;
+ __u8 version;
+ __u8 padding1[3];
+ __be64 ec; /* Warning: the current limit is 31-bit anyway! */
+ __be32 vid_hdr_offset;
+ __be32 data_offset;
+ __be32 image_seq;
+ __u8 padding2[32];
+ __be32 hdr_crc;
+} __packed;
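+
+/*
+ * A minimal validation sketch for the header above (illustrative; the
+ * in-tree checks live in the I/O code). It assumes the crc32() helper from
+ * <linux/crc32.h> and verifies the three invariants documented above:
+ * magic, version and the trailing CRC.
+ */
+static inline int ubi_ec_hdr_ok_sketch(const struct ubi_ec_hdr *hdr)
+{
+	if (be32_to_cpu(hdr->magic) != UBI_EC_HDR_MAGIC)
+		return 0;	/* not an erase counter header at all */
+
+	if (hdr->version > UBI_VERSION)
+		return 0;	/* image is newer than this implementation */
+
+	/* The CRC covers everything up to, but excluding, @hdr_crc */
+	return crc32(UBI_CRC32_INIT, hdr, UBI_EC_HDR_SIZE_CRC) ==
+	       be32_to_cpu(hdr->hdr_crc);
+}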
+
+/**
+ * struct ubi_vid_hdr - on-flash UBI volume identifier header.
+ * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC)
+ * @version: UBI implementation version which is supposed to accept this UBI
+ * image (%UBI_VERSION)
+ * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC)
+ * @copy_flag: if this logical eraseblock was copied from another physical
+ * eraseblock (for wear-leveling reasons)
+ * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE,
+ * %UBI_COMPAT_RO, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT)
+ * @vol_id: ID of this volume
+ * @lnum: logical eraseblock number
+ * @padding1: reserved for future, zeroes
+ * @data_size: how many bytes of data this logical eraseblock contains
+ * @used_ebs: total number of used logical eraseblocks in this volume
+ * @data_pad: how many bytes at the end of this physical eraseblock are not
+ * used
+ * @data_crc: CRC checksum of the data stored in this logical eraseblock
+ * @padding2: reserved for future, zeroes
+ * @sqnum: sequence number
+ * @padding3: reserved for future, zeroes
+ * @hdr_crc: volume identifier header CRC checksum
+ *
+ * The @sqnum is the value of the global sequence counter at the time when this
+ * VID header was created. The global sequence counter is incremented each time
+ * UBI writes a new VID header to the flash, i.e. when it maps a logical
+ * eraseblock to a new physical eraseblock. The global sequence counter is an
+ * unsigned 64-bit integer and we assume it never overflows. The @sqnum
+ * (sequence number) is used to distinguish between older and newer versions of
+ * logical eraseblocks.
+ *
+ * There are 2 situations when there may be more than one physical eraseblock
+ * corresponding to the same logical eraseblock, i.e., having the same @vol_id
+ * and @lnum values in the volume identifier header. Suppose we have a logical
+ * eraseblock L and it is mapped to the physical eraseblock P.
+ *
+ * 1. Because UBI may erase physical eraseblocks asynchronously, the following
+ * situation is possible: L is asynchronously erased, so P is scheduled for
+ * erasure, then L is written to, i.e. mapped to another physical eraseblock
+ * P1, so P1 is written to, then an unclean reboot happens. Result: there are
+ * 2 physical eraseblocks P and P1 corresponding to the same logical
+ * eraseblock L. But P1 has a greater sequence number, so UBI picks P1 when it
+ * attaches the flash.
+ *
+ * 2. From time to time UBI moves logical eraseblocks to other physical
+ * eraseblocks for wear-leveling reasons. If, for example, UBI moves L from P
+ * to P1, and an unclean reboot happens before P is physically erased, there
+ * are two physical eraseblocks P and P1 corresponding to L and UBI has to
+ * select one of them when the flash is attached. The @sqnum field says which
+ * PEB is the original (obviously P will have the lower @sqnum) and which is
+ * the copy. But it is not enough to select the physical eraseblock with the
+ * higher sequence number, because the unclean reboot could have happened in
+ * the middle of the copying process, in which case the data in the copy (P1)
+ * is corrupted. It is also not enough to just select the physical eraseblock
+ * with the lower sequence number, because the data there may be old (consider
+ * the case where more data was added to P1 after the copying). Moreover, the
+ * unclean reboot may happen when the erasure of P has just started, which
+ * results in an unstable P that is "mostly" OK but still has unstable bits.
+ *
+ * UBI uses the @copy_flag field to indicate that this logical eraseblock is a
+ * copy. UBI also calculates the data CRC when the data is moved and stores it
+ * in the @data_crc field of the copy (P1). So when UBI needs to pick one of
+ * the two physical eraseblocks (P or P1), the @copy_flag of the newer one
+ * (P1) is examined. If it is cleared, the situation is simple and the newer
+ * one is picked. If it is set, the data CRC of the copy (P1) is examined. If
+ * the CRC checksum is correct, this physical eraseblock is selected (P1),
+ * otherwise the older one (P) is selected; see the selection sketch after the
+ * structure definition below.
+ *
+ * There are 2 sorts of volumes in UBI: user volumes and internal volumes.
+ * Internal volumes are not visible from outside and are used for various
+ * internal UBI purposes. In this implementation there is only one internal
+ * volume - the layout volume. Internal volumes are the main mechanism of UBI
+ * extensions. For example, in the future one may introduce a journal internal
+ * volume. Internal volumes have their own reserved range of IDs.
+ *
+ * The @compat field is only used for internal volumes and contains the
+ * "degree of their compatibility". It is always zero for user volumes. This
+ * field provides a mechanism to introduce UBI extensions while staying
+ * compatible with older UBI binaries. For example, if someone introduced a
+ * journal in the future, they would probably use %UBI_COMPAT_DELETE
+ * compatibility for the journal volume. In this case, older UBI binaries,
+ * which know nothing about the journal volume, would just delete this volume
+ * and work perfectly fine. This is similar to what Ext2fs does when it is fed
+ * an Ext3fs image - it just ignores the Ext3fs journal.
+ *
+ * The @data_crc field contains the CRC checksum of the contents of the
+ * logical eraseblock if this is a static volume. For dynamic volumes it does
+ * not, as a rule, contain a CRC checksum. The only exception is when the data
+ * of the physical eraseblock was moved by the wear-leveling sub-system; the
+ * wear-leveling sub-system then calculates the data CRC and stores it in the
+ * @data_crc field. And of course, the @copy_flag is %1 in this case.
+ *
+ * The @data_size field is used only for static volumes because UBI has to
+ * know how many bytes of data are stored in this eraseblock. For dynamic
+ * volumes, this field usually contains zero. The only exception is when the
+ * data of the physical eraseblock was moved to another physical eraseblock
+ * for wear-leveling reasons. In this case, UBI calculates the CRC checksum of
+ * the contents and uses both the @data_crc and @data_size fields, with
+ * @data_size containing the data size.
+ *
+ * The @used_ebs field is used only for static volumes and indicates how many
+ * eraseblocks the data of the volume takes. For dynamic volumes this field is
+ * not used and always contains zero.
+ *
+ * The @data_pad is calculated when volumes are created using the alignment
+ * parameter. So, effectively, the @data_pad field reduces the size of logical
+ * eraseblocks of this volume. This is very handy when one uses block-oriented
+ * software (say, cramfs) on top of the UBI volume.
+ */
+struct ubi_vid_hdr {
+ __be32 magic;
+ __u8 version;
+ __u8 vol_type;
+ __u8 copy_flag;
+ __u8 compat;
+ __be32 vol_id;
+ __be32 lnum;
+ __u8 padding1[4];
+ __be32 data_size;
+ __be32 used_ebs;
+ __be32 data_pad;
+ __be32 data_crc;
+ __u8 padding2[4];
+ __be64 sqnum;
+ __u8 padding3[12];
+ __be32 hdr_crc;
+} __packed;
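+
+/*
+ * A minimal sketch of the P/P1 selection rule described above (the real
+ * logic lives in ubi_compare_lebs()). "newer" is the VID header with the
+ * higher @sqnum; the crc32() helper from <linux/crc32.h> is assumed.
+ * Returns non-zero if the newer PEB should be picked.
+ */
+static inline int ubi_pick_newer_sketch(const struct ubi_vid_hdr *newer,
+					const void *newer_data, int len)
+{
+	if (!newer->copy_flag)
+		return 1;	/* not a WL copy: simply take the newer PEB */
+
+	/* A copy: trust it only if its data CRC is intact */
+	return crc32(UBI_CRC32_INIT, newer_data, len) ==
+	       be32_to_cpu(newer->data_crc);
+}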
+
+/* Internal UBI volumes count */
+#define UBI_INT_VOL_COUNT 1
+
+/*
+ * Starting ID of internal volumes: 0x7fffefff.
+ * There is reserved room for 4096 internal volumes.
+ */
+#define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096)
+
+/* The layout volume contains the volume table */
+
+#define UBI_LAYOUT_VOLUME_ID UBI_INTERNAL_VOL_START
+#define UBI_LAYOUT_VOLUME_TYPE UBI_VID_DYNAMIC
+#define UBI_LAYOUT_VOLUME_ALIGN 1
+#define UBI_LAYOUT_VOLUME_EBS 2
+#define UBI_LAYOUT_VOLUME_NAME "layout volume"
+#define UBI_LAYOUT_VOLUME_COMPAT UBI_COMPAT_REJECT
+
+/* The maximum number of volumes per one UBI device */
+#define UBI_MAX_VOLUMES 128
+
+/* The maximum volume name length */
+#define UBI_VOL_NAME_MAX 127
+
+/* Size of the volume table record */
+#define UBI_VTBL_RECORD_SIZE sizeof(struct ubi_vtbl_record)
+
+/* Size of the volume table record without the ending CRC */
+#define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(__be32))
+
+/**
+ * struct ubi_vtbl_record - a record in the volume table.
+ * @reserved_pebs: how many physical eraseblocks are reserved for this volume
+ * @alignment: volume alignment
+ * @data_pad: how many bytes are unused at the end of the each physical
+ * eraseblock to satisfy the requested alignment
+ * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
+ * @upd_marker: if volume update was started but not finished
+ * @name_len: volume name length
+ * @name: the volume name
+ * @flags: volume flags (%UBI_VTBL_AUTORESIZE_FLG)
+ * @padding: reserved, zeroes
+ * @crc: a CRC32 checksum of the record
+ *
+ * The volume table records are stored in the volume table, which is stored in
+ * the layout volume. The layout volume consists of 2 logical eraseblocks, each
+ * of which contains a copy of the volume table (i.e., the volume table is
+ * duplicated). The volume table is an array of &struct ubi_vtbl_record
+ * objects indexed by the volume ID.
+ *
+ * If the size of the logical eraseblock is large enough to fit
+ * %UBI_MAX_VOLUMES records, the volume table contains %UBI_MAX_VOLUMES
+ * records. Otherwise, it contains as many records as fit (i.e., the size of
+ * the logical eraseblock divided by sizeof(struct ubi_vtbl_record)); see the
+ * sketch after the structure definition below.
+ *
+ * The @upd_marker flag is used to implement volume update. It is set to %1
+ * before update and set to %0 after the update. So if the update operation was
+ * interrupted, UBI knows that the volume is corrupted.
+ *
+ * The @alignment field is specified when the volume is created and cannot be
+ * changed later. It may be useful, for example, when a block-oriented file
+ * system works on top of UBI. The @data_pad field is calculated using the
+ * logical eraseblock size and @alignment. The alignment must be a multiple of
+ * the minimal flash I/O unit. If @alignment is 1, all the available space of
+ * the physical eraseblocks is used.
+ *
+ * Empty records contain all zeroes and the CRC checksum of those zeroes.
+ */
+struct ubi_vtbl_record {
+ __be32 reserved_pebs;
+ __be32 alignment;
+ __be32 data_pad;
+ __u8 vol_type;
+ __u8 upd_marker;
+ __be16 name_len;
+ __u8 name[UBI_VOL_NAME_MAX+1];
+ __u8 flags;
+ __u8 padding[23];
+ __be32 crc;
+} __packed;
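+
+/*
+ * Illustrative sketch of the record-count rule described in the kernel-doc
+ * above (the in-tree computation happens when the volume table is read):
+ * one LEB of the layout volume holds at most %UBI_MAX_VOLUMES records.
+ */
+static inline int ubi_vtbl_slots_sketch(int leb_size)
+{
+	int slots = leb_size / UBI_VTBL_RECORD_SIZE;
+
+	return slots < UBI_MAX_VOLUMES ? slots : UBI_MAX_VOLUMES;
+}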
+
+/* UBI fastmap on-flash data structures */
+
+#define UBI_FM_SB_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 1)
+#define UBI_FM_DATA_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 2)
+
+/* fastmap on-flash data structure format version */
+#define UBI_FM_FMT_VERSION 1
+
+#define UBI_FM_SB_MAGIC 0x7B11D69F
+#define UBI_FM_HDR_MAGIC 0xD4B82EF7
+#define UBI_FM_VHDR_MAGIC 0xFA370ED1
+#define UBI_FM_POOL_MAGIC 0x67AF4D08
+#define UBI_FM_EBA_MAGIC 0xf0c040a8
+
+/* A fastmap super block can be located between PEB 0 and
+ * UBI_FM_MAX_START */
+#define UBI_FM_MAX_START 64
+
+/* A fastmap can use up to UBI_FM_MAX_BLOCKS PEBs */
+#define UBI_FM_MAX_BLOCKS 32
+
+/* 5% of the total number of PEBs have to be scanned while attaching from a
+ * fastmap, but the size of this pool is bounded by UBI_FM_MIN_POOL_SIZE and
+ * UBI_FM_MAX_POOL_SIZE; see the sizing sketch below. */
+#define UBI_FM_MIN_POOL_SIZE 8
+#define UBI_FM_MAX_POOL_SIZE 256
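+
+/*
+ * A minimal sketch of the sizing rule above (illustrative;
+ * ubi_fm_pool_size_sketch() is not a UBI symbol): 5% of the PEB count,
+ * clamped to the [UBI_FM_MIN_POOL_SIZE, UBI_FM_MAX_POOL_SIZE] range.
+ */
+static inline int ubi_fm_pool_size_sketch(int peb_count)
+{
+	int size = peb_count * 5 / 100;
+
+	if (size < UBI_FM_MIN_POOL_SIZE)
+		size = UBI_FM_MIN_POOL_SIZE;
+	if (size > UBI_FM_MAX_POOL_SIZE)
+		size = UBI_FM_MAX_POOL_SIZE;
+	return size;
+}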
+
+/**
+ * struct ubi_fm_sb - UBI fastmap super block
+ * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
+ * @version: format version of this fastmap
+ * @data_crc: CRC over the fastmap data
+ * @used_blocks: number of PEBs used by this fastmap
+ * @block_loc: an array containing the location of all PEBs of the fastmap
+ * @block_ec: the erase counter of each used PEB
+ * @sqnum: highest sequence number value at the time the fastmap was taken
+ */
+struct ubi_fm_sb {
+ __be32 magic;
+ __u8 version;
+ __u8 padding1[3];
+ __be32 data_crc;
+ __be32 used_blocks;
+ __be32 block_loc[UBI_FM_MAX_BLOCKS];
+ __be32 block_ec[UBI_FM_MAX_BLOCKS];
+ __be64 sqnum;
+ __u8 padding2[32];
+} __packed;
+
+/**
+ * struct ubi_fm_hdr - header of the fastmap data set
+ * @magic: fastmap header magic number (%UBI_FM_HDR_MAGIC)
+ * @free_peb_count: number of free PEBs known by this fastmap
+ * @used_peb_count: number of used PEBs known by this fastmap
+ * @scrub_peb_count: number of PEBs known by this fastmap that need scrubbing
+ * @bad_peb_count: number of bad PEBs known by this fastmap
+ * @erase_peb_count: number of PEBs which have to be erased
+ * @vol_count: number of UBI volumes known by this fastmap
+ */
+struct ubi_fm_hdr {
+ __be32 magic;
+ __be32 free_peb_count;
+ __be32 used_peb_count;
+ __be32 scrub_peb_count;
+ __be32 bad_peb_count;
+ __be32 erase_peb_count;
+ __be32 vol_count;
+ __u8 padding[4];
+} __packed;
+
+/* struct ubi_fm_hdr is followed by two struct ubi_fm_scan_pool */
+
+/**
+ * struct ubi_fm_scan_pool - Fastmap pool PEBs to be scanned while attaching
+ * @magic: pool magic number (%UBI_FM_POOL_MAGIC)
+ * @size: current pool size
+ * @max_size: maximal pool size
+ * @pebs: an array containing the location of all PEBs in this pool
+ */
+struct ubi_fm_scan_pool {
+ __be32 magic;
+ __be16 size;
+ __be16 max_size;
+ __be32 pebs[UBI_FM_MAX_POOL_SIZE];
+ __be32 padding[4];
+} __packed;
+
+/* ubi_fm_scan_pool is followed by nfree+nused struct ubi_fm_ec records */
+
+/**
+ * struct ubi_fm_ec - stores the erase counter of a PEB
+ * @pnum: PEB number
+ * @ec: ec of this PEB
+ */
+struct ubi_fm_ec {
+ __be32 pnum;
+ __be32 ec;
+} __packed;
+
+/**
+ * struct ubi_fm_volhdr - Fastmap volume header
+ * It identifies the start of an EBA table.
+ * @magic: Fastmap volume header magic number (%UBI_FM_VHDR_MAGIC)
+ * @vol_id: volume id of the fastmapped volume
+ * @vol_type: type of the fastmapped volume
+ * @data_pad: data_pad value of the fastmapped volume
+ * @used_ebs: number of used LEBs within this volume
+ * @last_eb_bytes: number of bytes used in the last LEB
+ */
+struct ubi_fm_volhdr {
+ __be32 magic;
+ __be32 vol_id;
+ __u8 vol_type;
+ __u8 padding1[3];
+ __be32 data_pad;
+ __be32 used_ebs;
+ __be32 last_eb_bytes;
+ __u8 padding2[8];
+} __packed;
+
+/* struct ubi_fm_volhdr is followed by one struct ubi_fm_eba record */
+
+/**
+ * struct ubi_fm_eba - denotes an association between a PEB and LEB
+ * @magic: EBA table magic number
+ * @reserved_pebs: number of table entries
+ * @pnum: PEB number of LEB (LEB is the index)
+ */
+struct ubi_fm_eba {
+ __be32 magic;
+ __be32 reserved_pebs;
+ __be32 pnum[];
+} __packed;
+#endif /* !__UBI_MEDIA_H__ */
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
new file mode 100644
index 0000000000..c8f1bd4fa1
--- /dev/null
+++ b/drivers/mtd/ubi/ubi.h
@@ -0,0 +1,1228 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006, 2007
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+#ifndef __UBI_UBI_H__
+#define __UBI_UBI_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <linux/notifier.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/ubi.h>
+#include <linux/pgtable.h>
+
+#include "ubi-media.h"
+
+/* Maximum number of supported UBI devices */
+#define UBI_MAX_DEVICES 32
+
+/* UBI name used for character devices, sysfs, etc */
+#define UBI_NAME_STR "ubi"
+
+struct ubi_device;
+
+/* Normal UBI messages */
+__printf(2, 3)
+void ubi_msg(const struct ubi_device *ubi, const char *fmt, ...);
+
+/* UBI warning messages */
+__printf(2, 3)
+void ubi_warn(const struct ubi_device *ubi, const char *fmt, ...);
+
+/* UBI error messages */
+__printf(2, 3)
+void ubi_err(const struct ubi_device *ubi, const char *fmt, ...);
+
+/* Background thread name pattern */
+#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
+
+/*
+ * This marker in the EBA table means that the LEB is un-mapped.
+ * NOTE! It has to have the same value as %UBI_ALL.
+ */
+#define UBI_LEB_UNMAPPED -1
+
+/*
+ * In case of errors, UBI tries to repeat the operation several times before
+ * returning an error. The constant below defines how many times UBI
+ * re-tries.
+#define UBI_IO_RETRIES 3
+
+/*
+ * Length of the protection queue. The length is effectively equivalent to
+ * the number of (global) erase cycles for which PEBs are protected from the
+ * wear-leveling worker.
+ */
+#define UBI_PROT_QUEUE_LEN 10
+
+/* The volume ID/LEB number/erase counter is unknown */
+#define UBI_UNKNOWN -1
+
+/*
+ * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
+ * + 2 for the number + 1 for the trailing zero byte).
+ */
+#define UBI_DFS_DIR_NAME "ubi%d"
+#define UBI_DFS_DIR_LEN (3 + 2 + 1)
+
+/*
+ * Error codes returned by the I/O sub-system.
+ *
+ * UBI_IO_FF: the read region of flash contains only 0xFFs
+ * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also there was a data
+ * integrity error reported by the MTD driver
+ * (uncorrectable ECC error in case of NAND)
+ * UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC)
+ * UBI_IO_BAD_HDR_EBADMSG: the same as %UBI_IO_BAD_HDR, but also there was a
+ * data integrity error reported by the MTD driver
+ * (uncorrectable ECC error in case of NAND)
+ * UBI_IO_BITFLIPS: bit-flips were detected and corrected
+ *
+ * Note, it is probably better to have bit-flip and ebadmsg as flags which can
+ * be or'ed with other error codes. But this is a big change because there are
+ * many callers, so it is not worth the risk of introducing a bug.
+ */
+enum {
+ UBI_IO_FF = 1,
+ UBI_IO_FF_BITFLIPS,
+ UBI_IO_BAD_HDR,
+ UBI_IO_BAD_HDR_EBADMSG,
+ UBI_IO_BITFLIPS,
+};
+
+/*
+ * Return codes of the 'ubi_eba_copy_leb()' function.
+ *
+ * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source
+ * PEB was put meanwhile, or there is I/O on the source PEB
+ * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source
+ * PEB
+ * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target
+ * PEB
+ * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
+ * PEB
+ * MOVE_TARGET_BITFLIPS: canceled because a bit-flip was detected in the
+ * target PEB
+ * MOVE_RETRY: retry scrubbing the PEB
+ */
+enum {
+ MOVE_CANCEL_RACE = 1,
+ MOVE_SOURCE_RD_ERR,
+ MOVE_TARGET_RD_ERR,
+ MOVE_TARGET_WR_ERR,
+ MOVE_TARGET_BITFLIPS,
+ MOVE_RETRY,
+};
+
+/*
+ * Return codes of the fastmap sub-system
+ *
+ * UBI_NO_FASTMAP: No fastmap super block was found
+ * UBI_BAD_FASTMAP: A fastmap was found but it's unusable
+ */
+enum {
+ UBI_NO_FASTMAP = 1,
+ UBI_BAD_FASTMAP,
+};
+
+/*
+ * Flags for emulate_power_cut in ubi_debug_info
+ *
+ * POWER_CUT_EC_WRITE: Emulate a power cut when writing an EC header
+ * POWER_CUT_VID_WRITE: Emulate a power cut when writing a VID header
+ */
+enum {
+ POWER_CUT_EC_WRITE = 0x01,
+ POWER_CUT_VID_WRITE = 0x02,
+};
+
+/**
+ * struct ubi_vid_io_buf - VID buffer used to read/write VID info to/from the
+ * flash.
+ * @hdr: a pointer to the VID header stored in buffer
+ * @buffer: underlying buffer
+ */
+struct ubi_vid_io_buf {
+ struct ubi_vid_hdr *hdr;
+ void *buffer;
+};
+
+/**
+ * struct ubi_wl_entry - wear-leveling entry.
+ * @u.rb: link in the corresponding (free/used) RB-tree
+ * @u.list: link in the protection queue
+ * @ec: erase counter
+ * @pnum: physical eraseblock number
+ *
+ * This data structure is used in the WL sub-system. Each physical eraseblock
+ * has a corresponding &struct ubi_wl_entry object which may be kept in
+ * different RB-trees. See the WL sub-system for details.
+ */
+struct ubi_wl_entry {
+ union {
+ struct rb_node rb;
+ struct list_head list;
+ } u;
+ int ec;
+ int pnum;
+};
+
+/**
+ * struct ubi_ltree_entry - an entry in the lock tree.
+ * @rb: links RB-tree nodes
+ * @vol_id: volume ID of the locked logical eraseblock
+ * @lnum: locked logical eraseblock number
+ * @users: how many tasks are using this logical eraseblock or waiting for it
+ * @mutex: read/write mutex to implement read/write access serialization to
+ * the (@vol_id, @lnum) logical eraseblock
+ *
+ * This data structure is used in the EBA sub-system to implement per-LEB
+ * locking. When a logical eraseblock is being locked, the corresponding
+ * &struct ubi_ltree_entry object is inserted into the lock tree
+ * (@ubi->ltree). See the EBA sub-system for details.
+ */
+struct ubi_ltree_entry {
+ struct rb_node rb;
+ int vol_id;
+ int lnum;
+ int users;
+ struct rw_semaphore mutex;
+};
+
+/**
+ * struct ubi_rename_entry - volume re-name description data structure.
+ * @new_name_len: new volume name length
+ * @new_name: new volume name
+ * @remove: if not zero, this volume should be removed, not re-named
+ * @desc: descriptor of the volume
+ * @list: links re-name entries into a list
+ *
+ * This data structure is utilized in the multiple volume re-name code.
+ * Namely, UBI first creates a list of &struct ubi_rename_entry objects from
+ * the &struct ubi_rnvol_req request object, and then utilizes this list to do
+ * all the work.
+ */
+struct ubi_rename_entry {
+ int new_name_len;
+ char new_name[UBI_VOL_NAME_MAX + 1];
+ int remove;
+ struct ubi_volume_desc *desc;
+ struct list_head list;
+};
+
+struct ubi_volume_desc;
+
+/**
+ * struct ubi_fastmap_layout - in-memory fastmap data structure.
+ * @e: PEBs used by the current fastmap
+ * @to_be_tortured: if non-zero, the corresponding PEB has to be tortured
+ * @used_blocks: number of used PEBs
+ * @max_pool_size: maximal size of the user pool
+ * @max_wl_pool_size: maximal size of the pool used by the WL sub-system
+ */
+struct ubi_fastmap_layout {
+ struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS];
+ int to_be_tortured[UBI_FM_MAX_BLOCKS];
+ int used_blocks;
+ int max_pool_size;
+ int max_wl_pool_size;
+};
+
+/**
+ * struct ubi_fm_pool - in-memory fastmap pool
+ * @pebs: PEBs in this pool
+ * @used: number of used PEBs
+ * @size: total number of PEBs in this pool
+ * @max_size: maximal size of the pool
+ *
+ * A pool gets filled with up to @max_size PEBs. If all PEBs within the pool
+ * are used, a new fastmap will be written to the flash and the pool gets
+ * refilled with empty PEBs; see the sketch after the structure definition.
+ */
+struct ubi_fm_pool {
+ int pebs[UBI_FM_MAX_POOL_SIZE];
+ int used;
+ int size;
+ int max_size;
+};
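+
+/*
+ * Illustrative sketch of the pool behaviour described above
+ * (fm_pool_take_sketch() is not a UBI symbol): take the next PEB and signal
+ * exhaustion, at which point a new fastmap would be written and the pool
+ * refilled.
+ */
+static inline int fm_pool_take_sketch(struct ubi_fm_pool *pool)
+{
+	if (pool->used == pool->size)
+		return -1;	/* exhausted: refill and write a new fastmap */
+
+	return pool->pebs[pool->used++];
+}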
+
+/**
+ * struct ubi_eba_leb_desc - EBA logical eraseblock descriptor
+ * @lnum: the logical eraseblock number
+ * @pnum: the physical eraseblock where the LEB can be found
+ *
+ * This structure is here to hide EBA's internals from other parts of the
+ * UBI implementation.
+ *
+ * One can query the position of a LEB by calling ubi_eba_get_ldesc().
+ */
+struct ubi_eba_leb_desc {
+ int lnum;
+ int pnum;
+};
+
+/**
+ * struct ubi_volume - UBI volume description data structure.
+ * @dev: device object to make use of the Linux device model
+ * @cdev: character device object to create character device
+ * @ubi: reference to the UBI device description object
+ * @vol_id: volume ID
+ * @ref_count: volume reference count
+ * @readers: number of users holding this volume in read-only mode
+ * @writers: number of users holding this volume in read-write mode
+ * @exclusive: whether somebody holds this volume in exclusive mode
+ * @metaonly: whether somebody is altering only the metadata of this volume
+ *
+ * @reserved_pebs: how many physical eraseblocks are reserved for this volume
+ * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
+ * @usable_leb_size: logical eraseblock size without padding
+ * @used_ebs: how many logical eraseblocks in this volume contain data
+ * @last_eb_bytes: how many bytes are stored in the last logical eraseblock
+ * @used_bytes: how many bytes of data this volume contains
+ * @alignment: volume alignment
+ * @data_pad: how many bytes are not used at the end of physical eraseblocks to
+ * satisfy the requested alignment
+ * @name_len: volume name length
+ * @name: volume name
+ *
+ * @upd_ebs: how many eraseblocks are expected to be updated
+ * @ch_lnum: LEB number which is being changed by the atomic LEB change
+ * operation
+ * @upd_bytes: how many bytes are expected to be received for volume update or
+ * atomic LEB change
+ * @upd_received: how many bytes were already received for volume update or
+ * atomic LEB change
+ * @upd_buf: update buffer which is used to collect update data or data for
+ * atomic LEB change
+ *
+ * @eba_tbl: EBA table of this volume (LEB->PEB mapping)
+ * @skip_check: %1 if CRC check of this static volume should be skipped.
+ * Directly reflects the presence of the
+ * %UBI_VTBL_SKIP_CRC_CHECK_FLG flag in the vtbl entry
+ * @checked: %1 if this static volume was checked
+ * @corrupted: %1 if the volume is corrupted (static volumes only)
+ * @upd_marker: %1 if the update marker is set for this volume
+ * @updating: %1 if the volume is being updated
+ * @changing_leb: %1 if the atomic LEB change ioctl command is in progress
+ * @direct_writes: %1 if direct writes are enabled for this volume
+ *
+ * @checkmap: bitmap to remember which PEB->LEB mappings got checked,
+ * protected by UBI LEB lock tree.
+ *
+ * The @corrupted field indicates that the volume's contents are corrupted.
+ * Since UBI protects only static volumes, this field is not relevant to
+ * dynamic volumes - it is the user's responsibility to ensure their data
+ * integrity.
+ *
+ * The @upd_marker flag indicates that this volume is either being updated at
+ * the moment or is damaged because of an unclean reboot.
+ */
+struct ubi_volume {
+ struct device dev;
+ struct cdev cdev;
+ struct ubi_device *ubi;
+ int vol_id;
+ int ref_count;
+ int readers;
+ int writers;
+ int exclusive;
+ int metaonly;
+
+ int reserved_pebs;
+ int vol_type;
+ int usable_leb_size;
+ int used_ebs;
+ int last_eb_bytes;
+ long long used_bytes;
+ int alignment;
+ int data_pad;
+ int name_len;
+ char name[UBI_VOL_NAME_MAX + 1];
+
+ int upd_ebs;
+ int ch_lnum;
+ long long upd_bytes;
+ long long upd_received;
+ void *upd_buf;
+
+ struct ubi_eba_table *eba_tbl;
+ unsigned int skip_check:1;
+ unsigned int checked:1;
+ unsigned int corrupted:1;
+ unsigned int upd_marker:1;
+ unsigned int updating:1;
+ unsigned int changing_leb:1;
+ unsigned int direct_writes:1;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ unsigned long *checkmap;
+#endif
+};
+
+/**
+ * struct ubi_volume_desc - UBI volume descriptor returned when it is opened.
+ * @vol: reference to the corresponding volume description object
+ * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, %UBI_EXCLUSIVE
+ * or %UBI_METAONLY)
+ */
+struct ubi_volume_desc {
+ struct ubi_volume *vol;
+ int mode;
+};
+
+/**
+ * struct ubi_debug_info - debugging information for an UBI device.
+ *
+ * @chk_gen: if UBI general extra checks are enabled
+ * @chk_io: if UBI I/O extra checks are enabled
+ * @chk_fastmap: if UBI fastmap extra checks are enabled
+ * @disable_bgt: disable the background task for testing purposes
+ * @emulate_bitflips: emulate bit-flips for testing purposes
+ * @emulate_io_failures: emulate write/erase failures for testing purposes
+ * @emulate_power_cut: emulate power cut for testing purposes
+ * @power_cut_counter: count down for writes left until emulated power cut
+ * @power_cut_min: minimum number of writes before emulating a power cut
+ * @power_cut_max: maximum number of writes until emulating a power cut
+ * @dfs_dir_name: name of debugfs directory containing files of this UBI device
+ * @dfs_dir: direntry object of the UBI device debugfs directory
+ * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
+ * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
+ * @dfs_chk_fastmap: debugfs knob to enable UBI fastmap extra checks
+ * @dfs_disable_bgt: debugfs knob to disable the background task
+ * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
+ * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
+ * @dfs_emulate_power_cut: debugfs knob to emulate power cuts
+ * @dfs_power_cut_min: debugfs knob for minimum writes before power cut
+ * @dfs_power_cut_max: debugfs knob for maximum writes until power cut
+ */
+struct ubi_debug_info {
+ unsigned int chk_gen:1;
+ unsigned int chk_io:1;
+ unsigned int chk_fastmap:1;
+ unsigned int disable_bgt:1;
+ unsigned int emulate_bitflips:1;
+ unsigned int emulate_io_failures:1;
+ unsigned int emulate_power_cut:2;
+ unsigned int power_cut_counter;
+ unsigned int power_cut_min;
+ unsigned int power_cut_max;
+ char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
+ struct dentry *dfs_dir;
+ struct dentry *dfs_chk_gen;
+ struct dentry *dfs_chk_io;
+ struct dentry *dfs_chk_fastmap;
+ struct dentry *dfs_disable_bgt;
+ struct dentry *dfs_emulate_bitflips;
+ struct dentry *dfs_emulate_io_failures;
+ struct dentry *dfs_emulate_power_cut;
+ struct dentry *dfs_power_cut_min;
+ struct dentry *dfs_power_cut_max;
+};
+
+/**
+ * struct ubi_device - UBI device description structure
+ * @dev: UBI device object to use the Linux device model
+ * @cdev: character device object to create character device
+ * @ubi_num: UBI device number
+ * @ubi_name: UBI device name
+ * @vol_count: number of volumes in this UBI device
+ * @volumes: volumes of this UBI device
+ * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, @beb_rsvd_pebs,
+ * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
+ * @vol->readers, @vol->writers, @vol->exclusive,
+ * @vol->metaonly, @vol->ref_count, @vol->mapping and
+ * @vol->eba_tbl.
+ * @ref_count: count of references on the UBI device
+ * @image_seq: image sequence number recorded on EC headers
+ *
+ * @rsvd_pebs: count of reserved physical eraseblocks
+ * @avail_pebs: count of available physical eraseblocks
+ * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB
+ * handling
+ * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
+ *
+ * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
+ * of UBI initialization
+ * @vtbl_slots: how many slots are available in the volume table
+ * @vtbl_size: size of the volume table in bytes
+ * @vtbl: in-RAM volume table copy
+ * @device_mutex: protects on-flash volume table and serializes volume
+ * creation, deletion, update, re-size, re-name and set
+ * property
+ *
+ * @max_ec: current highest erase counter value
+ * @mean_ec: current mean erase counter value
+ *
+ * @global_sqnum: global sequence number
+ * @ltree_lock: protects the lock tree and @global_sqnum
+ * @ltree: the lock tree
+ * @alc_mutex: serializes "atomic LEB change" operations
+ *
+ * @fm_disabled: non-zero if fastmap is disabled (default)
+ * @fm: in-memory data structure of the currently used fastmap
+ * @fm_pool: in-memory data structure of the fastmap pool
+ * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
+ * sub-system
+ * @fm_protect: serializes ubi_update_fastmap(), protects @fm_buf and makes sure
+ * that critical sections cannot be interrupted by ubi_update_fastmap()
+ * @fm_buf: vmalloc()'d buffer which holds the raw fastmap
+ * @fm_size: fastmap size in bytes
+ * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
+ * @fm_work: fastmap work queue
+ * @fm_work_scheduled: non-zero if fastmap work was scheduled
+ * @fast_attach: non-zero if UBI was attached by fastmap
+ * @fm_anchor: The next anchor PEB to use for fastmap
+ * @fm_do_produce_anchor: If true produce an anchor PEB in wl
+ *
+ * @used: RB-tree of used physical eraseblocks
+ * @erroneous: RB-tree of erroneous used physical eraseblocks
+ * @free: RB-tree of free physical eraseblocks
+ * @free_count: Contains the number of elements in @free
+ * @scrub: RB-tree of physical eraseblocks which need scrubbing
+ * @pq: protection queue (contains physical eraseblocks which are temporarily
+ * protected from the wear-leveling worker)
+ * @pq_head: protection queue head
+ * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
+ * @move_to, @move_to_put, @erase_pending, @wl_scheduled, @works,
+ * @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
+ * and @fm_wl_pool fields
+ * @move_mutex: serializes eraseblock moves
+ * @work_sem: used to wait for all the scheduled works to finish and prevent
+ * new works from being submitted
+ * @wl_scheduled: non-zero if the wear-leveling was scheduled
+ * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
+ * physical eraseblock
+ * @move_from: physical eraseblock from where the data is being moved
+ * @move_to: physical eraseblock where the data is being moved to
+ * @move_to_put: if the "to" PEB was put
+ * @works: list of pending works
+ * @works_count: count of pending works
+ * @bgt_thread: background thread description object
+ * @thread_enabled: if the background thread is enabled
+ * @bgt_name: background thread name
+ *
+ * @flash_size: underlying MTD device size (in bytes)
+ * @peb_count: count of physical eraseblocks on the MTD device
+ * @peb_size: physical eraseblock size
+ * @bad_peb_limit: top limit of expected bad physical eraseblocks
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @good_peb_count: count of good physical eraseblocks
+ * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not
+ * used by UBI)
+ * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous
+ * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks
+ * @min_io_size: minimal input/output unit size of the underlying MTD device
+ * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers
+ * @ro_mode: if the UBI device is in read-only mode
+ * @leb_size: logical eraseblock size
+ * @leb_start: starting offset of logical eraseblocks within physical
+ * eraseblocks
+ * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size
+ * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size
+ * @vid_hdr_offset: starting offset of the volume identifier header (might be
+ * unaligned)
+ * @vid_hdr_aloffset: starting offset of the VID header aligned to
+ * @hdrs_min_io_size
+ * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
+ * @bad_allowed: whether the MTD device admits bad physical eraseblocks or not
+ * @nor_flash: non-zero if working on top of NOR flash
+ * @max_write_size: maximum amount of bytes the underlying flash can write at a
+ * time (MTD write buffer size)
+ * @mtd: MTD device descriptor
+ *
+ * @peb_buf: a buffer of PEB size used for different purposes
+ * @buf_mutex: protects @peb_buf
+ * @ckvol_mutex: serializes static volume checking when opening
+ *
+ * @dbg: debugging information for this UBI device
+ */
+struct ubi_device {
+ struct cdev cdev;
+ struct device dev;
+ int ubi_num;
+ char ubi_name[sizeof(UBI_NAME_STR)+5];
+ int vol_count;
+ struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
+ spinlock_t volumes_lock;
+ int ref_count;
+ int image_seq;
+
+ int rsvd_pebs;
+ int avail_pebs;
+ int beb_rsvd_pebs;
+ int beb_rsvd_level;
+ int bad_peb_limit;
+
+ int autoresize_vol_id;
+ int vtbl_slots;
+ int vtbl_size;
+ struct ubi_vtbl_record *vtbl;
+ struct mutex device_mutex;
+
+ int max_ec;
+ /* Note, mean_ec is not updated run-time - should be fixed */
+ int mean_ec;
+
+ /* EBA sub-system's stuff */
+ unsigned long long global_sqnum;
+ spinlock_t ltree_lock;
+ struct rb_root ltree;
+ struct mutex alc_mutex;
+
+ /* Fastmap stuff */
+ int fm_disabled;
+ struct ubi_fastmap_layout *fm;
+ struct ubi_fm_pool fm_pool;
+ struct ubi_fm_pool fm_wl_pool;
+ struct rw_semaphore fm_eba_sem;
+ struct rw_semaphore fm_protect;
+ void *fm_buf;
+ size_t fm_size;
+ struct work_struct fm_work;
+ int fm_work_scheduled;
+ int fast_attach;
+ struct ubi_wl_entry *fm_anchor;
+ int fm_do_produce_anchor;
+
+ /* Wear-leveling sub-system's stuff */
+ struct rb_root used;
+ struct rb_root erroneous;
+ struct rb_root free;
+ int free_count;
+ struct rb_root scrub;
+ struct list_head pq[UBI_PROT_QUEUE_LEN];
+ int pq_head;
+ spinlock_t wl_lock;
+ struct mutex move_mutex;
+ struct rw_semaphore work_sem;
+ int wl_scheduled;
+ struct ubi_wl_entry **lookuptbl;
+ struct ubi_wl_entry *move_from;
+ struct ubi_wl_entry *move_to;
+ int move_to_put;
+ struct list_head works;
+ int works_count;
+ struct task_struct *bgt_thread;
+ int thread_enabled;
+ char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
+
+ /* I/O sub-system's stuff */
+ long long flash_size;
+ int peb_count;
+ int peb_size;
+ int bad_peb_count;
+ int good_peb_count;
+ int corr_peb_count;
+ int erroneous_peb_count;
+ int max_erroneous;
+ int min_io_size;
+ int hdrs_min_io_size;
+ int ro_mode;
+ int leb_size;
+ int leb_start;
+ int ec_hdr_alsize;
+ int vid_hdr_alsize;
+ int vid_hdr_offset;
+ int vid_hdr_aloffset;
+ int vid_hdr_shift;
+ unsigned int bad_allowed:1;
+ unsigned int nor_flash:1;
+ int max_write_size;
+ struct mtd_info *mtd;
+
+ void *peb_buf;
+ struct mutex buf_mutex;
+ struct mutex ckvol_mutex;
+
+ struct ubi_debug_info dbg;
+};
+
+/**
+ * struct ubi_ainf_peb - attach information about a physical eraseblock.
+ * @ec: erase counter (%UBI_UNKNOWN if it is unknown)
+ * @pnum: physical eraseblock number
+ * @vol_id: ID of the volume this LEB belongs to
+ * @lnum: logical eraseblock number
+ * @scrub: if this physical eraseblock needs scrubbing
+ * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
+ * @sqnum: sequence number
+ * @u: union of RB-tree and list links
+ * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects
+ * @u.list: link in one of the eraseblock lists
+ *
+ * One object of this type is allocated for each physical eraseblock when
+ * attaching an MTD device. Note, if this PEB does not belong to any LEB /
+ * volume, the @vol_id and @lnum fields are initialized to %UBI_UNKNOWN.
+ */
+struct ubi_ainf_peb {
+ int ec;
+ int pnum;
+ int vol_id;
+ int lnum;
+ unsigned int scrub:1;
+ unsigned int copy_flag:1;
+ unsigned long long sqnum;
+ union {
+ struct rb_node rb;
+ struct list_head list;
+ } u;
+};
+
+/**
+ * struct ubi_ainf_volume - attaching information about a volume.
+ * @vol_id: volume ID
+ * @highest_lnum: highest logical eraseblock number in this volume
+ * @leb_count: number of logical eraseblocks in this volume
+ * @vol_type: volume type
+ * @used_ebs: number of used logical eraseblocks in this volume (only for
+ * static volumes)
+ * @last_data_size: amount of data in the last logical eraseblock of this
+ * volume (always equivalent to the usable logical eraseblock
+ * size in case of dynamic volumes)
+ * @data_pad: how many bytes at the end of logical eraseblocks of this volume
+ * are not used (due to volume alignment)
+ * @compat: compatibility flags of this volume
+ * @rb: link in the volume RB-tree
+ * @root: root of the RB-tree containing all the eraseblocks belonging to this
+ * volume (&struct ubi_ainf_peb objects)
+ *
+ * One object of this type is allocated for each volume when attaching an MTD
+ * device.
+ */
+struct ubi_ainf_volume {
+ int vol_id;
+ int highest_lnum;
+ int leb_count;
+ int vol_type;
+ int used_ebs;
+ int last_data_size;
+ int data_pad;
+ int compat;
+ struct rb_node rb;
+ struct rb_root root;
+};
+
+/**
+ * struct ubi_attach_info - MTD device attaching information.
+ * @volumes: root of the volume RB-tree
+ * @corr: list of corrupted physical eraseblocks
+ * @free: list of free physical eraseblocks
+ * @erase: list of physical eraseblocks which have to be erased
+ * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
+ * those belonging to "preserve"-compatible internal volumes)
+ * @fastmap: list of physical eraseblocks which relate to fastmap (e.g.,
+ * eraseblocks of the current and not yet erased old fastmap blocks)
+ * @corr_peb_count: count of PEBs in the @corr list
+ * @empty_peb_count: count of PEBs which are presumably empty (contain only
+ * 0xFF bytes)
+ * @alien_peb_count: count of PEBs in the @alien list
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @maybe_bad_peb_count: count of bad physical eraseblocks which are not
+ * marked as bad yet, but which look bad
+ * @vols_found: number of volumes found
+ * @highest_vol_id: highest volume ID
+ * @is_empty: flag indicating whether the MTD device is empty or not
+ * @force_full_scan: flag indicating whether we need to do a full scan and
+ * drop all existing Fastmap data structures
+ * @min_ec: lowest erase counter value
+ * @max_ec: highest erase counter value
+ * @max_sqnum: highest sequence number value
+ * @mean_ec: mean erase counter value
+ * @ec_sum: a temporary variable used when calculating @mean_ec
+ * @ec_count: a temporary variable used when calculating @mean_ec
+ * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
+ * @ech: temporary EC header. Only available during scan
+ * @vidb: temporary VID buffer. Only available during scan
+ *
+ * This data structure contains the result of attaching an MTD device and may
+ * be used by other UBI sub-systems to build final UBI data structures, for
+ * further error recovery, and so on.
+ */
+struct ubi_attach_info {
+ struct rb_root volumes;
+ struct list_head corr;
+ struct list_head free;
+ struct list_head erase;
+ struct list_head alien;
+ struct list_head fastmap;
+ int corr_peb_count;
+ int empty_peb_count;
+ int alien_peb_count;
+ int bad_peb_count;
+ int maybe_bad_peb_count;
+ int vols_found;
+ int highest_vol_id;
+ int is_empty;
+ int force_full_scan;
+ int min_ec;
+ int max_ec;
+ unsigned long long max_sqnum;
+ int mean_ec;
+ uint64_t ec_sum;
+ int ec_count;
+ struct kmem_cache *aeb_slab_cache;
+ struct ubi_ec_hdr *ech;
+ struct ubi_vid_io_buf *vidb;
+};
+
+/**
+ * struct ubi_work - UBI work description data structure.
+ * @list: a link in the list of pending works
+ * @func: worker function
+ * @e: physical eraseblock to erase
+ * @vol_id: the volume ID on which this erasure is being performed
+ * @lnum: the logical eraseblock number
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ * The @func pointer points to the worker function. If the @shutdown argument is
+ * not zero, the worker has to free the resources and exit immediately as the
+ * WL sub-system is shutting down.
+ * The worker has to return zero in case of success and a negative error code in
+ * case of failure.
+ */
+struct ubi_work {
+ struct list_head list;
+ int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int shutdown);
+ /* The below fields are only relevant to erasure works */
+ struct ubi_wl_entry *e;
+ int vol_id;
+ int lnum;
+ int torture;
+};
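+
+/*
+ * A skeleton worker following the contract described above (illustrative;
+ * erase_worker_sketch() is a hypothetical name, not a UBI symbol).
+ */
+static inline int erase_worker_sketch(struct ubi_device *ubi,
+				      struct ubi_work *wrk, int shutdown)
+{
+	if (shutdown) {
+		/* WL sub-system is shutting down: free resources and bail */
+		kfree(wrk);
+		return 0;
+	}
+
+	/*
+	 * A real worker would erase wrk->e->pnum here (honouring
+	 * wrk->torture) and return 0 on success or a negative error code
+	 * on failure.
+	 */
+	kfree(wrk);
+	return 0;
+}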
+
+#include "debug.h"
+
+extern struct kmem_cache *ubi_wl_entry_slab;
+extern const struct file_operations ubi_ctrl_cdev_operations;
+extern const struct file_operations ubi_cdev_operations;
+extern const struct file_operations ubi_vol_cdev_operations;
+extern struct class ubi_class;
+extern struct mutex ubi_devices_mutex;
+extern struct blocking_notifier_head ubi_notifiers;
+
+/* attach.c */
+struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum,
+ int ec);
+void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb);
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+ int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
+struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id);
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+ int vol_id);
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+ struct ubi_attach_info *ai);
+int ubi_attach(struct ubi_device *ubi, int force_scan);
+void ubi_destroy_ai(struct ubi_attach_info *ai);
+
+/* vtbl.c */
+int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
+ struct ubi_vtbl_record *vtbl_rec);
+int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
+ struct list_head *rename_list);
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai);
+
+/* vmt.c */
+int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
+int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl);
+int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
+int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list);
+int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
+void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);
+
+/* upd.c */
+int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+ long long bytes);
+int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count);
+int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ const struct ubi_leb_change_req *req);
+int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count);
+
+/* misc.c */
+int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
+ int length);
+int ubi_check_volume(struct ubi_device *ubi, int vol_id);
+void ubi_update_reserved(struct ubi_device *ubi);
+void ubi_calculate_reserved(struct ubi_device *ubi);
+int ubi_check_pattern(const void *buf, uint8_t patt, int size);
+
+static inline bool ubi_leb_valid(struct ubi_volume *vol, int lnum)
+{
+ return lnum >= 0 && lnum < vol->reserved_pebs;
+}
+
+/* eba.c */
+struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
+ int nentries);
+void ubi_eba_destroy_table(struct ubi_eba_table *tbl);
+void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
+ int nentries);
+void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl);
+void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
+ struct ubi_eba_leb_desc *ldesc);
+bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum);
+int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum);
+int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int offset, int len, int check);
+int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_sgl *sgl, int lnum, int offset, int len,
+ int check);
+int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ const void *buf, int offset, int len);
+int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int used_ebs);
+int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len);
+int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
+ struct ubi_vid_io_buf *vidb);
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ struct ubi_attach_info *ai_scan);
+
+/* wl.c */
+int ubi_wl_get_peb(struct ubi_device *ubi);
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture);
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum);
+int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
+void ubi_wl_close(struct ubi_device *ubi);
+int ubi_thread(void *u);
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
+ int lnum, int torture);
+int ubi_is_erase_work(struct ubi_work *wrk);
+void ubi_refill_pools(struct ubi_device *ubi);
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
+int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force_scrub);
+
+/* io.c */
+int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
+ int len);
+int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
+ int len);
+int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture);
+int ubi_io_is_bad(const struct ubi_device *ubi, int pnum);
+int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum);
+int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr, int verbose);
+int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr);
+int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_io_buf *vidb, int verbose);
+int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_io_buf *vidb);
+
+/* build.c */
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ int vid_hdr_offset, int max_beb_per1024,
+ bool disable_fm);
+int ubi_detach_mtd_dev(int ubi_num, int anyway);
+struct ubi_device *ubi_get_device(int ubi_num);
+void ubi_put_device(struct ubi_device *ubi);
+struct ubi_device *ubi_get_by_major(int major);
+int ubi_major2num(int major);
+int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
+ int ntype);
+int ubi_notify_all(struct ubi_device *ubi, int ntype,
+ struct notifier_block *nb);
+int ubi_enumerate_volumes(struct notifier_block *nb);
+void ubi_free_all_volumes(struct ubi_device *ubi);
+void ubi_free_internal_volumes(struct ubi_device *ubi);
+
+/* kapi.c */
+void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
+void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_volume_info *vi);
+/* scan.c */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ int pnum, const struct ubi_vid_hdr *vid_hdr);
+
+/* fastmap.c */
+#ifdef CONFIG_MTD_UBI_FASTMAP
+size_t ubi_calc_fm_size(struct ubi_device *ubi);
+int ubi_update_fastmap(struct ubi_device *ubi);
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_attach_info *scan_ai);
+int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count);
+void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol);
+#else
+static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
+static inline int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
+static inline void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol) {}
+#endif
+
+/* block.c */
+#ifdef CONFIG_MTD_UBI_BLOCK
+int ubiblock_init(void);
+void ubiblock_exit(void);
+int ubiblock_create(struct ubi_volume_info *vi);
+int ubiblock_remove(struct ubi_volume_info *vi);
+#else
+static inline int ubiblock_init(void) { return 0; }
+static inline void ubiblock_exit(void) {}
+static inline int ubiblock_create(struct ubi_volume_info *vi)
+{
+ return -ENOSYS;
+}
+static inline int ubiblock_remove(struct ubi_volume_info *vi)
+{
+ return -ENOSYS;
+}
+#endif
+
+/*
+ * ubi_for_each_free_peb - walk the UBI free RB tree.
+ * @ubi: UBI device description object
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ * @tmp_rb: a pointer to a struct rb_node to use as a loop cursor
+ */
+#define ubi_for_each_free_peb(ubi, e, tmp_rb) \
+ ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb)
+
+/*
+ * ubi_for_each_used_peb - walk the UBI used RB tree.
+ * @ubi: UBI device description object
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ * @tmp_rb: a pointer to a struct rb_node to use as a loop cursor
+ */
+#define ubi_for_each_used_peb(ubi, e, tmp_rb) \
+ ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb)
+
+/*
+ * ubi_for_each_scrub_peb - walk the UBI scrub RB tree.
+ * @ubi: UBI device description object
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ * @tmp_rb: a pointer to a struct rb_node to use as a loop cursor
+ */
+#define ubi_for_each_scrub_peb(ubi, e, tmp_rb) \
+ ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb)
+
+/*
+ * ubi_for_each_protected_peb - walk the UBI protection queue.
+ * @ubi: UBI device description object
+ * @i: an integer used as a counter
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ */
+#define ubi_for_each_protected_peb(ubi, i, e) \
+ for ((i) = 0; (i) < UBI_PROT_QUEUE_LEN; (i)++) \
+ list_for_each_entry((e), &(ubi->pq[(i)]), u.list)
+
+/*
+ * ubi_rb_for_each_entry - walk an RB-tree.
+ * @rb: a pointer to type 'struct rb_node' to use as a loop counter
+ * @pos: a pointer to RB-tree entry type to use as a loop counter
+ * @root: RB-tree's root
+ * @member: the name of the 'struct rb_node' within the RB-tree entry
+ */
+#define ubi_rb_for_each_entry(rb, pos, root, member) \
+ for (rb = rb_first(root), \
+ pos = (rb ? container_of(rb, typeof(*pos), member) : NULL); \
+ rb; \
+ rb = rb_next(rb), \
+ pos = (rb ? container_of(rb, typeof(*pos), member) : NULL))
+
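+/*
+ * Usage sketch for the iterators above (count_free_pebs_sketch() is not a
+ * UBI symbol): walk the free RB-tree and count its entries. A real caller
+ * would hold @ubi->wl_lock while walking the tree.
+ */
+static inline int count_free_pebs_sketch(struct ubi_device *ubi)
+{
+	struct ubi_wl_entry *e;
+	struct rb_node *tmp_rb;
+	int count = 0;
+
+	ubi_for_each_free_peb(ubi, e, tmp_rb)
+		count++;
+
+	return count;
+}
+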
+/*
+ * ubi_move_aeb_to_list - move a PEB from the volume tree to a list.
+ *
+ * @av: volume attaching information
+ * @aeb: attaching eraseblock information
+ * @list: the list to move to
+ */
+static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av,
+ struct ubi_ainf_peb *aeb,
+ struct list_head *list)
+{
+ rb_erase(&aeb->u.rb, &av->root);
+ list_add_tail(&aeb->u.list, list);
+}
+
+/**
+ * ubi_init_vid_buf - Initialize a VID buffer
+ * @ubi: the UBI device
+ * @vidb: the VID buffer to initialize
+ * @buf: the underlying buffer
+ */
+static inline void ubi_init_vid_buf(const struct ubi_device *ubi,
+ struct ubi_vid_io_buf *vidb,
+ void *buf)
+{
+ if (buf)
+ memset(buf, 0, ubi->vid_hdr_alsize);
+
+ vidb->buffer = buf;
+ vidb->hdr = buf + ubi->vid_hdr_shift;
+}
+
+/**
+ * ubi_alloc_vid_buf - Allocate a VID buffer
+ * @ubi: the UBI device
+ * @gfp_flags: GFP flags to use for the allocation
+ */
+static inline struct ubi_vid_io_buf *
+ubi_alloc_vid_buf(const struct ubi_device *ubi, gfp_t gfp_flags)
+{
+ struct ubi_vid_io_buf *vidb;
+ void *buf;
+
+ vidb = kzalloc(sizeof(*vidb), gfp_flags);
+ if (!vidb)
+ return NULL;
+
+ buf = kmalloc(ubi->vid_hdr_alsize, gfp_flags);
+ if (!buf) {
+ kfree(vidb);
+ return NULL;
+ }
+
+ ubi_init_vid_buf(ubi, vidb, buf);
+
+ return vidb;
+}
+
+/**
+ * ubi_free_vid_buf - Free a VID buffer
+ * @vidb: the VID buffer to free
+ */
+static inline void ubi_free_vid_buf(struct ubi_vid_io_buf *vidb)
+{
+ if (!vidb)
+ return;
+
+ kfree(vidb->buffer);
+ kfree(vidb);
+}
+
+/**
+ * ubi_get_vid_hdr - Get the VID header attached to a VID buffer
+ * @vidb: VID buffer
+ */
+static inline struct ubi_vid_hdr *ubi_get_vid_hdr(struct ubi_vid_io_buf *vidb)
+{
+ return vidb->hdr;
+}
+
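+/*
+ * Example (illustrative sketch): a typical VID buffer lifecycle using the
+ * helpers above; ubi_io_read_vid_hdr() is assumed to take the buffer and a
+ * verbosity flag:
+ *
+ *    vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ *    if (!vidb)
+ *        return -ENOMEM;
+ *    err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
+ *    if (!err)
+ *        vid_hdr = ubi_get_vid_hdr(vidb);
+ *    ubi_free_vid_buf(vidb);
+ */
+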
+/*
+ * This function is equivalent to 'ubi_io_read()', but @offset is relative to
+ * the beginning of the logical eraseblock, not to the beginning of the
+ * physical eraseblock.
+ */
+static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf,
+ int pnum, int offset, int len)
+{
+ ubi_assert(offset >= 0);
+ return ubi_io_read(ubi, buf, pnum, offset + ubi->leb_start, len);
+}
+
+/*
+ * This function is equivalent to 'ubi_io_write()', but @offset is relative to
+ * the beginning of the logical eraseblock, not to the beginning of the
+ * physical eraseblock.
+ */
+static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
+ int pnum, int offset, int len)
+{
+ ubi_assert(offset >= 0);
+ return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len);
+}
+
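+/*
+ * For example (hypothetical geometry): on flash with 2 KiB pages where the EC
+ * and VID headers occupy the first two pages, @ubi->leb_start is 4096, so
+ * ubi_io_read_data(ubi, buf, 42, 0, len) reads PEB 42 starting at offset 4096.
+ */
+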
+/**
+ * ubi_ro_mode - switch to read-only mode.
+ * @ubi: UBI device description object
+ */
+static inline void ubi_ro_mode(struct ubi_device *ubi)
+{
+ if (!ubi->ro_mode) {
+ ubi->ro_mode = 1;
+ ubi_warn(ubi, "switch to read-only mode");
+ dump_stack();
+ }
+}
+
+/**
+ * vol_id2idx - get table index by volume ID.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ */
+static inline int vol_id2idx(const struct ubi_device *ubi, int vol_id)
+{
+ if (vol_id >= UBI_INTERNAL_VOL_START)
+ return vol_id - UBI_INTERNAL_VOL_START + ubi->vtbl_slots;
+ else
+ return vol_id;
+}
+
+/**
+ * idx2vol_id - get volume ID by table index.
+ * @ubi: UBI device description object
+ * @idx: table index
+ */
+static inline int idx2vol_id(const struct ubi_device *ubi, int idx)
+{
+ if (idx >= ubi->vtbl_slots)
+ return idx - ubi->vtbl_slots + UBI_INTERNAL_VOL_START;
+ else
+ return idx;
+}
+
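+/*
+ * For example, internal volumes map past the user slots: the layout volume
+ * (whose ID, %UBI_LAYOUT_VOLUME_ID, equals %UBI_INTERNAL_VOL_START) lands at
+ * index @ubi->vtbl_slots, and the two helpers invert each other:
+ *
+ *    vol_id2idx(ubi, UBI_INTERNAL_VOL_START) == ubi->vtbl_slots
+ *    idx2vol_id(ubi, ubi->vtbl_slots) == UBI_INTERNAL_VOL_START
+ */
+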
+/**
+ * ubi_is_fm_vol - check whether a volume ID is a Fastmap volume.
+ * @vol_id: volume ID
+ */
+static inline bool ubi_is_fm_vol(int vol_id)
+{
+ switch (vol_id) {
+ case UBI_FM_SB_VOLUME_ID:
+ case UBI_FM_DATA_VOLUME_ID:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ubi_find_fm_block - check whether a PEB is part of the current Fastmap.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock to look for
+ *
+ * This function returns a wear leveling object if @pnum relates to the current
+ * fastmap, %NULL otherwise.
+ */
+static inline struct ubi_wl_entry *ubi_find_fm_block(const struct ubi_device *ubi,
+ int pnum)
+{
+ int i;
+
+ if (ubi->fm) {
+ for (i = 0; i < ubi->fm->used_blocks; i++) {
+ if (ubi->fm->e[i]->pnum == pnum)
+ return ubi->fm->e[i];
+ }
+ }
+
+ return NULL;
+}
+
+#endif /* !__UBI_UBI_H__ */
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
new file mode 100644
index 0000000000..962f693cf8
--- /dev/null
+++ b/drivers/mtd/ubi/upd.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ *
+ * Jan 2007: Alexander Schmidt, hacked per-volume update.
+ */
+
+/*
+ * This file contains implementation of the volume update and atomic LEB change
+ * functionality.
+ *
+ * The update operation is based on the per-volume update marker which is
+ * stored in the volume table. The update marker is set before the update
+ * starts, and removed after the update has finished. So if the update was
+ * interrupted by an unclean reboot or for some other reason, the update
+ * marker stays on the flash media and UBI finds it when it attaches the MTD
+ * device next time. If the update marker is set for a volume, the volume is
+ * treated as damaged and most I/O operations are prohibited. Only a new update
+ * operation is allowed.
+ *
+ * Note, in general it is possible to implement the update operation as a
+ * transaction with a roll-back capability.
+ */
+
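+/*
+ * A successful update thus follows this sequence (sketch of the functions
+ * defined below):
+ *
+ *    ubi_start_update()         -> set_update_marker(), wipe the volume
+ *    ubi_more_update_data() x N -> write_leb() for each gathered LEB
+ *    last data chunk            -> clear_update_marker()
+ */
+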
+#include <linux/err.h>
+#include <linux/uaccess.h>
+#include <linux/math64.h>
+#include "ubi.h"
+
+/**
+ * set_update_marker - set update marker.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ *
+ * This function sets the update marker flag for volume @vol. Returns zero
+ * in case of success and a negative error code in case of failure.
+ */
+static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
+{
+ int err;
+ struct ubi_vtbl_record vtbl_rec;
+
+ dbg_gen("set update marker for volume %d", vol->vol_id);
+
+ if (vol->upd_marker) {
+ ubi_assert(ubi->vtbl[vol->vol_id].upd_marker);
+ dbg_gen("already set");
+ return 0;
+ }
+
+ vtbl_rec = ubi->vtbl[vol->vol_id];
+ vtbl_rec.upd_marker = 1;
+
+ mutex_lock(&ubi->device_mutex);
+ err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
+ vol->upd_marker = 1;
+ mutex_unlock(&ubi->device_mutex);
+ return err;
+}
+
+/**
+ * clear_update_marker - clear update marker.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @bytes: new data size in bytes
+ *
+ * This function clears the update marker for volume @vol, sets new volume
+ * data size and clears the "corrupted" flag (static volumes only). Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
+ long long bytes)
+{
+ int err;
+ struct ubi_vtbl_record vtbl_rec;
+
+ dbg_gen("clear update marker for volume %d", vol->vol_id);
+
+ vtbl_rec = ubi->vtbl[vol->vol_id];
+ ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
+ vtbl_rec.upd_marker = 0;
+
+ if (vol->vol_type == UBI_STATIC_VOLUME) {
+ vol->corrupted = 0;
+ vol->used_bytes = bytes;
+ vol->used_ebs = div_u64_rem(bytes, vol->usable_leb_size,
+ &vol->last_eb_bytes);
+ if (vol->last_eb_bytes)
+ vol->used_ebs += 1;
+ else
+ vol->last_eb_bytes = vol->usable_leb_size;
+ }
+
+ mutex_lock(&ubi->device_mutex);
+ err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
+ vol->upd_marker = 0;
+ mutex_unlock(&ubi->device_mutex);
+ return err;
+}
+
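+/*
+ * Example of the size accounting above (hypothetical numbers): with
+ * @usable_leb_size == 126976 and @bytes == 300000, div_u64_rem() yields
+ * used_ebs == 2 and last_eb_bytes == 46048; the remainder is non-zero, so
+ * used_ebs is bumped to 3.
+ */
+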
+/**
+ * ubi_start_update - start volume update.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @bytes: update bytes
+ *
+ * This function starts volume update operation. If @bytes is zero, the volume
+ * is just wiped out. Returns zero in case of success and a negative error code
+ * in case of failure.
+ */
+int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+ long long bytes)
+{
+ int i, err;
+
+ dbg_gen("start update of volume %d, %llu bytes", vol->vol_id, bytes);
+ ubi_assert(!vol->updating && !vol->changing_leb);
+ vol->updating = 1;
+
+ vol->upd_buf = vmalloc(ubi->leb_size);
+ if (!vol->upd_buf)
+ return -ENOMEM;
+
+ err = set_update_marker(ubi, vol);
+ if (err)
+ return err;
+
+ /* Before updating - wipe out the volume */
+ for (i = 0; i < vol->reserved_pebs; i++) {
+ err = ubi_eba_unmap_leb(ubi, vol, i);
+ if (err)
+ return err;
+ }
+
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+
+ if (bytes == 0) {
+ err = clear_update_marker(ubi, vol, 0);
+ if (err)
+ return err;
+
+ vfree(vol->upd_buf);
+ vol->updating = 0;
+ return 0;
+ }
+
+ vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
+ vol->usable_leb_size);
+ vol->upd_bytes = bytes;
+ vol->upd_received = 0;
+ return 0;
+}
+
+/**
+ * ubi_start_leb_change - start atomic LEB change.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @req: operation request
+ *
+ * This function starts atomic LEB change operation. Returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ const struct ubi_leb_change_req *req)
+{
+ ubi_assert(!vol->updating && !vol->changing_leb);
+
+ dbg_gen("start changing LEB %d:%d, %u bytes",
+ vol->vol_id, req->lnum, req->bytes);
+ if (req->bytes == 0)
+ return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0);
+
+ vol->upd_bytes = req->bytes;
+ vol->upd_received = 0;
+ vol->changing_leb = 1;
+ vol->ch_lnum = req->lnum;
+
+ vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
+ if (!vol->upd_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * write_leb - write update data.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: data to write
+ * @len: data size
+ * @used_ebs: how many logical eraseblocks will this volume contain (static
+ * volumes only)
+ *
+ * This function writes update data to the corresponding logical eraseblock.
+ * In case of a dynamic volume, it checks whether the data contains 0xFF bytes
+ * at the end. If yes, the 0xFF bytes are cut and not written. So if the whole
+ * buffer contains only 0xFF bytes, the LEB is left unmapped.
+ *
+ * The reason we skip the trailing 0xFF bytes in case of dynamic volumes is
+ * that we want to make sure that more data may be appended to the logical
+ * eraseblock in the future. Indeed, writing 0xFF bytes may have side effects,
+ * and the PEB may not accept further writes. So if one writes a file-system
+ * image to a UBI volume where 0xFFs mean free space, UBI makes sure this free
+ * space stays writable after the update.
+ *
+ * We cannot do this for static volumes: they are read-only, and we would have
+ * to store the per-LEB CRC and the exact data length anyway.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int len, int used_ebs)
+{
+ int err;
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ int l = ALIGN(len, ubi->min_io_size);
+
+ memset(buf + len, 0xFF, l - len);
+ len = ubi_calc_data_len(ubi, buf, l);
+ if (len == 0) {
+ dbg_gen("all %d bytes contain 0xFF - skip", len);
+ return 0;
+ }
+
+ err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len);
+ } else {
+ /*
+ * When writing a static volume, and this is the last logical
+ * eraseblock, the length (@len) does not have to be aligned to
+ * the minimal flash I/O unit. The 'ubi_eba_write_leb_st()'
+ * function accepts exact (unaligned) length and stores it in
+ * the VID header. And it takes care of proper alignment by
+ * padding the buffer. Here we just make sure the padding will
+ * contain zeros, not random trash.
+ */
+ memset(buf + len, 0, vol->usable_leb_size - len);
+ err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, used_ebs);
+ }
+
+ return err;
+}
+
+/**
+ * ubi_more_update_data - write more update data.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @buf: write data (user-space memory buffer)
+ * @count: how many bytes to write
+ *
+ * This function writes more data to the volume which is being updated. It may
+ * be called an arbitrary number of times until all the update data arrives.
+ * This function returns %0 in case of success, the number of bytes written
+ * during the last call if the whole volume update has been successfully
+ * finished, and a negative error code in case of failure.
+ */
+int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count)
+{
+ int lnum, offs, err = 0, len, to_write = count;
+
+ dbg_gen("write %d of %lld bytes, %lld already passed",
+ count, vol->upd_bytes, vol->upd_received);
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ lnum = div_u64_rem(vol->upd_received, vol->usable_leb_size, &offs);
+ if (vol->upd_received + count > vol->upd_bytes)
+ to_write = count = vol->upd_bytes - vol->upd_received;
+
+ /*
+ * When updating volumes, we accumulate a whole logical eraseblock of
+ * data and write it at once.
+ */
+ if (offs != 0) {
+ /*
+ * This is a write to the middle of the logical eraseblock. We
+ * copy the data to our update buffer and wait for more data or
+ * flush it if the whole eraseblock is written or the update
+ * is finished.
+ */
+
+ len = vol->usable_leb_size - offs;
+ if (len > count)
+ len = count;
+
+ err = copy_from_user(vol->upd_buf + offs, buf, len);
+ if (err)
+ return -EFAULT;
+
+ if (offs + len == vol->usable_leb_size ||
+ vol->upd_received + len == vol->upd_bytes) {
+ int flush_len = offs + len;
+
+ /*
+ * OK, we gathered either the whole eraseblock or this
+ * is the last chunk, it's time to flush the buffer.
+ */
+ ubi_assert(flush_len <= vol->usable_leb_size);
+ err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len,
+ vol->upd_ebs);
+ if (err)
+ return err;
+ }
+
+ vol->upd_received += len;
+ count -= len;
+ buf += len;
+ lnum += 1;
+ }
+
+ /*
+ * If we've got more to write, let's continue. At this point we know we
+ * are starting from the beginning of an eraseblock.
+ */
+ while (count) {
+ if (count > vol->usable_leb_size)
+ len = vol->usable_leb_size;
+ else
+ len = count;
+
+ err = copy_from_user(vol->upd_buf, buf, len);
+ if (err)
+ return -EFAULT;
+
+ if (len == vol->usable_leb_size ||
+ vol->upd_received + len == vol->upd_bytes) {
+ err = write_leb(ubi, vol, lnum, vol->upd_buf,
+ len, vol->upd_ebs);
+ if (err)
+ break;
+ }
+
+ vol->upd_received += len;
+ count -= len;
+ lnum += 1;
+ buf += len;
+ }
+
+ ubi_assert(vol->upd_received <= vol->upd_bytes);
+ if (vol->upd_received == vol->upd_bytes) {
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+ /* The update is finished, clear the update marker */
+ err = clear_update_marker(ubi, vol, vol->upd_bytes);
+ if (err)
+ return err;
+ vol->updating = 0;
+ err = to_write;
+ vfree(vol->upd_buf);
+ }
+
+ return err;
+}
+
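+/*
+ * Illustrative user-space counterpart (sketch, error handling omitted): the
+ * UBI_IOCVOLUP ioctl on the volume character device ends up in
+ * ubi_start_update(), and each subsequent write() in ubi_more_update_data():
+ *
+ *    int fd = open("/dev/ubi0_0", O_RDWR);
+ *    long long bytes = image_size;
+ *
+ *    ioctl(fd, UBI_IOCVOLUP, &bytes);
+ *    while ((len = read(img_fd, chunk, sizeof(chunk))) > 0)
+ *        write(fd, chunk, len);
+ *    close(fd);
+ */
+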
+/**
+ * ubi_more_leb_change_data - accept more data for atomic LEB change.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @buf: write data (user-space memory buffer)
+ * @count: how many bytes to write
+ *
+ * This function accepts more data for the volume which is under the
+ * "atomic LEB change" operation. It may be called an arbitrary number of times
+ * until all data arrives. This function returns %0 in case of success, the number
+ * of bytes written during the last call if the whole "atomic LEB change"
+ * operation has been successfully finished, and a negative error code in case
+ * of failure.
+ */
+int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count)
+{
+ int err;
+
+ dbg_gen("write %d of %lld bytes, %lld already passed",
+ count, vol->upd_bytes, vol->upd_received);
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ if (vol->upd_received + count > vol->upd_bytes)
+ count = vol->upd_bytes - vol->upd_received;
+
+ err = copy_from_user(vol->upd_buf + vol->upd_received, buf, count);
+ if (err)
+ return -EFAULT;
+
+ vol->upd_received += count;
+
+ if (vol->upd_received == vol->upd_bytes) {
+ int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
+
+ memset(vol->upd_buf + vol->upd_bytes, 0xFF,
+ len - vol->upd_bytes);
+ len = ubi_calc_data_len(ubi, vol->upd_buf, len);
+ err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
+ vol->upd_buf, len);
+ if (err)
+ return err;
+ }
+
+ ubi_assert(vol->upd_received <= vol->upd_bytes);
+ if (vol->upd_received == vol->upd_bytes) {
+ vol->changing_leb = 0;
+ err = count;
+ vfree(vol->upd_buf);
+ }
+
+ return err;
+}
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
new file mode 100644
index 0000000000..2c867d16f8
--- /dev/null
+++ b/drivers/mtd/ubi/vmt.c
@@ -0,0 +1,797 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file contains implementation of volume creation, deletion, updating and
+ * resizing.
+ */
+
+#include <linux/err.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "ubi.h"
+
+static int self_check_volumes(struct ubi_device *ubi);
+
+static ssize_t vol_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+/* Device attributes corresponding to files in '/<sysfs>/class/ubi/ubiX_Y' */
+static struct device_attribute attr_vol_reserved_ebs =
+ __ATTR(reserved_ebs, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_type =
+ __ATTR(type, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_name =
+ __ATTR(name, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_corrupted =
+ __ATTR(corrupted, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_alignment =
+ __ATTR(alignment, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_usable_eb_size =
+ __ATTR(usable_eb_size, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_data_bytes =
+ __ATTR(data_bytes, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_upd_marker =
+ __ATTR(upd_marker, S_IRUGO, vol_attribute_show, NULL);
+
+/*
+ * "Show" method for files in '/<sysfs>/class/ubi/ubiX_Y/'.
+ *
+ * Consider a situation:
+ * A. process 1 opens a sysfs file related to volume Y, say
+ * /<sysfs>/class/ubi/ubiX_Y/reserved_ebs;
+ * B. process 2 removes volume Y;
+ * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
+ *
+ * In this situation, this function will return %-ENODEV because it will find
+ * out that the volume was removed from the @ubi->volumes array.
+ */
+static ssize_t vol_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
+ struct ubi_device *ubi = vol->ubi;
+
+ spin_lock(&ubi->volumes_lock);
+ if (!ubi->volumes[vol->vol_id]) {
+ spin_unlock(&ubi->volumes_lock);
+ return -ENODEV;
+ }
+ /* Take a reference to prevent volume removal */
+ vol->ref_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ if (attr == &attr_vol_reserved_ebs)
+ ret = sprintf(buf, "%d\n", vol->reserved_pebs);
+ else if (attr == &attr_vol_type) {
+ const char *tp;
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ tp = "dynamic";
+ else
+ tp = "static";
+ ret = sprintf(buf, "%s\n", tp);
+ } else if (attr == &attr_vol_name)
+ ret = sprintf(buf, "%s\n", vol->name);
+ else if (attr == &attr_vol_corrupted)
+ ret = sprintf(buf, "%d\n", vol->corrupted);
+ else if (attr == &attr_vol_alignment)
+ ret = sprintf(buf, "%d\n", vol->alignment);
+ else if (attr == &attr_vol_usable_eb_size)
+ ret = sprintf(buf, "%d\n", vol->usable_leb_size);
+ else if (attr == &attr_vol_data_bytes)
+ ret = sprintf(buf, "%lld\n", vol->used_bytes);
+ else if (attr == &attr_vol_upd_marker)
+ ret = sprintf(buf, "%d\n", vol->upd_marker);
+ else
+ /* This must be a bug */
+ ret = -EINVAL;
+
+ /* We've done the operation, drop the volume reference */
+ spin_lock(&ubi->volumes_lock);
+ vol->ref_count -= 1;
+ ubi_assert(vol->ref_count >= 0);
+ spin_unlock(&ubi->volumes_lock);
+ return ret;
+}
+
+static struct attribute *volume_dev_attrs[] = {
+ &attr_vol_reserved_ebs.attr,
+ &attr_vol_type.attr,
+ &attr_vol_name.attr,
+ &attr_vol_corrupted.attr,
+ &attr_vol_alignment.attr,
+ &attr_vol_usable_eb_size.attr,
+ &attr_vol_data_bytes.attr,
+ &attr_vol_upd_marker.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(volume_dev);
+
+/* Release method for volume devices */
+static void vol_release(struct device *dev)
+{
+ struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
+
+ ubi_eba_replace_table(vol, NULL);
+ ubi_fastmap_destroy_checkmap(vol);
+ kfree(vol);
+}
+
+/**
+ * ubi_create_volume - create volume.
+ * @ubi: UBI device description object
+ * @req: volume creation request
+ *
+ * This function creates the volume described by @req. If @req->vol_id is
+ * %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new
+ * volume and saves it in @req->vol_id. Returns zero in case of success and a
+ * negative error code in case of failure. Note, the caller has to have the
+ * @ubi->device_mutex locked.
+ */
+int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
+{
+ int i, err, vol_id = req->vol_id;
+ struct ubi_volume *vol;
+ struct ubi_vtbl_record vtbl_rec;
+ struct ubi_eba_table *eba_tbl = NULL;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
+ if (!vol)
+ return -ENOMEM;
+
+ device_initialize(&vol->dev);
+ vol->dev.release = vol_release;
+ vol->dev.parent = &ubi->dev;
+ vol->dev.class = &ubi_class;
+ vol->dev.groups = volume_dev_groups;
+
+ if (req->flags & UBI_VOL_SKIP_CRC_CHECK_FLG)
+ vol->skip_check = 1;
+
+ spin_lock(&ubi->volumes_lock);
+ if (vol_id == UBI_VOL_NUM_AUTO) {
+ /* Find unused volume ID */
+ dbg_gen("search for vacant volume ID");
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (!ubi->volumes[i]) {
+ vol_id = i;
+ break;
+ }
+
+ if (vol_id == UBI_VOL_NUM_AUTO) {
+ ubi_err(ubi, "out of volume IDs");
+ err = -ENFILE;
+ goto out_unlock;
+ }
+ req->vol_id = vol_id;
+ }
+
+ dbg_gen("create device %d, volume %d, %llu bytes, type %d, name %s",
+ ubi->ubi_num, vol_id, (unsigned long long)req->bytes,
+ (int)req->vol_type, req->name);
+
+ /* Ensure that this volume does not exist */
+ err = -EEXIST;
+ if (ubi->volumes[vol_id]) {
+ ubi_err(ubi, "volume %d already exists", vol_id);
+ goto out_unlock;
+ }
+
+ /* Ensure that the name is unique */
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (ubi->volumes[i] &&
+ ubi->volumes[i]->name_len == req->name_len &&
+ !strcmp(ubi->volumes[i]->name, req->name)) {
+ ubi_err(ubi, "volume \"%s\" exists (ID %d)",
+ req->name, i);
+ goto out_unlock;
+ }
+
+ /* Calculate how many eraseblocks are requested */
+ vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
+ vol->reserved_pebs = div_u64(req->bytes + vol->usable_leb_size - 1,
+ vol->usable_leb_size);
+
+ /* Reserve physical eraseblocks */
+ if (vol->reserved_pebs > ubi->avail_pebs) {
+ ubi_err(ubi, "not enough PEBs, only %d available",
+ ubi->avail_pebs);
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ err = -ENOSPC;
+ goto out_unlock;
+ }
+ ubi->avail_pebs -= vol->reserved_pebs;
+ ubi->rsvd_pebs += vol->reserved_pebs;
+ spin_unlock(&ubi->volumes_lock);
+
+ vol->vol_id = vol_id;
+ vol->alignment = req->alignment;
+ vol->data_pad = ubi->leb_size % vol->alignment;
+ vol->vol_type = req->vol_type;
+ vol->name_len = req->name_len;
+ memcpy(vol->name, req->name, vol->name_len);
+ vol->ubi = ubi;
+
+ /*
+ * Finish all pending erases because there may be some LEBs belonging
+ * to the same volume ID.
+ */
+ err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
+ if (err)
+ goto out_acc;
+
+ eba_tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
+ if (IS_ERR(eba_tbl)) {
+ err = PTR_ERR(eba_tbl);
+ goto out_acc;
+ }
+
+ ubi_eba_replace_table(vol, eba_tbl);
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ vol->used_ebs = vol->reserved_pebs;
+ vol->last_eb_bytes = vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
+ } else {
+ vol->used_ebs = div_u64_rem(vol->used_bytes,
+ vol->usable_leb_size,
+ &vol->last_eb_bytes);
+ if (vol->last_eb_bytes != 0)
+ vol->used_ebs += 1;
+ else
+ vol->last_eb_bytes = vol->usable_leb_size;
+ }
+
+ /* Make volume "available" before it becomes accessible via sysfs */
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+ ubi->vol_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ /* Register character device for the volume */
+ cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
+ vol->cdev.owner = THIS_MODULE;
+
+ vol->dev.devt = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
+ dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
+ err = cdev_device_add(&vol->cdev, &vol->dev);
+ if (err) {
+ ubi_err(ubi, "cannot add device");
+ goto out_mapping;
+ }
+
+ /* Fill volume table record */
+ memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record));
+ vtbl_rec.reserved_pebs = cpu_to_be32(vol->reserved_pebs);
+ vtbl_rec.alignment = cpu_to_be32(vol->alignment);
+ vtbl_rec.data_pad = cpu_to_be32(vol->data_pad);
+ vtbl_rec.name_len = cpu_to_be16(vol->name_len);
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ vtbl_rec.vol_type = UBI_VID_DYNAMIC;
+ else
+ vtbl_rec.vol_type = UBI_VID_STATIC;
+
+ if (vol->skip_check)
+ vtbl_rec.flags |= UBI_VTBL_SKIP_CRC_CHECK_FLG;
+
+ memcpy(vtbl_rec.name, vol->name, vol->name_len);
+
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ goto out_sysfs;
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
+ self_check_volumes(ubi);
+ return err;
+
+out_sysfs:
+ /*
+ * We have registered our device, we should not free the volume
+ * description object in this function in case of an error - it is
+ * freed by the release function.
+ */
+ cdev_device_del(&vol->cdev, &vol->dev);
+out_mapping:
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = NULL;
+ ubi->vol_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
+out_acc:
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs -= vol->reserved_pebs;
+ ubi->avail_pebs += vol->reserved_pebs;
+out_unlock:
+ spin_unlock(&ubi->volumes_lock);
+ put_device(&vol->dev);
+ ubi_err(ubi, "cannot create volume %d, error %d", vol_id, err);
+ return err;
+}
+
+/**
+ * ubi_remove_volume - remove volume.
+ * @desc: volume descriptor
+ * @no_vtbl: do not change the volume table if non-zero
+ *
+ * This function removes volume described by @desc. The volume has to be opened
+ * in "exclusive" mode. Returns zero in case of success and a negative error
+ * code in case of failure. The caller has to have the @ubi->device_mutex
+ * locked.
+ */
+int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs;
+
+ dbg_gen("remove device %d, volume %d", ubi->ubi_num, vol_id);
+ ubi_assert(desc->mode == UBI_EXCLUSIVE);
+ ubi_assert(vol == ubi->volumes[vol_id]);
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ spin_lock(&ubi->volumes_lock);
+ if (vol->ref_count > 1) {
+ /*
+ * The volume is busy, probably someone is reading one of its
+ * sysfs files.
+ */
+ err = -EBUSY;
+ goto out_unlock;
+ }
+ ubi->volumes[vol_id] = NULL;
+ spin_unlock(&ubi->volumes_lock);
+
+ if (!no_vtbl) {
+ err = ubi_change_vtbl_record(ubi, vol_id, NULL);
+ if (err)
+ goto out_err;
+ }
+
+ for (i = 0; i < vol->reserved_pebs; i++) {
+ err = ubi_eba_unmap_leb(ubi, vol, i);
+ if (err)
+ goto out_err;
+ }
+
+ cdev_device_del(&vol->cdev, &vol->dev);
+ put_device(&vol->dev);
+
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs -= reserved_pebs;
+ ubi->avail_pebs += reserved_pebs;
+ ubi_update_reserved(ubi);
+ ubi->vol_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
+ if (!no_vtbl)
+ self_check_volumes(ubi);
+
+ return 0;
+
+out_err:
+ ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err);
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+out_unlock:
+ spin_unlock(&ubi->volumes_lock);
+ return err;
+}
+
+/**
+ * ubi_resize_volume - re-size volume.
+ * @desc: volume descriptor
+ * @reserved_pebs: new size in physical eraseblocks
+ *
+ * This function re-sizes the volume and returns zero in case of success, and a
+ * negative error code in case of failure. The caller has to have the
+ * @ubi->device_mutex locked.
+ */
+int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
+{
+ int i, err, pebs;
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ struct ubi_vtbl_record vtbl_rec;
+ struct ubi_eba_table *new_eba_tbl = NULL;
+ int vol_id = vol->vol_id;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ dbg_gen("re-size device %d, volume %d to from %d to %d PEBs",
+ ubi->ubi_num, vol_id, vol->reserved_pebs, reserved_pebs);
+
+ if (vol->vol_type == UBI_STATIC_VOLUME &&
+ reserved_pebs < vol->used_ebs) {
+ ubi_err(ubi, "too small size %d, %d LEBs contain data",
+ reserved_pebs, vol->used_ebs);
+ return -EINVAL;
+ }
+
+ /* If the size is the same, we have nothing to do */
+ if (reserved_pebs == vol->reserved_pebs)
+ return 0;
+
+ new_eba_tbl = ubi_eba_create_table(vol, reserved_pebs);
+ if (IS_ERR(new_eba_tbl))
+ return PTR_ERR(new_eba_tbl);
+
+ spin_lock(&ubi->volumes_lock);
+ if (vol->ref_count > 1) {
+ spin_unlock(&ubi->volumes_lock);
+ err = -EBUSY;
+ goto out_free;
+ }
+ spin_unlock(&ubi->volumes_lock);
+
+ /* Reserve physical eraseblocks */
+ pebs = reserved_pebs - vol->reserved_pebs;
+ if (pebs > 0) {
+ spin_lock(&ubi->volumes_lock);
+ if (pebs > ubi->avail_pebs) {
+ ubi_err(ubi, "not enough PEBs: requested %d, available %d",
+ pebs, ubi->avail_pebs);
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ spin_unlock(&ubi->volumes_lock);
+ err = -ENOSPC;
+ goto out_free;
+ }
+ ubi->avail_pebs -= pebs;
+ ubi->rsvd_pebs += pebs;
+ ubi_eba_copy_table(vol, new_eba_tbl, vol->reserved_pebs);
+ ubi_eba_replace_table(vol, new_eba_tbl);
+ spin_unlock(&ubi->volumes_lock);
+ }
+
+ if (pebs < 0) {
+ for (i = 0; i < -pebs; i++) {
+ err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
+ if (err)
+ goto out_free;
+ }
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs += pebs;
+ ubi->avail_pebs -= pebs;
+ ubi_update_reserved(ubi);
+ ubi_eba_copy_table(vol, new_eba_tbl, reserved_pebs);
+ ubi_eba_replace_table(vol, new_eba_tbl);
+ spin_unlock(&ubi->volumes_lock);
+ }
+
+ /*
+ * When we shrink a volume we have to flush all pending (erase) work.
+ * Otherwise it can happen that upon next attach UBI finds a LEB with
+ * lnum > highest_lnum and refuses to attach.
+ */
+ if (pebs < 0) {
+ err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
+ if (err)
+ goto out_acc;
+ }
+
+ /* Change volume table record */
+ vtbl_rec = ubi->vtbl[vol_id];
+ vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ goto out_acc;
+
+ vol->reserved_pebs = reserved_pebs;
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ vol->used_ebs = reserved_pebs;
+ vol->last_eb_bytes = vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
+ }
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
+ self_check_volumes(ubi);
+ return err;
+
+out_acc:
+ if (pebs > 0) {
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs -= pebs;
+ ubi->avail_pebs += pebs;
+ spin_unlock(&ubi->volumes_lock);
+ }
+ return err;
+
+out_free:
+ ubi_eba_destroy_table(new_eba_tbl);
+ return err;
+}
+
+/**
+ * ubi_rename_volumes - re-name UBI volumes.
+ * @ubi: UBI device description object
+ * @rename_list: list of &struct ubi_rename_entry objects
+ *
+ * This function re-names or removes volumes specified in the re-name list.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
+{
+ int err;
+ struct ubi_rename_entry *re;
+
+ err = ubi_vtbl_rename_volumes(ubi, rename_list);
+ if (err)
+ return err;
+
+ list_for_each_entry(re, rename_list, list) {
+ if (re->remove) {
+ err = ubi_remove_volume(re->desc, 1);
+ if (err)
+ break;
+ } else {
+ struct ubi_volume *vol = re->desc->vol;
+
+ spin_lock(&ubi->volumes_lock);
+ vol->name_len = re->new_name_len;
+ memcpy(vol->name, re->new_name, re->new_name_len + 1);
+ spin_unlock(&ubi->volumes_lock);
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_RENAMED);
+ }
+ }
+
+ if (!err)
+ self_check_volumes(ubi);
+ return err;
+}
+
+/**
+ * ubi_add_volume - add volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ *
+ * This function adds an existing volume and initializes all its data
+ * structures. Returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
+{
+ int err, vol_id = vol->vol_id;
+ dev_t dev;
+
+ dbg_gen("add volume %d", vol_id);
+
+ /* Register character device for the volume */
+ cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
+ vol->cdev.owner = THIS_MODULE;
+ dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
+ err = cdev_add(&vol->cdev, dev, 1);
+ if (err) {
+ ubi_err(ubi, "cannot add character device for volume %d, error %d",
+ vol_id, err);
+ vol_release(&vol->dev);
+ return err;
+ }
+
+ vol->dev.release = vol_release;
+ vol->dev.parent = &ubi->dev;
+ vol->dev.devt = dev;
+ vol->dev.class = &ubi_class;
+ vol->dev.groups = volume_dev_groups;
+ dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
+ err = device_register(&vol->dev);
+ if (err) {
+ cdev_del(&vol->cdev);
+ put_device(&vol->dev);
+ return err;
+ }
+
+ self_check_volumes(ubi);
+ return err;
+}
+
+/**
+ * ubi_free_volume - free volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ *
+ * This function frees all resources for volume @vol but does not remove it.
+ * Used only when the UBI device is detached.
+ */
+void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
+{
+ dbg_gen("free volume %d", vol->vol_id);
+
+ ubi->volumes[vol->vol_id] = NULL;
+ cdev_del(&vol->cdev);
+ device_unregister(&vol->dev);
+}
+
+/**
+ * self_check_volume - check volume information.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ *
+ * Returns zero if volume is all right and a negative error code if not.
+ */
+static int self_check_volume(struct ubi_device *ubi, int vol_id)
+{
+ int idx = vol_id2idx(ubi, vol_id);
+ int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
+ const struct ubi_volume *vol;
+ long long n;
+ const char *name;
+
+ spin_lock(&ubi->volumes_lock);
+ reserved_pebs = be32_to_cpu(ubi->vtbl[vol_id].reserved_pebs);
+ vol = ubi->volumes[idx];
+
+ if (!vol) {
+ if (reserved_pebs) {
+ ubi_err(ubi, "no volume info, but volume exists");
+ goto fail;
+ }
+ spin_unlock(&ubi->volumes_lock);
+ return 0;
+ }
+
+ if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 ||
+ vol->name_len < 0) {
+ ubi_err(ubi, "negative values");
+ goto fail;
+ }
+ if (vol->alignment > ubi->leb_size || vol->alignment == 0) {
+ ubi_err(ubi, "bad alignment");
+ goto fail;
+ }
+
+ n = vol->alignment & (ubi->min_io_size - 1);
+ if (vol->alignment != 1 && n) {
+ ubi_err(ubi, "alignment is not multiple of min I/O unit");
+ goto fail;
+ }
+
+ n = ubi->leb_size % vol->alignment;
+ if (vol->data_pad != n) {
+ ubi_err(ubi, "bad data_pad, has to be %lld", n);
+ goto fail;
+ }
+
+ if (vol->vol_type != UBI_DYNAMIC_VOLUME &&
+ vol->vol_type != UBI_STATIC_VOLUME) {
+ ubi_err(ubi, "bad vol_type");
+ goto fail;
+ }
+
+ if (vol->upd_marker && vol->corrupted) {
+ ubi_err(ubi, "update marker and corrupted simultaneously");
+ goto fail;
+ }
+
+ if (vol->reserved_pebs > ubi->good_peb_count) {
+ ubi_err(ubi, "too large reserved_pebs");
+ goto fail;
+ }
+
+ n = ubi->leb_size - vol->data_pad;
+ if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) {
+ ubi_err(ubi, "bad usable_leb_size, has to be %lld", n);
+ goto fail;
+ }
+
+ if (vol->name_len > UBI_VOL_NAME_MAX) {
+ ubi_err(ubi, "too long volume name, max is %d",
+ UBI_VOL_NAME_MAX);
+ goto fail;
+ }
+
+ n = strnlen(vol->name, vol->name_len + 1);
+ if (n != vol->name_len) {
+ ubi_err(ubi, "bad name_len %lld", n);
+ goto fail;
+ }
+
+ n = (long long)vol->used_ebs * vol->usable_leb_size;
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ if (vol->corrupted) {
+ ubi_err(ubi, "corrupted dynamic volume");
+ goto fail;
+ }
+ if (vol->used_ebs != vol->reserved_pebs) {
+ ubi_err(ubi, "bad used_ebs");
+ goto fail;
+ }
+ if (vol->last_eb_bytes != vol->usable_leb_size) {
+ ubi_err(ubi, "bad last_eb_bytes");
+ goto fail;
+ }
+ if (vol->used_bytes != n) {
+ ubi_err(ubi, "bad used_bytes");
+ goto fail;
+ }
+
+ if (vol->skip_check) {
+ ubi_err(ubi, "bad skip_check");
+ goto fail;
+ }
+ } else {
+ if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
+ ubi_err(ubi, "bad used_ebs");
+ goto fail;
+ }
+ if (vol->last_eb_bytes < 0 ||
+ vol->last_eb_bytes > vol->usable_leb_size) {
+ ubi_err(ubi, "bad last_eb_bytes");
+ goto fail;
+ }
+ if (vol->used_bytes < 0 || vol->used_bytes > n ||
+ vol->used_bytes < n - vol->usable_leb_size) {
+ ubi_err(ubi, "bad used_bytes");
+ goto fail;
+ }
+ }
+
+ alignment = be32_to_cpu(ubi->vtbl[vol_id].alignment);
+ data_pad = be32_to_cpu(ubi->vtbl[vol_id].data_pad);
+ name_len = be16_to_cpu(ubi->vtbl[vol_id].name_len);
+ upd_marker = ubi->vtbl[vol_id].upd_marker;
+ name = &ubi->vtbl[vol_id].name[0];
+ if (ubi->vtbl[vol_id].vol_type == UBI_VID_DYNAMIC)
+ vol_type = UBI_DYNAMIC_VOLUME;
+ else
+ vol_type = UBI_STATIC_VOLUME;
+
+ if (alignment != vol->alignment || data_pad != vol->data_pad ||
+ upd_marker != vol->upd_marker || vol_type != vol->vol_type ||
+ name_len != vol->name_len || strncmp(name, vol->name, name_len)) {
+ ubi_err(ubi, "volume info is different");
+ goto fail;
+ }
+
+ spin_unlock(&ubi->volumes_lock);
+ return 0;
+
+fail:
+ ubi_err(ubi, "self-check failed for volume %d", vol_id);
+ if (vol)
+ ubi_dump_vol_info(vol);
+ ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+ dump_stack();
+ spin_unlock(&ubi->volumes_lock);
+ return -EINVAL;
+}
+
+/**
+ * self_check_volumes - check information about all volumes.
+ * @ubi: UBI device description object
+ *
+ * Returns zero if volumes are all right and a negative error code if not.
+ */
+static int self_check_volumes(struct ubi_device *ubi)
+{
+ int i, err = 0;
+
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ err = self_check_volume(ubi, i);
+ if (err)
+ break;
+ }
+
+ return err;
+}
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
new file mode 100644
index 0000000000..f700f0e4f2
--- /dev/null
+++ b/drivers/mtd/ubi/vtbl.c
@@ -0,0 +1,871 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006, 2007
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file includes volume table manipulation code. The volume table is an
+ * on-flash table containing volume meta-data like name, number of reserved
+ * physical eraseblocks, type, etc. The volume table is stored in the so-called
+ * "layout volume".
+ *
+ * The layout volume is an internal volume which is organized as follows. It
+ * consists of two logical eraseblocks - LEB 0 and LEB 1. Each logical
+ * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each
+ * other. This redundancy guarantees robustness to unclean reboots. The volume
+ * table is basically an array of volume table records. Each record contains
+ * full information about the volume and is protected by a CRC checksum. Note,
+ * nowadays we use the atomic LEB change operation when updating the volume
+ * table, so we do not really need 2 LEBs anymore, but we preserve the older
+ * design for backward compatibility.
+ *
+ * When the volume table is changed, it is first changed in RAM. Then LEB 0 is
+ * erased, and the updated volume table is written back to LEB 0. Then same for
+ * LEB 1. This scheme guarantees recoverability from unclean reboots.
+ *
+ * In this UBI implementation the on-flash volume table does not contain any
+ * information about how much data static volumes contain.
+ *
+ * But it would still be beneficial to store this information in the volume
+ * table. For example, suppose we have a static volume X, and all its physical
+ * eraseblocks went bad for some reason. Suppose that while attaching the
+ * corresponding MTD device we find no logical eraseblocks belonging to
+ * volume X. According to the volume table, volume X does exist, so we do not
+ * know whether it is just empty or all its physical eraseblocks went bad.
+ * Hence, we cannot warn the user properly.
+ *
+ * The volume table also stores so-called "update marker", which is used for
+ * volume updates. Before updating the volume, the update marker is set, and
+ * after the update operation is finished, the update marker is cleared. So if
+ * the update operation was interrupted (e.g. by an unclean reboot) - the
+ * update marker is still there and we know that the volume's contents is
+ * damaged.
+ */
+
+#include <linux/crc32.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <asm/div64.h>
+#include "ubi.h"
+
+static void self_vtbl_check(const struct ubi_device *ubi);
+
+/* Empty volume table record */
+static struct ubi_vtbl_record empty_vtbl_record;
+
+/**
+ * ubi_update_layout_vol - helper for updating the layout volume on flash.
+ * @ubi: UBI device description object
+ */
+static int ubi_update_layout_vol(struct ubi_device *ubi)
+{
+ struct ubi_volume *layout_vol;
+ int i, err;
+
+ layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
+ for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
+ err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
+ ubi->vtbl_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_change_vtbl_record - change volume table record.
+ * @ubi: UBI device description object
+ * @idx: table index to change
+ * @vtbl_rec: new volume table record
+ *
+ * This function changes volume table record @idx. If @vtbl_rec is %NULL, an
+ * empty volume table record is written. The caller does not have to calculate
+ * the CRC of the record as it is done by this function. Returns zero in case
+ * of success and a negative error code in case of failure.
+ */
+int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
+ struct ubi_vtbl_record *vtbl_rec)
+{
+ int err;
+ uint32_t crc;
+
+ ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
+
+ if (!vtbl_rec)
+ vtbl_rec = &empty_vtbl_record;
+ else {
+ crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC);
+ vtbl_rec->crc = cpu_to_be32(crc);
+ }
+
+ memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
+ err = ubi_update_layout_vol(ubi);
+
+ self_vtbl_check(ubi);
+ return err ? err : 0;
+}
+
+/**
+ * ubi_vtbl_rename_volumes - rename UBI volumes in the volume table.
+ * @ubi: UBI device description object
+ * @rename_list: list of &struct ubi_rename_entry objects
+ *
+ * This function re-names multiple volumes specified in @rename_list in the volume
+ * table. Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
+ struct list_head *rename_list)
+{
+ struct ubi_rename_entry *re;
+
+ list_for_each_entry(re, rename_list, list) {
+ uint32_t crc;
+ struct ubi_volume *vol = re->desc->vol;
+ struct ubi_vtbl_record *vtbl_rec = &ubi->vtbl[vol->vol_id];
+
+ if (re->remove) {
+ memcpy(vtbl_rec, &empty_vtbl_record,
+ sizeof(struct ubi_vtbl_record));
+ continue;
+ }
+
+ vtbl_rec->name_len = cpu_to_be16(re->new_name_len);
+ memcpy(vtbl_rec->name, re->new_name, re->new_name_len);
+ memset(vtbl_rec->name + re->new_name_len, 0,
+ UBI_VOL_NAME_MAX + 1 - re->new_name_len);
+ crc = crc32(UBI_CRC32_INIT, vtbl_rec,
+ UBI_VTBL_RECORD_SIZE_CRC);
+ vtbl_rec->crc = cpu_to_be32(crc);
+ }
+
+ return ubi_update_layout_vol(ubi);
+}
+
+/**
+ * vtbl_check - check that the volume table is not corrupted and is sensible.
+ * @ubi: UBI device description object
+ * @vtbl: volume table
+ *
+ * This function returns zero if @vtbl is all right, %1 if CRC is incorrect,
+ * and %-EINVAL if it contains inconsistent data.
+ */
+static int vtbl_check(const struct ubi_device *ubi,
+ const struct ubi_vtbl_record *vtbl)
+{
+ int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len;
+ int upd_marker, err;
+ uint32_t crc;
+ const char *name;
+
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ cond_resched();
+
+ reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
+ alignment = be32_to_cpu(vtbl[i].alignment);
+ data_pad = be32_to_cpu(vtbl[i].data_pad);
+ upd_marker = vtbl[i].upd_marker;
+ vol_type = vtbl[i].vol_type;
+ name_len = be16_to_cpu(vtbl[i].name_len);
+ name = &vtbl[i].name[0];
+
+ crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
+ if (be32_to_cpu(vtbl[i].crc) != crc) {
+ ubi_err(ubi, "bad CRC at record %u: %#08x, not %#08x",
+ i, crc, be32_to_cpu(vtbl[i].crc));
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ return 1;
+ }
+
+ if (reserved_pebs == 0) {
+ if (memcmp(&vtbl[i], &empty_vtbl_record,
+ UBI_VTBL_RECORD_SIZE)) {
+ err = 2;
+ goto bad;
+ }
+ continue;
+ }
+
+ if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 ||
+ name_len < 0) {
+ err = 3;
+ goto bad;
+ }
+
+ if (alignment > ubi->leb_size || alignment == 0) {
+ err = 4;
+ goto bad;
+ }
+
+ n = alignment & (ubi->min_io_size - 1);
+ if (alignment != 1 && n) {
+ err = 5;
+ goto bad;
+ }
+
+ n = ubi->leb_size % alignment;
+ if (data_pad != n) {
+ ubi_err(ubi, "bad data_pad, has to be %d", n);
+ err = 6;
+ goto bad;
+ }
+
+ if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
+ err = 7;
+ goto bad;
+ }
+
+ if (upd_marker != 0 && upd_marker != 1) {
+ err = 8;
+ goto bad;
+ }
+
+ if (reserved_pebs > ubi->good_peb_count) {
+ ubi_err(ubi, "too large reserved_pebs %d, good PEBs %d",
+ reserved_pebs, ubi->good_peb_count);
+ err = 9;
+ goto bad;
+ }
+
+ if (name_len > UBI_VOL_NAME_MAX) {
+ err = 10;
+ goto bad;
+ }
+
+ if (name[0] == '\0') {
+ err = 11;
+ goto bad;
+ }
+
+ if (name_len != strnlen(name, name_len + 1)) {
+ err = 12;
+ goto bad;
+ }
+ }
+
+ /* Check that all names are unique */
+ for (i = 0; i < ubi->vtbl_slots - 1; i++) {
+ for (n = i + 1; n < ubi->vtbl_slots; n++) {
+ int len1 = be16_to_cpu(vtbl[i].name_len);
+ int len2 = be16_to_cpu(vtbl[n].name_len);
+
+ if (len1 > 0 && len1 == len2 &&
+ !strncmp(vtbl[i].name, vtbl[n].name, len1)) {
+ ubi_err(ubi, "volumes %d and %d have the same name \"%s\"",
+ i, n, vtbl[i].name);
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[n], n);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+
+bad:
+ ubi_err(ubi, "volume table check failed: record %d, error %d", i, err);
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ return -EINVAL;
+}
+
+/**
+ * create_vtbl - create a copy of volume table.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @copy: number of the volume table copy
+ * @vtbl: contents of the volume table
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int copy, void *vtbl)
+{
+ int err, tries = 0;
+ struct ubi_vid_io_buf *vidb;
+ struct ubi_vid_hdr *vid_hdr;
+ struct ubi_ainf_peb *new_aeb;
+
+ dbg_gen("create volume table (copy #%d)", copy + 1);
+
+ vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ if (!vidb)
+ return -ENOMEM;
+
+ vid_hdr = ubi_get_vid_hdr(vidb);
+
+retry:
+ new_aeb = ubi_early_get_peb(ubi, ai);
+ if (IS_ERR(new_aeb)) {
+ err = PTR_ERR(new_aeb);
+ goto out_free;
+ }
+
+ vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE;
+ vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
+ vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
+ vid_hdr->data_size = vid_hdr->used_ebs =
+ vid_hdr->data_pad = cpu_to_be32(0);
+ vid_hdr->lnum = cpu_to_be32(copy);
+ vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
+
+ /* The EC header is already there, write the VID header */
+ err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vidb);
+ if (err)
+ goto write_error;
+
+ /* Write the layout volume contents */
+ err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size);
+ if (err)
+ goto write_error;
+
+ /*
+ * And add it to the attaching information. Don't delete the old version
+ * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
+ */
+ err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
+ ubi_free_aeb(ai, new_aeb);
+ ubi_free_vid_buf(vidb);
+ return err;
+
+write_error:
+ if (err == -EIO && ++tries <= 5) {
+ /*
+ * Probably this physical eraseblock went bad, try to pick
+ * another one.
+ */
+ list_add(&new_aeb->u.list, &ai->erase);
+ goto retry;
+ }
+ ubi_free_aeb(ai, new_aeb);
+out_free:
+ ubi_free_vid_buf(vidb);
+ return err;
+}
+
+/**
+ * process_lvol - process the layout volume.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @av: layout volume attaching information
+ *
+ * This function is responsible for reading the layout volume, ensuring it is
+ * not corrupted, and recovering from corruptions if needed. Returns the
+ * volume table in case of success and a negative error code in case of
+ * failure.
+ */
+static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
+ struct ubi_attach_info *ai,
+ struct ubi_ainf_volume *av)
+{
+ int err;
+ struct rb_node *rb;
+ struct ubi_ainf_peb *aeb;
+ struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
+ int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};
+
+ /*
+ * UBI goes through the following steps when it changes the layout
+ * volume:
+ * a. erase LEB 0;
+ * b. write new data to LEB 0;
+ * c. erase LEB 1;
+ * d. write new data to LEB 1.
+ *
+ * Before the change, both LEBs contain the same data.
+ *
+ * Due to unclean reboots, the contents of LEB 0 may be lost, but there
+ * should be LEB 1. So it is OK if LEB 0 is corrupted while LEB 1 is not.
+ * Similarly, LEB 1 may be lost, but there should be LEB 0. And
+ * finally, unclean reboots may result in a situation when neither LEB
+ * 0 nor LEB 1 are corrupted, but they are different. In this case, LEB
+ * 0 contains more recent information.
+ *
+ * So the plan is to first check LEB 0. Then
+ * a. if LEB 0 is OK, it must be containing the most recent data; then
+ * we compare it with LEB 1, and if they are different, we copy LEB
+ * 0 to LEB 1;
+ * b. if LEB 0 is corrupted, LEB 1 has to be OK, and we copy LEB 1
+ * to LEB 0.
+ */
+
+ dbg_gen("check layout volume");
+
+ /* Read both LEB 0 and LEB 1 into memory */
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ leb[aeb->lnum] = vzalloc(ubi->vtbl_size);
+ if (!leb[aeb->lnum]) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0,
+ ubi->vtbl_size);
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
+ /*
+ * Scrub the PEB later. Note, -EBADMSG indicates an
+ * uncorrectable ECC error, but we have our own CRC and
+ * the data will be checked later. If the data is OK,
+ * the PEB will be scrubbed (because we set
+ * aeb->scrub). If the data is not OK, the contents of
+ * the PEB will be recovered from the second copy, and
+ * aeb->scrub will be cleared in
+ * 'ubi_add_to_av()'.
+ */
+ aeb->scrub = 1;
+ else if (err)
+ goto out_free;
+ }
+
+ err = -EINVAL;
+ if (leb[0]) {
+ leb_corrupted[0] = vtbl_check(ubi, leb[0]);
+ if (leb_corrupted[0] < 0)
+ goto out_free;
+ }
+
+ if (!leb_corrupted[0]) {
+ /* LEB 0 is OK */
+ if (leb[1])
+ leb_corrupted[1] = memcmp(leb[0], leb[1],
+ ubi->vtbl_size);
+ if (leb_corrupted[1]) {
+ ubi_warn(ubi, "volume table copy #2 is corrupted");
+ err = create_vtbl(ubi, ai, 1, leb[0]);
+ if (err)
+ goto out_free;
+ ubi_msg(ubi, "volume table was restored");
+ }
+
+ /* Both LEB 0 and LEB 1 are OK and consistent */
+ vfree(leb[1]);
+ return leb[0];
+ } else {
+ /* LEB 0 is corrupted or does not exist */
+ if (leb[1]) {
+ leb_corrupted[1] = vtbl_check(ubi, leb[1]);
+ if (leb_corrupted[1] < 0)
+ goto out_free;
+ }
+ if (leb_corrupted[1]) {
+ /* Both LEB 0 and LEB 1 are corrupted */
+ ubi_err(ubi, "both volume tables are corrupted");
+ goto out_free;
+ }
+
+ ubi_warn(ubi, "volume table copy #1 is corrupted");
+ err = create_vtbl(ubi, ai, 0, leb[1]);
+ if (err)
+ goto out_free;
+ ubi_msg(ubi, "volume table was restored");
+
+ vfree(leb[0]);
+ return leb[1];
+ }
+
+out_free:
+ vfree(leb[0]);
+ vfree(leb[1]);
+ return ERR_PTR(err);
+}
+
+/**
+ * create_empty_lvol - create empty layout volume.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns volume table contents in case of success and a
+ * negative error code in case of failure.
+ */
+static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
+{
+ int i;
+ struct ubi_vtbl_record *vtbl;
+
+ vtbl = vzalloc(ubi->vtbl_size);
+ if (!vtbl)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
+
+ for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
+ int err;
+
+ err = create_vtbl(ubi, ai, i, vtbl);
+ if (err) {
+ vfree(vtbl);
+ return ERR_PTR(err);
+ }
+ }
+
+ return vtbl;
+}
+
+/**
+ * init_volumes - initialize volume information for existing volumes.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @vtbl: volume table
+ *
+ * This function allocates volume description objects for existing volumes.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int init_volumes(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai,
+ const struct ubi_vtbl_record *vtbl)
+{
+ int i, err, reserved_pebs = 0;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ cond_resched();
+
+ if (be32_to_cpu(vtbl[i].reserved_pebs) == 0)
+ continue; /* Empty record */
+
+ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
+ if (!vol)
+ return -ENOMEM;
+
+ vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
+ vol->alignment = be32_to_cpu(vtbl[i].alignment);
+ vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
+ vol->upd_marker = vtbl[i].upd_marker;
+ vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
+ UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
+ vol->name_len = be16_to_cpu(vtbl[i].name_len);
+ vol->usable_leb_size = ubi->leb_size - vol->data_pad;
+ memcpy(vol->name, vtbl[i].name, vol->name_len);
+ vol->name[vol->name_len] = '\0';
+ vol->vol_id = i;
+
+ if (vtbl[i].flags & UBI_VTBL_SKIP_CRC_CHECK_FLG)
+ vol->skip_check = 1;
+
+ if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
+ /* Auto re-size flag may be set only for one volume */
+ if (ubi->autoresize_vol_id != -1) {
+ ubi_err(ubi, "more than one auto-resize volume (%d and %d)",
+ ubi->autoresize_vol_id, i);
+ kfree(vol);
+ return -EINVAL;
+ }
+
+ ubi->autoresize_vol_id = i;
+ }
+
+ ubi_assert(!ubi->volumes[i]);
+ ubi->volumes[i] = vol;
+ ubi->vol_count += 1;
+ vol->ubi = ubi;
+ reserved_pebs += vol->reserved_pebs;
+
+ /*
+ * We use ubi->peb_count and not vol->reserved_pebs because
+ * we want to keep the code simple. Otherwise we'd have to
+ * resize/check the bitmap upon volume resize too.
+ * Allocating a few bytes more does not hurt.
+ */
+ err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
+ if (err)
+ return err;
+
+ /*
+ * In case of a dynamic volume, UBI knows nothing about how much
+ * data is stored there. So assume the whole volume is used.
+ */
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ vol->used_ebs = vol->reserved_pebs;
+ vol->last_eb_bytes = vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
+ continue;
+ }
+
+ /* Static volumes only */
+ av = ubi_find_av(ai, i);
+ if (!av || !av->leb_count) {
+ /*
+ * No eraseblocks belonging to this volume found. We
+ * don't actually know whether this static volume is
+ * completely corrupted or just contains no data. And
+ * we cannot know this as long as data size is not
+ * stored on flash. So we just assume the volume is
+ * empty. FIXME: this should be handled.
+ */
+ continue;
+ }
+
+ if (av->leb_count != av->used_ebs) {
+ /*
+ * We found a static volume which misses several
+ * eraseblocks. Treat it as corrupted.
+ */
+ ubi_warn(ubi, "static volume %d misses %d LEBs - corrupted",
+ av->vol_id, av->used_ebs - av->leb_count);
+ vol->corrupted = 1;
+ continue;
+ }
+
+ vol->used_ebs = av->used_ebs;
+ vol->used_bytes =
+ (long long)(vol->used_ebs - 1) * vol->usable_leb_size;
+ vol->used_bytes += av->last_data_size;
+ vol->last_eb_bytes = av->last_data_size;
+ }
+
+ /* And add the layout volume */
+ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
+ if (!vol)
+ return -ENOMEM;
+
+ vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
+ vol->alignment = UBI_LAYOUT_VOLUME_ALIGN;
+ vol->vol_type = UBI_DYNAMIC_VOLUME;
+ vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
+ memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
+ vol->usable_leb_size = ubi->leb_size;
+ vol->used_ebs = vol->reserved_pebs;
+ vol->last_eb_bytes = vol->reserved_pebs;
+ vol->used_bytes =
+ (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
+ vol->vol_id = UBI_LAYOUT_VOLUME_ID;
+ vol->ref_count = 1;
+
+ ubi_assert(!ubi->volumes[i]);
+ ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
+ reserved_pebs += vol->reserved_pebs;
+ ubi->vol_count += 1;
+ vol->ubi = ubi;
+ err = ubi_fastmap_init_checkmap(vol, UBI_LAYOUT_VOLUME_EBS);
+ if (err)
+ return err;
+
+ if (reserved_pebs > ubi->avail_pebs) {
+ ubi_err(ubi, "not enough PEBs, required %d, available %d",
+ reserved_pebs, ubi->avail_pebs);
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ return -ENOSPC;
+ }
+ ubi->rsvd_pebs += reserved_pebs;
+ ubi->avail_pebs -= reserved_pebs;
+
+ return 0;
+}
+
+/**
+ * check_av - check volume attaching information.
+ * @vol: UBI volume description object
+ * @av: volume attaching information
+ *
+ * This function returns zero if the volume attaching information is consistent
+ * with the data read from the volume table, and %-EINVAL if not.
+ */
+static int check_av(const struct ubi_volume *vol,
+ const struct ubi_ainf_volume *av)
+{
+ int err;
+
+ if (av->highest_lnum >= vol->reserved_pebs) {
+ err = 1;
+ goto bad;
+ }
+ if (av->leb_count > vol->reserved_pebs) {
+ err = 2;
+ goto bad;
+ }
+ if (av->vol_type != vol->vol_type) {
+ err = 3;
+ goto bad;
+ }
+ if (av->used_ebs > vol->reserved_pebs) {
+ err = 4;
+ goto bad;
+ }
+ if (av->data_pad != vol->data_pad) {
+ err = 5;
+ goto bad;
+ }
+ return 0;
+
+bad:
+ ubi_err(vol->ubi, "bad attaching information, error %d", err);
+ ubi_dump_av(av);
+ ubi_dump_vol_info(vol);
+ return -EINVAL;
+}
+
+/**
+ * check_attaching_info - check the attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * Even though we protect on-flash data by CRC checksums, we still don't trust
+ * the media. This function ensures that the attaching information is
+ * consistent with the information read from the volume table. Returns zero
+ * if the attaching information is OK and %-EINVAL if it is not.
+ */
+static int check_attaching_info(const struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
+{
+ int err, i;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+
+ if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
+ ubi_err(ubi, "found %d volumes while attaching, maximum is %d + %d",
+ ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
+ return -EINVAL;
+ }
+
+ if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
+ ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err(ubi, "too large volume ID %d found",
+ ai->highest_vol_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ cond_resched();
+
+ av = ubi_find_av(ai, i);
+ vol = ubi->volumes[i];
+ if (!vol) {
+ if (av)
+ ubi_remove_av(ai, av);
+ continue;
+ }
+
+ if (vol->reserved_pebs == 0) {
+ ubi_assert(i < ubi->vtbl_slots);
+
+ if (!av)
+ continue;
+
+ /*
+ * During attaching we found a volume which does not
+ * exist according to the information in the volume
+ * table. This must have happened due to an unclean
+ * reboot while the volume was being removed. Discard
+ * these eraseblocks.
+ */
+ ubi_msg(ubi, "finish volume %d removal", av->vol_id);
+ ubi_remove_av(ai, av);
+ } else if (av) {
+ err = check_av(vol, av);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_read_volume_table - read the volume table.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function reads the volume table, checks it, recovers from errors,
+ * or creates it if needed. Returns zero in case of success and a negative
+ * error code in case of failure.
+ */
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ int err;
+ struct ubi_ainf_volume *av;
+
+ empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
+
+ /*
+ * The number of supported volumes is limited by the eraseblock size
+ * and by the UBI_MAX_VOLUMES constant.
+ */
+ ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
+ if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
+ ubi->vtbl_slots = UBI_MAX_VOLUMES;
+
+ ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
+ ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
+
+ av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID);
+ if (!av) {
+ /*
+ * No logical eraseblocks belonging to the layout volume were
+ * found. This could mean that the flash is just empty. In
+ * this case we create an empty layout volume.
+ *
+ * But if the flash is not empty, this must be corruption or
+ * the MTD device just contains garbage.
+ */
+ if (ai->is_empty) {
+ ubi->vtbl = create_empty_lvol(ubi, ai);
+ if (IS_ERR(ubi->vtbl))
+ return PTR_ERR(ubi->vtbl);
+ } else {
+ ubi_err(ubi, "the layout volume was not found");
+ return -EINVAL;
+ }
+ } else {
+ if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
+ /* This must not happen with proper UBI images */
+ ubi_err(ubi, "too many LEBs (%d) in layout volume",
+ av->leb_count);
+ return -EINVAL;
+ }
+
+ ubi->vtbl = process_lvol(ubi, ai, av);
+ if (IS_ERR(ubi->vtbl))
+ return PTR_ERR(ubi->vtbl);
+ }
+
+ ubi->avail_pebs = ubi->good_peb_count - ubi->corr_peb_count;
+
+ /*
+ * The layout volume is OK, initialize the corresponding in-RAM data
+ * structures.
+ */
+ err = init_volumes(ubi, ai, ubi->vtbl);
+ if (err)
+ goto out_free;
+
+ /*
+ * Make sure that the attaching information is consistent with the
+ * information stored in the volume table.
+ */
+ err = check_attaching_info(ubi, ai);
+ if (err)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ vfree(ubi->vtbl);
+ ubi_free_all_volumes(ubi);
+ return err;
+}
+
+/**
+ * self_vtbl_check - check volume table.
+ * @ubi: UBI device description object
+ */
+static void self_vtbl_check(const struct ubi_device *ubi)
+{
+ if (!ubi_dbg_chk_gen(ubi))
+ return;
+
+ if (vtbl_check(ubi, ubi->vtbl)) {
+ ubi_err(ubi, "self-check failed");
+ BUG();
+ }
+}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
new file mode 100644
index 0000000000..26a214f016
--- /dev/null
+++ b/drivers/mtd/ubi/wl.c
@@ -0,0 +1,2159 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
+ */
+
+/*
+ * UBI wear-leveling sub-system.
+ *
+ * This sub-system is responsible for wear-leveling. It works in terms of
+ * physical eraseblocks and erase counters and knows nothing about logical
+ * eraseblocks, volumes, etc. From this sub-system's perspective all physical
+ * eraseblocks are of two types - used and free. Used physical eraseblocks are
+ * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
+ * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
+ *
+ * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
+ * counter header. The rest of the physical eraseblock contains only %0xFF
+ * bytes.
+ *
+ * When physical eraseblocks are returned to the WL sub-system by means of the
+ * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
+ * done asynchronously in the context of the per-UBI device background thread,
+ * which is also managed by the WL sub-system.
+ *
+ * The wear-leveling is ensured by means of moving the contents of used
+ * physical eraseblocks with low erase counter to free physical eraseblocks
+ * with high erase counter.
+ *
+ * If the WL sub-system fails to erase a physical eraseblock, it marks it as
+ * bad.
+ *
+ * This sub-system is also responsible for scrubbing. If a bit-flip is detected
+ * in a physical eraseblock, it has to be moved. Technically this is the same
+ * as moving it for wear-leveling reasons.
+ *
+ * As was said, for the UBI sub-system all physical eraseblocks are either
+ * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
+ * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
+ * RB-trees, as well as (temporarily) in the @wl->pq queue.
+ *
+ * When the WL sub-system returns a physical eraseblock, the physical
+ * eraseblock is protected from being moved for some "time". For this reason,
+ * the physical eraseblock is not directly moved from the @wl->free tree to the
+ * @wl->used tree. There is a protection queue in between where this
+ * physical eraseblock is temporarily stored (@wl->pq).
+ *
+ * All this protection stuff is needed because:
+ * o we don't want to move physical eraseblocks just after we have given them
+ * to the user; instead, we first want to let users fill them up with data;
+ *
+ * o there is a chance that the user will put the physical eraseblock very
+ * soon, so it makes sense not to move it for some time, but wait.
+ *
+ * Physical eraseblocks stay protected only for a limited time. But this "time"
+ * is measured in erase cycles. This is implemented with the help of the
+ * protection queue. Eraseblocks are put to the tail of this queue when they
+ * are returned by 'ubi_wl_get_peb()', and eraseblocks are removed from the
+ * head of the queue on each erase operation (for any eraseblock). So the
+ * length of the queue defines for how many (global) erase cycles PEBs stay
+ * protected (a worked example follows this comment).
+ *
+ * To put it differently, each physical eraseblock has 2 main states: free and
+ * used. The former state corresponds to the @wl->free tree. The latter state
+ * is split up into several sub-states:
+ * o the WL movement is allowed (@wl->used tree);
+ * o the WL movement is disallowed (@wl->erroneous) because the PEB is
+ * erroneous - e.g., there was a read error;
+ * o the WL movement is temporarily prohibited (@wl->pq queue);
+ * o scrubbing is needed (@wl->scrub tree).
+ *
+ * Depending on the sub-state, wear-leveling entries of the used physical
+ * eraseblocks may be kept in one of those structures.
+ *
+ * Note, in this implementation, we keep a small in-RAM object for each physical
+ * eraseblock. This is surely not a scalable solution. But it appears to be good
+ * enough for moderately large flashes and it is simple. In the future, one may
+ * re-work this sub-system and make it more scalable.
+ *
+ * At the moment this sub-system does not utilize the sequence number, which
+ * was introduced relatively recently. But it would be wise to do this because
+ * the sequence number of a logical eraseblock characterizes how old it is. For
+ * example, when we move a PEB with a low erase counter, and we need to pick a
+ * target PEB, we pick a PEB with the highest EC if our PEB is "old", and a
+ * target PEB with an average EC if our PEB is not very "old". This leaves
+ * room for future re-work of the WL sub-system.
+ */
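+
+/*
+ * A worked example of the protection queue timing (hypothetical numbers):
+ * with a queue of length 10, a PEB handed out by 'ubi_wl_get_peb()' stays
+ * protected for the next 10 erase operations performed on this UBI device,
+ * no matter which PEBs those erasures hit; only then does it become a
+ * candidate for wear-leveling movement.
+ */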
+
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include "ubi.h"
+#include "wl.h"
+
+/* Number of physical eraseblocks reserved for wear-leveling purposes */
+#define WL_RESERVED_PEBS 1
+
+/*
+ * Maximum difference between two erase counters. If this threshold is
+ * exceeded, the WL sub-system starts moving data from used physical
+ * eraseblocks with low erase counter to free physical eraseblocks with high
+ * erase counter.
+ */
+#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
+
+/*
+ * When a physical eraseblock is moved, the WL sub-system has to pick the target
+ * physical eraseblock to move to. The simplest way would be just to pick the
+ * one with the highest erase counter. But in certain workloads this could lead
+ * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
+ * situation where the picked physical eraseblock is constantly erased after the
+ * data is written to it. So, we have a constant which limits the highest erase
+ * counter of the free physical eraseblock to pick. Namely, the WL sub-system
+ * does not pick eraseblocks with erase counter greater than the lowest erase
+ * counter plus %WL_FREE_MAX_DIFF.
+ */
+#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
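+
+/*
+ * For illustration (hypothetical numbers): if the lowest erase counter among
+ * the free PEBs is 100 and UBI_WL_THRESHOLD is 4096, only free PEBs with an
+ * erase counter below 100 + 2*4096 = 8292 are eligible as move targets.
+ */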
+
+/*
+ * Maximum number of consecutive background thread failures which is enough to
+ * switch to read-only mode.
+ */
+#define WL_MAX_FAILURES 32
+
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root);
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e);
+
+/**
+ * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
+ * @e: the wear-leveling entry to add
+ * @root: the root of the tree
+ *
+ * Note, we use (erase counter, physical eraseblock number) pairs as keys in
+ * the @ubi->used and @ubi->free RB-trees.
+ */
+static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
+{
+ struct rb_node **p, *parent = NULL;
+
+ p = &root->rb_node;
+ while (*p) {
+ struct ubi_wl_entry *e1;
+
+ parent = *p;
+ e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
+
+ if (e->ec < e1->ec)
+ p = &(*p)->rb_left;
+ else if (e->ec > e1->ec)
+ p = &(*p)->rb_right;
+ else {
+ ubi_assert(e->pnum != e1->pnum);
+ if (e->pnum < e1->pnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ }
+
+ rb_link_node(&e->u.rb, parent, p);
+ rb_insert_color(&e->u.rb, root);
+}
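+
+/*
+ * Ordering example for the (EC, PEB number) keys above (hypothetical
+ * numbers): an entry with EC 5/PEB 7 sorts before EC 5/PEB 9, which in turn
+ * sorts before any entry with EC 6, so ties on the erase counter are always
+ * broken by the PEB number.
+ */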
+
+/**
+ * wl_entry_destroy - destroy a wear-leveling entry.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to destroy
+ *
+ * This function destroys a wear leveling entry and removes
+ * the reference from the lookup table.
+ */
+static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+ ubi->lookuptbl[e->pnum] = NULL;
+ kmem_cache_free(ubi_wl_entry_slab, e);
+}
+
+/**
+ * do_work - do one pending work.
+ * @ubi: UBI device description object
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int do_work(struct ubi_device *ubi)
+{
+ int err;
+ struct ubi_work *wrk;
+
+ cond_resched();
+
+ /*
+ * @ubi->work_sem is used to synchronize with the workers. Workers take
+ * it in read mode, so many of them may be doing work at a time. But
+ * the queue flush code has to be sure the whole queue of works is
+ * done, and it takes the semaphore in write mode.
+ */
+ down_read(&ubi->work_sem);
+ spin_lock(&ubi->wl_lock);
+ if (list_empty(&ubi->works)) {
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
+ return 0;
+ }
+
+ wrk = list_entry(ubi->works.next, struct ubi_work, list);
+ list_del(&wrk->list);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Call the worker function. Do not touch the work structure
+ * after this call as it will have been freed or reused by that
+ * time by the worker function.
+ */
+ err = wrk->func(ubi, wrk, 0);
+ if (err)
+ ubi_err(ubi, "work failed with error code %d", err);
+ up_read(&ubi->work_sem);
+
+ return err;
+}
+
+/**
+ * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
+ * @e: the wear-leveling entry to check
+ * @root: the root of the tree
+ *
+ * This function returns non-zero if @e is in the @root RB-tree and zero if it
+ * is not.
+ */
+static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
+{
+ struct rb_node *p;
+
+ p = root->rb_node;
+ while (p) {
+ struct ubi_wl_entry *e1;
+
+ e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
+
+ if (e->pnum == e1->pnum) {
+ ubi_assert(e == e1);
+ return 1;
+ }
+
+ if (e->ec < e1->ec)
+ p = p->rb_left;
+ else if (e->ec > e1->ec)
+ p = p->rb_right;
+ else {
+ ubi_assert(e->pnum != e1->pnum);
+ if (e->pnum < e1->pnum)
+ p = p->rb_left;
+ else
+ p = p->rb_right;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * in_pq - check if a wear-leveling entry is present in the protection queue.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ *
+ * This function returns non-zero if @e is in the protection queue and zero
+ * if it is not.
+ */
+static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+ struct ubi_wl_entry *p;
+ int i;
+
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
+ list_for_each_entry(p, &ubi->pq[i], u.list)
+ if (p == e)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * prot_queue_add - add physical eraseblock to the protection queue.
+ * @ubi: UBI device description object
+ * @e: the physical eraseblock to add
+ *
+ * This function adds @e to the tail of the protection queue @ubi->pq, where
+ * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
+ * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock has
+ * to be held.
+ */
+static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+ int pq_tail = ubi->pq_head - 1;
+
+ if (pq_tail < 0)
+ pq_tail = UBI_PROT_QUEUE_LEN - 1;
+ ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
+ list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
+ dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
+}
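+
+/*
+ * A sketch of the circular arithmetic above (hypothetical state): with
+ * UBI_PROT_QUEUE_LEN == 10 and @pq_head == 0, a new entry lands in
+ * @ubi->pq[9], i.e. the full queue length of erase operations has to pass
+ * before 'serve_prot_queue()' reaches its list.
+ */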
+
+/**
+ * find_wl_entry - find wear-leveling entry closest to certain erase counter.
+ * @ubi: UBI device description object
+ * @root: the RB-tree to search
+ * @diff: maximum possible difference from the smallest erase counter
+ *
+ * This function looks for a wear leveling entry with erase counter closest to
+ * min + @diff, where min is the smallest erase counter.
+ */
+static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root, int diff)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e;
+ int max;
+
+ e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+ max = e->ec + diff;
+
+ p = root->rb_node;
+ while (p) {
+ struct ubi_wl_entry *e1;
+
+ e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
+ if (e1->ec >= max)
+ p = p->rb_left;
+ else {
+ p = p->rb_right;
+ e = e1;
+ }
+ }
+
+ return e;
+}
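+
+/*
+ * Walk-through of the search above (hypothetical tree): with free erase
+ * counters {100, 105, 120, 300} and @diff == 200, @max is 300, so the
+ * rightmost entry with an EC below 300 - here the one with EC 120 - is
+ * returned.
+ */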
+
+/**
+ * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
+ * @ubi: UBI device description object
+ * @root: the RB-tree to search
+ *
+ * This function looks for a wear-leveling entry with a medium erase counter,
+ * but not greater than or equal to the lowest erase counter plus
+ * %WL_FREE_MAX_DIFF/2.
+ */
+static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root)
+{
+ struct ubi_wl_entry *e, *first, *last;
+
+ first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+ last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
+
+ if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
+ e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
+
+ /*
+ * If no fastmap has been written and this WL entry can be used
+ * as an anchor PEB, hold it back and return the second best
+ * WL entry such that fastmap can use the anchor PEB later.
+ */
+ e = may_reserve_for_fm(ubi, e, root);
+ } else
+ e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
+
+ return e;
+}
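+
+/*
+ * Design note on the branch above: when the spread between the lowest and
+ * highest free erase counter is already below WL_FREE_MAX_DIFF, any entry is
+ * "mean enough", so the tree root is taken as a cheap approximation instead
+ * of searching for a true median.
+ */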
+
+/**
+ * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
+ * refill_wl_user_pool().
+ * @ubi: UBI device description object
+ *
+ * This function returns a wear leveling entry in case of success and
+ * NULL in case of failure.
+ */
+static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+
+ e = find_mean_wl_entry(ubi, &ubi->free);
+ if (!e) {
+ ubi_err(ubi, "no free eraseblocks");
+ return NULL;
+ }
+
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+
+ /*
+ * Move the physical eraseblock to the protection queue where it will
+ * be protected from being moved for some time.
+ */
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+ dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+
+ return e;
+}
+
+/**
+ * prot_queue_del - remove a physical eraseblock from the protection queue.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to remove
+ *
+ * This function deletes PEB @pnum from the protection queue and returns zero
+ * in case of success and %-ENODEV if the PEB was not found.
+ */
+static int prot_queue_del(struct ubi_device *ubi, int pnum)
+{
+ struct ubi_wl_entry *e;
+
+ e = ubi->lookuptbl[pnum];
+ if (!e)
+ return -ENODEV;
+
+ if (self_check_in_pq(ubi, e))
+ return -ENODEV;
+
+ list_del(&e->u.list);
+ dbg_wl("deleted PEB %d from the protection queue", e->pnum);
+ return 0;
+}
+
+/**
+ * sync_erase - synchronously erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @e: the physical eraseblock to erase
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int torture)
+{
+ int err;
+ struct ubi_ec_hdr *ec_hdr;
+ unsigned long long ec = e->ec;
+
+ dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
+
+ err = self_check_ec(ubi, e->pnum, e->ec);
+ if (err)
+ return -EINVAL;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ err = ubi_io_sync_erase(ubi, e->pnum, torture);
+ if (err < 0)
+ goto out_free;
+
+ ec += err;
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ /*
+ * Erase counter overflow. Upgrade UBI and use 64-bit
+ * erase counters internally.
+ */
+ ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
+ e->pnum, ec);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
+
+ ec_hdr->ec = cpu_to_be64(ec);
+
+ err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
+ if (err)
+ goto out_free;
+
+ e->ec = ec;
+ spin_lock(&ubi->wl_lock);
+ if (e->ec > ubi->max_ec)
+ ubi->max_ec = e->ec;
+ spin_unlock(&ubi->wl_lock);
+
+out_free:
+ kfree(ec_hdr);
+ return err;
+}
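+
+/*
+ * Note on the 'ec += err' arithmetic above: 'ubi_io_sync_erase()' returns
+ * the number of erasures it performed (torturing erases the PEB several
+ * times), so the erase counter advances by that amount rather than by one.
+ */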
+
+/**
+ * serve_prot_queue - check if it is time to stop protecting PEBs.
+ * @ubi: UBI device description object
+ *
+ * This function is called after each erase operation and removes PEBs from the
+ * tail of the protection queue. These PEBs have been protected for long enough
+ * and should be moved to the used tree.
+ */
+static void serve_prot_queue(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e, *tmp;
+ int count;
+
+ /*
+ * There may be several protected physical eraseblocks to remove,
+ * process them all.
+ */
+repeat:
+ count = 0;
+ spin_lock(&ubi->wl_lock);
+ list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
+ dbg_wl("PEB %d EC %d protection over, move to used tree",
+ e->pnum, e->ec);
+
+ list_del(&e->u.list);
+ wl_tree_add(e, &ubi->used);
+ if (count++ > 32) {
+ /*
+ * Let's be nice and avoid holding the spinlock for
+ * too long.
+ */
+ spin_unlock(&ubi->wl_lock);
+ cond_resched();
+ goto repeat;
+ }
+ }
+
+ ubi->pq_head += 1;
+ if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
+ ubi->pq_head = 0;
+ ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
+ spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * __schedule_ubi_work - schedule a work.
+ * @ubi: UBI device description object
+ * @wrk: the work to schedule
+ *
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list. Can only be used if @ubi->work_sem is already held in read mode!
+ */
+static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+{
+ spin_lock(&ubi->wl_lock);
+ list_add_tail(&wrk->list, &ubi->works);
+ ubi_assert(ubi->works_count >= 0);
+ ubi->works_count += 1;
+ if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
+ wake_up_process(ubi->bgt_thread);
+ spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * schedule_ubi_work - schedule a work.
+ * @ubi: UBI device description object
+ * @wrk: the work to schedule
+ *
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list.
+ */
+static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+{
+ down_read(&ubi->work_sem);
+ __schedule_ubi_work(ubi, wrk);
+ up_read(&ubi->work_sem);
+}
+
+static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+ int shutdown);
+
+/**
+ * schedule_erase - schedule an erase work.
+ * @ubi: UBI device description object
+ * @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if the physical eraseblock has to be tortured
+ * @nested: denotes whether the work_sem is already held
+ *
+ * This function returns zero in case of success and %-ENOMEM in case of
+ * failure.
+ */
+static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int vol_id, int lnum, int torture, bool nested)
+{
+ struct ubi_work *wl_wrk;
+
+ ubi_assert(e);
+
+ dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
+ e->pnum, e->ec, torture);
+
+ wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wl_wrk)
+ return -ENOMEM;
+
+ wl_wrk->func = &erase_worker;
+ wl_wrk->e = e;
+ wl_wrk->vol_id = vol_id;
+ wl_wrk->lnum = lnum;
+ wl_wrk->torture = torture;
+
+ if (nested)
+ __schedule_ubi_work(ubi, wl_wrk);
+ else
+ schedule_ubi_work(ubi, wl_wrk);
+ return 0;
+}
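+
+/*
+ * Usage note: callers that already hold @ubi->work_sem in read mode (e.g. a
+ * worker re-scheduling a failed erasure) must pass nested=true so that the
+ * semaphore is not taken twice; see '__erase_worker()' below for an example.
+ */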
+
+static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
+/**
+ * do_sync_erase - run the erase worker synchronously.
+ * @ubi: UBI device description object
+ * @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int vol_id, int lnum, int torture)
+{
+ struct ubi_work wl_wrk;
+
+ dbg_wl("sync erase of PEB %i", e->pnum);
+
+ wl_wrk.e = e;
+ wl_wrk.vol_id = vol_id;
+ wl_wrk.lnum = lnum;
+ wl_wrk.torture = torture;
+
+ return __erase_worker(ubi, &wl_wrk);
+}
+
+static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
+/**
+ * wear_leveling_worker - wear-leveling worker function.
+ * @ubi: UBI device description object
+ * @wrk: the work object
+ * @shutdown: non-zero if the worker has to free memory and exit
+ * because the WL-subsystem is shutting down
+ *
+ * This function copies the contents of a more worn-out physical eraseblock to
+ * a less worn-out one. Returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ int shutdown)
+{
+ int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+ int erase = 0, keep = 0, vol_id = -1, lnum = -1;
+ struct ubi_wl_entry *e1, *e2;
+ struct ubi_vid_io_buf *vidb;
+ struct ubi_vid_hdr *vid_hdr;
+ int dst_leb_clean = 0;
+
+ kfree(wrk);
+ if (shutdown)
+ return 0;
+
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb)
+ return -ENOMEM;
+
+ vid_hdr = ubi_get_vid_hdr(vidb);
+
+ down_read(&ubi->fm_eba_sem);
+ mutex_lock(&ubi->move_mutex);
+ spin_lock(&ubi->wl_lock);
+ ubi_assert(!ubi->move_from && !ubi->move_to);
+ ubi_assert(!ubi->move_to_put);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (!next_peb_for_wl(ubi) ||
+#else
+ if (!ubi->free.rb_node ||
+#endif
+ (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
+ /*
+ * No free physical eraseblocks? Well, they must be waiting in
+ * the queue to be erased. Cancel movement - it will be
+ * triggered again when a free physical eraseblock appears.
+ *
+ * No used physical eraseblocks? They must be temporarily
+ * protected from being moved. They will be moved to the
+ * @ubi->used tree later and the wear-leveling will be
+ * triggered again.
+ */
+ dbg_wl("cancel WL, a list is empty: free %d, used %d",
+ !ubi->free.rb_node, !ubi->used.rb_node);
+ goto out_cancel;
+ }
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ e1 = find_anchor_wl_entry(&ubi->used);
+ if (e1 && ubi->fm_anchor &&
+ (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+ ubi->fm_do_produce_anchor = 1;
+ /*
+ * fm_anchor is no longer considered a good anchor.
+ * NULL assignment also prevents multiple wear level checks
+ * of this PEB.
+ */
+ wl_tree_add(ubi->fm_anchor, &ubi->free);
+ ubi->fm_anchor = NULL;
+ ubi->free_count++;
+ }
+
+ if (ubi->fm_do_produce_anchor) {
+ if (!e1)
+ goto out_cancel;
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
+ rb_erase(&e1->u.rb, &ubi->used);
+ dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
+ ubi->fm_do_produce_anchor = 0;
+ } else if (!ubi->scrub.rb_node) {
+#else
+ if (!ubi->scrub.rb_node) {
+#endif
+ /*
+ * Now pick the least worn-out used physical eraseblock and a
+ * highly worn-out free physical eraseblock. If the erase
+ * counters differ enough, start wear-leveling.
+ */
+ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+ dbg_wl("no WL needed: min used EC %d, max free EC %d",
+ e1->ec, e2->ec);
+
+ /* Give the unused PEB back */
+ wl_tree_add(e2, &ubi->free);
+ ubi->free_count++;
+ goto out_cancel;
+ }
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
+ rb_erase(&e1->u.rb, &ubi->used);
+ dbg_wl("move PEB %d EC %d to PEB %d EC %d",
+ e1->pnum, e1->ec, e2->pnum, e2->ec);
+ } else {
+ /* Perform scrubbing */
+ scrubbing = 1;
+ e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ self_check_in_wl_tree(ubi, e1, &ubi->scrub);
+ rb_erase(&e1->u.rb, &ubi->scrub);
+ dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
+ }
+
+ ubi->move_from = e1;
+ ubi->move_to = e2;
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
+ * We so far do not know which logical eraseblock our physical
+ * eraseblock (@e1) belongs to. We have to read the volume identifier
+ * header first.
+ *
+ * Note, we are protected from this PEB being unmapped and erased. The
+ * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
+ * which is being moved was unmapped.
+ */
+
+ err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ dst_leb_clean = 1;
+ if (err == UBI_IO_FF) {
+ /*
+ * We are trying to move a PEB without a VID header. UBI
+ * always writes VID headers shortly after the PEB was
+ * given out, so we have a situation where the user has
+ * not yet had a chance to write it, because it was
+ * preempted. So add this PEB to the protection queue
+ * for now, because presumably more data will be written
+ * there (including the missing VID header), and then
+ * we'll move it.
+ */
+ dbg_wl("PEB %d has no VID header", e1->pnum);
+ protect = 1;
+ goto out_not_moved;
+ } else if (err == UBI_IO_FF_BITFLIPS) {
+ /*
+ * The same situation as %UBI_IO_FF, but bit-flips were
+ * detected. It is better to schedule this PEB for
+ * scrubbing.
+ */
+ dbg_wl("PEB %d has no VID header but has bit-flips",
+ e1->pnum);
+ scrubbing = 1;
+ goto out_not_moved;
+ } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
+ /*
+ * While a full scan would detect interrupted erasures
+ * at attach time, we can face them here when attaching
+ * from a fastmap.
+ */
+ dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
+ e1->pnum);
+ erase = 1;
+ goto out_not_moved;
+ }
+
+ ubi_err(ubi, "error %d while reading VID header from PEB %d",
+ err, e1->pnum);
+ goto out_error;
+ }
+
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+
+ err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
+ if (err) {
+ if (err == MOVE_CANCEL_RACE) {
+ /*
+ * The LEB has not been moved because the volume is
+ * being deleted or the PEB has been put meanwhile. We
+ * should prevent this PEB from being selected for
+ * wear-leveling movement again, so put it to the
+ * protection queue.
+ */
+ protect = 1;
+ dst_leb_clean = 1;
+ goto out_not_moved;
+ }
+ if (err == MOVE_RETRY) {
+ scrubbing = 1;
+ dst_leb_clean = 1;
+ goto out_not_moved;
+ }
+ if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
+ err == MOVE_TARGET_RD_ERR) {
+ /*
+ * Target PEB had bit-flips or write error - torture it.
+ */
+ torture = 1;
+ keep = 1;
+ goto out_not_moved;
+ }
+
+ if (err == MOVE_SOURCE_RD_ERR) {
+ /*
+ * An error happened while reading the source PEB. Do
+ * not switch to R/O mode in this case, and give the
+ * upper layers a possibility to recover from this,
+ * e.g. by unmapping corresponding LEB. Instead, just
+ * put this PEB to the @ubi->erroneous list to prevent
+ * UBI from trying to move it over and over again.
+ */
+ if (ubi->erroneous_peb_count > ubi->max_erroneous) {
+ ubi_err(ubi, "too many erroneous eraseblocks (%d)",
+ ubi->erroneous_peb_count);
+ goto out_error;
+ }
+ dst_leb_clean = 1;
+ erroneous = 1;
+ goto out_not_moved;
+ }
+
+ if (err < 0)
+ goto out_error;
+
+ ubi_assert(0);
+ }
+
+ /* The PEB has been successfully moved */
+ if (scrubbing)
+ ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
+ e1->pnum, vol_id, lnum, e2->pnum);
+ ubi_free_vid_buf(vidb);
+
+ spin_lock(&ubi->wl_lock);
+ if (!ubi->move_to_put) {
+ wl_tree_add(e2, &ubi->used);
+ e2 = NULL;
+ }
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+
+ err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
+ if (err) {
+ if (e2) {
+ spin_lock(&ubi->wl_lock);
+ wl_entry_destroy(ubi, e2);
+ spin_unlock(&ubi->wl_lock);
+ }
+ goto out_ro;
+ }
+
+ if (e2) {
+ /*
+ * Well, the target PEB was put meanwhile, schedule it for
+ * erasure.
+ */
+ dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
+ e2->pnum, vol_id, lnum);
+ err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
+ if (err)
+ goto out_ro;
+ }
+
+ dbg_wl("done");
+ mutex_unlock(&ubi->move_mutex);
+ up_read(&ubi->fm_eba_sem);
+ return 0;
+
+ /*
+ * For some reason the LEB was not moved; it might be an error or
+ * something else. @e1 was not changed, so return it back. @e2 might
+ * have been changed, so schedule it for erasure.
+ */
+out_not_moved:
+ if (vol_id != -1)
+ dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
+ e1->pnum, vol_id, lnum, e2->pnum, err);
+ else
+ dbg_wl("cancel moving PEB %d to PEB %d (%d)",
+ e1->pnum, e2->pnum, err);
+ spin_lock(&ubi->wl_lock);
+ if (protect)
+ prot_queue_add(ubi, e1);
+ else if (erroneous) {
+ wl_tree_add(e1, &ubi->erroneous);
+ ubi->erroneous_peb_count += 1;
+ } else if (scrubbing)
+ wl_tree_add(e1, &ubi->scrub);
+ else if (keep)
+ wl_tree_add(e1, &ubi->used);
+ if (dst_leb_clean) {
+ wl_tree_add(e2, &ubi->free);
+ ubi->free_count++;
+ }
+
+ ubi_assert(!ubi->move_to_put);
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+
+ ubi_free_vid_buf(vidb);
+ if (dst_leb_clean) {
+ ensure_wear_leveling(ubi, 1);
+ } else {
+ err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
+ if (err)
+ goto out_ro;
+ }
+
+ if (erase) {
+ err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
+ if (err)
+ goto out_ro;
+ }
+
+ mutex_unlock(&ubi->move_mutex);
+ up_read(&ubi->fm_eba_sem);
+ return 0;
+
+out_error:
+ if (vol_id != -1)
+ ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
+ err, e1->pnum, e2->pnum);
+ else
+ ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
+ err, e1->pnum, vol_id, lnum, e2->pnum);
+ spin_lock(&ubi->wl_lock);
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
+ wl_entry_destroy(ubi, e1);
+ wl_entry_destroy(ubi, e2);
+ spin_unlock(&ubi->wl_lock);
+
+ ubi_free_vid_buf(vidb);
+
+out_ro:
+ ubi_ro_mode(ubi);
+ mutex_unlock(&ubi->move_mutex);
+ up_read(&ubi->fm_eba_sem);
+ ubi_assert(err != 0);
+ return err < 0 ? err : -EIO;
+
+out_cancel:
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ mutex_unlock(&ubi->move_mutex);
+ up_read(&ubi->fm_eba_sem);
+ ubi_free_vid_buf(vidb);
+ return 0;
+}
+
+/**
+ * ensure_wear_leveling - schedule wear-leveling if it is needed.
+ * @ubi: UBI device description object
+ * @nested: set to non-zero if this function is called from UBI worker
+ *
+ * This function checks if it is time to start wear-leveling and schedules it
+ * if yes. This function returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
+{
+ int err = 0;
+ struct ubi_work *wrk;
+
+ spin_lock(&ubi->wl_lock);
+ if (ubi->wl_scheduled)
+ /* Wear-leveling is already in the work queue */
+ goto out_unlock;
+
+ /*
+ * If the ubi->scrub tree is not empty, scrubbing is needed, and the
+ * WL worker has to be scheduled anyway.
+ */
+ if (!ubi->scrub.rb_node) {
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (!need_wear_leveling(ubi))
+ goto out_unlock;
+#else
+ struct ubi_wl_entry *e1;
+ struct ubi_wl_entry *e2;
+
+ if (!ubi->used.rb_node || !ubi->free.rb_node)
+ /* No physical eraseblocks - no deal */
+ goto out_unlock;
+
+ /*
+ * We schedule wear-leveling only if the difference between the
+ * lowest erase counter of used physical eraseblocks and a high
+ * erase counter of free physical eraseblocks is greater than
+ * %UBI_WL_THRESHOLD.
+ */
+ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+ e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+
+ if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
+ goto out_unlock;
+#endif
+ dbg_wl("schedule wear-leveling");
+ } else
+ dbg_wl("schedule scrubbing");
+
+ ubi->wl_scheduled = 1;
+ spin_unlock(&ubi->wl_lock);
+
+ wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wrk) {
+ err = -ENOMEM;
+ goto out_cancel;
+ }
+
+ wrk->func = &wear_leveling_worker;
+ if (nested)
+ __schedule_ubi_work(ubi, wrk);
+ else
+ schedule_ubi_work(ubi, wrk);
+ return err;
+
+out_cancel:
+ spin_lock(&ubi->wl_lock);
+ ubi->wl_scheduled = 0;
+out_unlock:
+ spin_unlock(&ubi->wl_lock);
+ return err;
+}
+
+/**
+ * __erase_worker - physical eraseblock erase worker function.
+ * @ubi: UBI device description object
+ * @wl_wrk: the work object
+ *
+ * This function erases a physical eraseblock and performs torture testing if
+ * needed. It also takes care of marking the physical eraseblock bad if
+ * needed. Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
+{
+ struct ubi_wl_entry *e = wl_wrk->e;
+ int pnum = e->pnum;
+ int vol_id = wl_wrk->vol_id;
+ int lnum = wl_wrk->lnum;
+ int err, available_consumed = 0;
+
+ dbg_wl("erase PEB %d EC %d LEB %d:%d",
+ pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
+
+ err = sync_erase(ubi, e, wl_wrk->torture);
+ if (!err) {
+ spin_lock(&ubi->wl_lock);
+
+ if (!ubi->fm_disabled && !ubi->fm_anchor &&
+ e->pnum < UBI_FM_MAX_START) {
+ /*
+ * Abort anchor production; if needed, it will be
+ * enabled again by the wear-leveling started below.
+ */
+ ubi->fm_anchor = e;
+ ubi->fm_do_produce_anchor = 0;
+ } else {
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ }
+
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * One more erase operation has happened, take care of
+ * protected physical eraseblocks.
+ */
+ serve_prot_queue(ubi);
+
+ /* And take care about wear-leveling */
+ err = ensure_wear_leveling(ubi, 1);
+ return err;
+ }
+
+ ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
+
+ if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
+ err == -EBUSY) {
+ int err1;
+
+ /* Re-schedule the LEB for erasure */
+ err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
+ if (err1) {
+ spin_lock(&ubi->wl_lock);
+ wl_entry_destroy(ubi, e);
+ spin_unlock(&ubi->wl_lock);
+ err = err1;
+ goto out_ro;
+ }
+ return err;
+ }
+
+ spin_lock(&ubi->wl_lock);
+ wl_entry_destroy(ubi, e);
+ spin_unlock(&ubi->wl_lock);
+ if (err != -EIO)
+ /*
+ * If this is not %-EIO, we have no idea what to do. Scheduling
+ * this physical eraseblock for erasure again would cause
+ * errors again and again. Well, let's switch to R/O mode.
+ */
+ goto out_ro;
+
+ /* It is %-EIO, the PEB went bad */
+
+ if (!ubi->bad_allowed) {
+ ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
+ goto out_ro;
+ }
+
+ spin_lock(&ubi->volumes_lock);
+ if (ubi->beb_rsvd_pebs == 0) {
+ if (ubi->avail_pebs == 0) {
+ spin_unlock(&ubi->volumes_lock);
+ ubi_err(ubi, "no reserved/available physical eraseblocks");
+ goto out_ro;
+ }
+ ubi->avail_pebs -= 1;
+ available_consumed = 1;
+ }
+ spin_unlock(&ubi->volumes_lock);
+
+ ubi_msg(ubi, "mark PEB %d as bad", pnum);
+ err = ubi_io_mark_bad(ubi, pnum);
+ if (err)
+ goto out_ro;
+
+ spin_lock(&ubi->volumes_lock);
+ if (ubi->beb_rsvd_pebs > 0) {
+ if (available_consumed) {
+ /*
+ * The amount of reserved PEBs increased since we last
+ * checked.
+ */
+ ubi->avail_pebs += 1;
+ available_consumed = 0;
+ }
+ ubi->beb_rsvd_pebs -= 1;
+ }
+ ubi->bad_peb_count += 1;
+ ubi->good_peb_count -= 1;
+ ubi_calculate_reserved(ubi);
+ if (available_consumed)
+ ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
+ else if (ubi->beb_rsvd_pebs)
+ ubi_msg(ubi, "%d PEBs left in the reserve",
+ ubi->beb_rsvd_pebs);
+ else
+ ubi_warn(ubi, "last PEB from the reserve was used");
+ spin_unlock(&ubi->volumes_lock);
+
+ return err;
+
+out_ro:
+ if (available_consumed) {
+ spin_lock(&ubi->volumes_lock);
+ ubi->avail_pebs += 1;
+ spin_unlock(&ubi->volumes_lock);
+ }
+ ubi_ro_mode(ubi);
+ return err;
+}
+
+static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+ int shutdown)
+{
+ int ret;
+
+ if (shutdown) {
+ struct ubi_wl_entry *e = wl_wrk->e;
+
+ dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
+ kfree(wl_wrk);
+ wl_entry_destroy(ubi, e);
+ return 0;
+ }
+
+ ret = __erase_worker(ubi, wl_wrk);
+ kfree(wl_wrk);
+ return ret;
+}
+
+/**
+ * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
+ * @ubi: UBI device description object
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @pnum: physical eraseblock to return
+ * @torture: if this physical eraseblock has to be tortured
+ *
+ * This function is called to return physical eraseblock @pnum to the pool of
+ * free physical eraseblocks. The @torture flag has to be set if an I/O error
+ * occurred to this @pnum and it has to be tested. This function returns zero
+ * in case of success, and a negative error code in case of failure.
+ */
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture)
+{
+ int err;
+ struct ubi_wl_entry *e;
+
+ dbg_wl("PEB %d", pnum);
+ ubi_assert(pnum >= 0);
+ ubi_assert(pnum < ubi->peb_count);
+
+ down_read(&ubi->fm_protect);
+
+retry:
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+ if (!e) {
+ /*
+ * This WL entry has been removed due to errors by another
+ * process (e.g. the wear-leveling worker); that process
+ * (except __erase_worker, which cannot run concurrently
+ * with ubi_wl_put_peb) will have switched UBI to read-only
+ * mode at the same time, so just ignore this WL entry.
+ */
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_protect);
+ return 0;
+ }
+ if (e == ubi->move_from) {
+ /*
+ * User is putting the physical eraseblock which was selected to
+ * be moved. It will be scheduled for erasure in the
+ * wear-leveling worker.
+ */
+ dbg_wl("PEB %d is being moved, wait", pnum);
+ spin_unlock(&ubi->wl_lock);
+
+ /* Wait for the WL worker by taking the @ubi->move_mutex */
+ mutex_lock(&ubi->move_mutex);
+ mutex_unlock(&ubi->move_mutex);
+ goto retry;
+ } else if (e == ubi->move_to) {
+ /*
+ * User is putting the physical eraseblock which was selected
+ * as the target the data is moved to. It may happen if the EBA
+ * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
+ * but the WL sub-system has not put the PEB to the "used" tree
+ * yet, but it is about to do this. So we just set a flag which
+ * will tell the WL worker that the PEB is not needed anymore
+ * and should be scheduled for erasure.
+ */
+ dbg_wl("PEB %d is the target of data moving", pnum);
+ ubi_assert(!ubi->move_to_put);
+ ubi->move_to_put = 1;
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_protect);
+ return 0;
+ } else {
+ if (in_wl_tree(e, &ubi->used)) {
+ self_check_in_wl_tree(ubi, e, &ubi->used);
+ rb_erase(&e->u.rb, &ubi->used);
+ } else if (in_wl_tree(e, &ubi->scrub)) {
+ self_check_in_wl_tree(ubi, e, &ubi->scrub);
+ rb_erase(&e->u.rb, &ubi->scrub);
+ } else if (in_wl_tree(e, &ubi->erroneous)) {
+ self_check_in_wl_tree(ubi, e, &ubi->erroneous);
+ rb_erase(&e->u.rb, &ubi->erroneous);
+ ubi->erroneous_peb_count -= 1;
+ ubi_assert(ubi->erroneous_peb_count >= 0);
+ /* Erroneous PEBs should be tortured */
+ torture = 1;
+ } else {
+ err = prot_queue_del(ubi, e->pnum);
+ if (err) {
+ ubi_err(ubi, "PEB %d not found", pnum);
+ ubi_ro_mode(ubi);
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_protect);
+ return err;
+ }
+ }
+ }
+ spin_unlock(&ubi->wl_lock);
+
+ err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
+ if (err) {
+ spin_lock(&ubi->wl_lock);
+ wl_tree_add(e, &ubi->used);
+ spin_unlock(&ubi->wl_lock);
+ }
+
+ up_read(&ubi->fm_protect);
+ return err;
+}
+
+/**
+ * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to schedule
+ *
+ * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
+ * needs scrubbing. This function schedules a physical eraseblock for
+ * scrubbing which is done in background. This function returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
+{
+ struct ubi_wl_entry *e;
+
+ ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
+
+retry:
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+ if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
+ in_wl_tree(e, &ubi->erroneous)) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ if (e == ubi->move_to) {
+ /*
+ * This physical eraseblock was used to move data to. The data
+ * was moved but the PEB was not yet inserted into the proper
+ * tree. We should just wait a little and let the WL worker
+ * proceed.
+ */
+ spin_unlock(&ubi->wl_lock);
+ dbg_wl("the PEB %d is not in proper tree, retry", pnum);
+ yield();
+ goto retry;
+ }
+
+ if (in_wl_tree(e, &ubi->used)) {
+ self_check_in_wl_tree(ubi, e, &ubi->used);
+ rb_erase(&e->u.rb, &ubi->used);
+ } else {
+ int err;
+
+ err = prot_queue_del(ubi, e->pnum);
+ if (err) {
+ ubi_err(ubi, "PEB %d not found", pnum);
+ ubi_ro_mode(ubi);
+ spin_unlock(&ubi->wl_lock);
+ return err;
+ }
+ }
+
+ wl_tree_add(e, &ubi->scrub);
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Technically scrubbing is the same as wear-leveling, so it is done
+ * by the WL worker.
+ */
+ return ensure_wear_leveling(ubi, 0);
+}
+
+/**
+ * ubi_wl_flush - flush all pending works.
+ * @ubi: UBI device description object
+ * @vol_id: the volume id to flush for
+ * @lnum: the logical eraseblock number to flush for
+ *
+ * This function executes all pending works for a particular volume id /
+ * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
+ * acts as a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ int err = 0;
+ int found = 1;
+
+ /*
+ * Erase while the pending works queue is not empty, but not more than
+ * the number of currently pending works.
+ */
+ dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
+ vol_id, lnum, ubi->works_count);
+
+ while (found) {
+ struct ubi_work *wrk, *tmp;
+ found = 0;
+
+ down_read(&ubi->work_sem);
+ spin_lock(&ubi->wl_lock);
+ list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
+ if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
+ (lnum == UBI_ALL || wrk->lnum == lnum)) {
+ list_del(&wrk->list);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+ spin_unlock(&ubi->wl_lock);
+
+ err = wrk->func(ubi, wrk, 0);
+ if (err) {
+ up_read(&ubi->work_sem);
+ return err;
+ }
+
+ spin_lock(&ubi->wl_lock);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
+ }
+
+ /*
+ * Make sure all the works which have been done in parallel are
+ * finished.
+ */
+ down_write(&ubi->work_sem);
+ up_write(&ubi->work_sem);
+
+ return err;
+}
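+
+/*
+ * Usage sketch (hypothetical caller): 'ubi_wl_flush(ubi, UBI_ALL, UBI_ALL)'
+ * drains every pending work, while 'ubi_wl_flush(ubi, vol_id, lnum)' only
+ * executes the works queued for that particular LEB.
+ */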
+
+static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+ if (in_wl_tree(e, &ubi->scrub))
+ return false;
+ else if (in_wl_tree(e, &ubi->erroneous))
+ return false;
+ else if (ubi->move_from == e)
+ return false;
+ else if (ubi->move_to == e)
+ return false;
+
+ return true;
+}
+
+/**
+ * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to schedule
+ * @force: don't read the block, assume bitflips happened and take action.
+ *
+ * This function reads the given eraseblock and checks if bitflips occurred.
+ * In case of bitflips, the eraseblock is scheduled for scrubbing.
+ * If scrubbing is forced with @force, the eraseblock is not read,
+ * but scheduled for scrubbing right away.
+ *
+ * Returns:
+ * %-EINVAL, PEB is out of range
+ * %-ENOENT, PEB is no longer used by UBI
+ * %-EBUSY, PEB cannot be checked now or a check is currently running on it
+ * %-EAGAIN, bit flips happened but scrubbing is currently not possible
+ * %-EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
+ * %0, no bit flips detected
+ */
+int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
+{
+ int err = 0;
+ struct ubi_wl_entry *e;
+
+ if (pnum < 0 || pnum >= ubi->peb_count) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Pause all parallel work, otherwise it can happen that the
+ * erase worker frees a wl entry under us.
+ */
+ down_write(&ubi->work_sem);
+
+ /*
+ * Make sure that the wl entry does not change state while
+ * inspecting it.
+ */
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+ if (!e) {
+ spin_unlock(&ubi->wl_lock);
+ err = -ENOENT;
+ goto out_resume;
+ }
+
+ /*
+ * Does it make sense to check this PEB?
+ */
+ if (!scrub_possible(ubi, e)) {
+ spin_unlock(&ubi->wl_lock);
+ err = -EBUSY;
+ goto out_resume;
+ }
+ spin_unlock(&ubi->wl_lock);
+
+ if (!force) {
+ mutex_lock(&ubi->buf_mutex);
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+ mutex_unlock(&ubi->buf_mutex);
+ }
+
+ if (force || err == UBI_IO_BITFLIPS) {
+ /*
+ * Okay, bit flip happened, let's figure out what we can do.
+ */
+ spin_lock(&ubi->wl_lock);
+
+ /*
+ * Recheck. We released wl_lock, UBI might have killed the
+ * wl entry under us.
+ */
+ e = ubi->lookuptbl[pnum];
+ if (!e) {
+ spin_unlock(&ubi->wl_lock);
+ err = -ENOENT;
+ goto out_resume;
+ }
+
+ /*
+ * Need to re-check state
+ */
+ if (!scrub_possible(ubi, e)) {
+ spin_unlock(&ubi->wl_lock);
+ err = -EBUSY;
+ goto out_resume;
+ }
+
+ if (in_pq(ubi, e)) {
+ prot_queue_del(ubi, e->pnum);
+ wl_tree_add(e, &ubi->scrub);
+ spin_unlock(&ubi->wl_lock);
+
+ err = ensure_wear_leveling(ubi, 1);
+ } else if (in_wl_tree(e, &ubi->used)) {
+ rb_erase(&e->u.rb, &ubi->used);
+ wl_tree_add(e, &ubi->scrub);
+ spin_unlock(&ubi->wl_lock);
+
+ err = ensure_wear_leveling(ubi, 1);
+ } else if (in_wl_tree(e, &ubi->free)) {
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * This PEB is empty; we can schedule it for
+ * erasure right away. No wear-leveling needed.
+ */
+ err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
+ force ? 0 : 1, true);
+ } else {
+ spin_unlock(&ubi->wl_lock);
+ err = -EAGAIN;
+ }
+
+ if (!err && !force)
+ err = -EUCLEAN;
+ } else {
+ err = 0;
+ }
+
+out_resume:
+ up_write(&ubi->work_sem);
+out:
+
+ return err;
+}
+
+/**
+ * tree_destroy - destroy an RB-tree.
+ * @ubi: UBI device description object
+ * @root: the root of the tree to destroy
+ */
+static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
+{
+ struct rb_node *rb;
+ struct ubi_wl_entry *e;
+
+ rb = root->rb_node;
+ while (rb) {
+ if (rb->rb_left)
+ rb = rb->rb_left;
+ else if (rb->rb_right)
+ rb = rb->rb_right;
+ else {
+ e = rb_entry(rb, struct ubi_wl_entry, u.rb);
+
+ rb = rb_parent(rb);
+ if (rb) {
+ if (rb->rb_left == &e->u.rb)
+ rb->rb_left = NULL;
+ else
+ rb->rb_right = NULL;
+ }
+
+ wl_entry_destroy(ubi, e);
+ }
+ }
+}
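+
+/*
+ * Note on the loop above: it is an iterative post-order traversal - leaves
+ * are destroyed first and the parent's child pointer is cleared afterwards,
+ * which avoids recursion on potentially deep trees.
+ */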
+
+/**
+ * ubi_thread - UBI background thread.
+ * @u: the UBI device description object pointer
+ */
+int ubi_thread(void *u)
+{
+ int failures = 0;
+ struct ubi_device *ubi = u;
+
+ ubi_msg(ubi, "background thread \"%s\" started, PID %d",
+ ubi->bgt_name, task_pid_nr(current));
+
+ set_freezable();
+ for (;;) {
+ int err;
+
+ if (kthread_should_stop())
+ break;
+
+ if (try_to_freeze())
+ continue;
+
+ spin_lock(&ubi->wl_lock);
+ if (list_empty(&ubi->works) || ubi->ro_mode ||
+ !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Check kthread_should_stop() after we set the task
+ * state to guarantee that we either see the stop bit
+ * and exit or the task state is reset to runnable such
+ * that it's not scheduled out indefinitely and detects
+ * the stop bit at kthread_should_stop().
+ */
+ if (kthread_should_stop()) {
+ set_current_state(TASK_RUNNING);
+ break;
+ }
+
+ schedule();
+ continue;
+ }
+ spin_unlock(&ubi->wl_lock);
+
+ err = do_work(ubi);
+ if (err) {
+ ubi_err(ubi, "%s: work failed with error code %d",
+ ubi->bgt_name, err);
+ if (failures++ > WL_MAX_FAILURES) {
+ /*
+ * Too many failures, disable the thread and
+ * switch to read-only mode.
+ */
+ ubi_msg(ubi, "%s: %d consecutive failures",
+ ubi->bgt_name, WL_MAX_FAILURES);
+ ubi_ro_mode(ubi);
+ ubi->thread_enabled = 0;
+ continue;
+ }
+ } else
+ failures = 0;
+
+ cond_resched();
+ }
+
+ dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
+ ubi->thread_enabled = 0;
+ return 0;
+}
+
+/**
+ * shutdown_work - shutdown all pending works.
+ * @ubi: UBI device description object
+ */
+static void shutdown_work(struct ubi_device *ubi)
+{
+ while (!list_empty(&ubi->works)) {
+ struct ubi_work *wrk;
+
+ wrk = list_entry(ubi->works.next, struct ubi_work, list);
+ list_del(&wrk->list);
+ wrk->func(ubi, wrk, 1);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+ }
+}
+
+/**
+ * erase_aeb - erase a PEB given in UBI attach info PEB
+ * @ubi: UBI device description object
+ * @aeb: UBI attach info PEB
+ * @sync: If true, erase synchronously. Otherwise schedule for erasure
+ */
+static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
+{
+ struct ubi_wl_entry *e;
+ int err;
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+
+ if (sync) {
+ err = sync_erase(ubi, e, false);
+ if (err)
+ goto out_free;
+
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ } else {
+ err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
+ if (err)
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ wl_entry_destroy(ubi, e);
+
+ return err;
+}
+
+/**
+ * ubi_wl_init - initialize the WL sub-system using attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns zero in case of success, and a negative error code in
+ * case of failure.
+ */
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ int err, i, reserved_pebs, found_pebs = 0;
+ struct rb_node *rb1, *rb2;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp;
+ struct ubi_wl_entry *e;
+
+ ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
+ spin_lock_init(&ubi->wl_lock);
+ mutex_init(&ubi->move_mutex);
+ init_rwsem(&ubi->work_sem);
+ ubi->max_ec = ai->max_ec;
+ INIT_LIST_HEAD(&ubi->works);
+
+ sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
+
+ err = -ENOMEM;
+ ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
+ if (!ubi->lookuptbl)
+ return err;
+
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
+ INIT_LIST_HEAD(&ubi->pq[i]);
+ ubi->pq_head = 0;
+
+ ubi->free_count = 0;
+ list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
+ cond_resched();
+
+ err = erase_aeb(ubi, aeb, false);
+ if (err)
+ goto out_free;
+
+ found_pebs++;
+ }
+
+ list_for_each_entry(aeb, &ai->free, u.list) {
+ cond_resched();
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi_assert(e->ec >= 0);
+
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+
+ ubi->lookuptbl[e->pnum] = e;
+
+ found_pebs++;
+ }
+
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+ cond_resched();
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+
+ if (!aeb->scrub) {
+ dbg_wl("add PEB %d EC %d to the used tree",
+ e->pnum, e->ec);
+ wl_tree_add(e, &ubi->used);
+ } else {
+ dbg_wl("add PEB %d EC %d to the scrub tree",
+ e->pnum, e->ec);
+ wl_tree_add(e, &ubi->scrub);
+ }
+
+ found_pebs++;
+ }
+ }
+
+ list_for_each_entry(aeb, &ai->fastmap, u.list) {
+ cond_resched();
+
+ e = ubi_find_fm_block(ubi, aeb->pnum);
+
+ if (e) {
+ ubi_assert(!ubi->lookuptbl[e->pnum]);
+ ubi->lookuptbl[e->pnum] = e;
+ } else {
+ bool sync = false;
+
+ /*
+ * Usually old Fastmap PEBs are scheduled for erasure
+ * and we don't have to care about them, but if we face
+ * a power cut before scheduling them we need to
+ * take care of them here.
+ */
+ if (ubi->lookuptbl[aeb->pnum])
+ continue;
+
+ /*
+ * The fastmap update code may fail to find a free PEB to
+ * write the fastmap anchor to, and then reuse the current
+ * fastmap anchor PEB. When this PEB gets erased and a
+ * power cut happens before it is written again, we must
+ * make sure that the fastmap attach code does not find
+ * any outdated fastmap anchors; hence we erase the
+ * outdated fastmap anchor PEBs synchronously here.
+ */
+ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
+ sync = true;
+
+ err = erase_aeb(ubi, aeb, sync);
+ if (err)
+ goto out_free;
+ }
+
+ found_pebs++;
+ }
+
+ dbg_wl("found %i PEBs", found_pebs);
+
+ ubi_assert(ubi->good_peb_count == found_pebs);
+
+ reserved_pebs = WL_RESERVED_PEBS;
+ ubi_fastmap_init(ubi, &reserved_pebs);
+
+ if (ubi->avail_pebs < reserved_pebs) {
+ ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
+ ubi->avail_pebs, reserved_pebs);
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ err = -ENOSPC;
+ goto out_free;
+ }
+ ubi->avail_pebs -= reserved_pebs;
+ ubi->rsvd_pebs += reserved_pebs;
+
+ /* Schedule wear-leveling if needed */
+ err = ensure_wear_leveling(ubi, 0);
+ if (err)
+ goto out_free;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (!ubi->ro_mode && !ubi->fm_disabled)
+ ubi_ensure_anchor_pebs(ubi);
+#endif
+ return 0;
+
+out_free:
+ shutdown_work(ubi);
+ tree_destroy(ubi, &ubi->used);
+ tree_destroy(ubi, &ubi->free);
+ tree_destroy(ubi, &ubi->scrub);
+ kfree(ubi->lookuptbl);
+ return err;
+}
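+
+/*
+ * Hedged usage sketch (illustration only, not part of this file): the attach
+ * path is expected to call ubi_wl_init() once the attach information @ai has
+ * been collected, roughly:
+ *
+ * err = ubi_wl_init(ubi, ai);
+ * if (err)
+ * goto out_vtbl; // hypothetical error label in the attach code
+ */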
+
+/**
+ * protection_queue_destroy - destroy the protection queue.
+ * @ubi: UBI device description object
+ */
+static void protection_queue_destroy(struct ubi_device *ubi)
+{
+ int i;
+ struct ubi_wl_entry *e, *tmp;
+
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
+ list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
+ list_del(&e->u.list);
+ wl_entry_destroy(ubi, e);
+ }
+ }
+}
+
+/**
+ * ubi_wl_close - close the wear-leveling sub-system.
+ * @ubi: UBI device description object
+ */
+void ubi_wl_close(struct ubi_device *ubi)
+{
+ dbg_wl("close the WL sub-system");
+ ubi_fastmap_close(ubi);
+ shutdown_work(ubi);
+ protection_queue_destroy(ubi);
+ tree_destroy(ubi, &ubi->used);
+ tree_destroy(ubi, &ubi->erroneous);
+ tree_destroy(ubi, &ubi->free);
+ tree_destroy(ubi, &ubi->scrub);
+ kfree(ubi->lookuptbl);
+}
+
+/**
+ * self_check_ec - make sure that the erase counter of a PEB is correct.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ * @ec: the erase counter to check
+ *
+ * This function returns zero if the erase counter of physical eraseblock
+ * @pnum matches @ec, %1 if it does not, and a negative error code if an
+ * error occurred.
+ */
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
+{
+ int err;
+ long long read_ec;
+ struct ubi_ec_hdr *ec_hdr;
+
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ /* The header does not have to exist */
+ err = 0;
+ goto out_free;
+ }
+
+ read_ec = be64_to_cpu(ec_hdr->ec);
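+ /* Complain only if the read EC exceeds the expected one by more than 1. */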
+ if (ec != read_ec && read_ec - ec > 1) {
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
+ dump_stack();
+ err = 1;
+ } else
+ err = 0;
+
+out_free:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ * @root: the root of the tree
+ *
+ * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
+ * is not.
+ */
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root)
+{
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ if (in_wl_tree(e, root))
+ return 0;
+
+ ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
+ e->pnum, e->ec, root);
+ dump_stack();
+ return -EINVAL;
+}
+
+/**
+ * self_check_in_pq - check if wear-leveling entry is in the protection queue.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ *
+ * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
+ */
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e)
+{
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ if (in_pq(ubi, e))
+ return 0;
+
+ ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
+ e->pnum, e->ec);
+ dump_stack();
+ return -EINVAL;
+}
+
+#ifndef CONFIG_MTD_UBI_FASTMAP
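+/**
+ * get_peb_for_wl - return a PEB to be used for wear-leveling.
+ * @ubi: UBI device description object
+ *
+ * Picks a free entry whose erase counter is within %WL_FREE_MAX_DIFF of the
+ * lowest one and removes it from the free tree. Callers are expected to hold
+ * @ubi->wl_lock.
+ */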
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ ubi->free_count--;
+ ubi_assert(ubi->free_count >= 0);
+ rb_erase(&e->u.rb, &ubi->free);
+
+ return e;
+}
+
+/**
+ * produce_free_peb - produce a free physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function tries to make a free PEB by means of synchronous execution of
+ * pending works. This may be needed if, for example the background thread is
+ * disabled. Returns zero in case of success and a negative error code in case
+ * of failure.
+ */
+static int produce_free_peb(struct ubi_device *ubi)
+{
+ int err;
+
+ while (!ubi->free.rb_node && ubi->works_count) {
+ spin_unlock(&ubi->wl_lock);
+
+ dbg_wl("do one work synchronously");
+ err = do_work(ubi);
+
+ spin_lock(&ubi->wl_lock);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock number in case of success and a
+ * negative error code in case of failure.
+ * Returns with ubi->fm_eba_sem held in read mode!
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int err;
+ struct ubi_wl_entry *e;
+
+retry:
+ down_read(&ubi->fm_eba_sem);
+ spin_lock(&ubi->wl_lock);
+ if (!ubi->free.rb_node) {
+ if (ubi->works_count == 0) {
+ ubi_err(ubi, "no free eraseblocks");
+ ubi_assert(list_empty(&ubi->works));
+ spin_unlock(&ubi->wl_lock);
+ return -ENOSPC;
+ }
+
+ err = produce_free_peb(ubi);
+ if (err < 0) {
+ spin_unlock(&ubi->wl_lock);
+ return err;
+ }
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_eba_sem);
+ goto retry;
+ }
+ e = wl_get_wle(ubi);
+ prot_queue_add(ubi, e);
+ spin_unlock(&ubi->wl_lock);
+
+ err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
+ ubi->peb_size - ubi->vid_hdr_aloffset);
+ if (err) {
+ ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
+ return err;
+ }
+
+ return e->pnum;
+}
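+
+/*
+ * Hedged caller sketch (illustration only): ubi_wl_get_peb() returns with
+ * ubi->fm_eba_sem held in read mode even on failure, so a caller must drop
+ * the semaphore itself once done with the PEB number, e.g.:
+ *
+ * pnum = ubi_wl_get_peb(ubi);
+ * // ... use pnum here if it is >= 0 ...
+ * up_read(&ubi->fm_eba_sem);
+ */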
+#else
+#include "fastmap-wl.c"
+#endif
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
new file mode 100644
index 0000000000..5ebe374a08
--- /dev/null
+++ b/drivers/mtd/ubi/wl.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef UBI_WL_H
+#define UBI_WL_H
+#ifdef CONFIG_MTD_UBI_FASTMAP
+static void update_fastmap_work_fn(struct work_struct *wrk);
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi);
+static bool need_wear_leveling(struct ubi_device *ubi);
+static void ubi_fastmap_close(struct ubi_device *ubi);
+static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
+{
+ /* Reserve enough LEBs to store two fastmaps. */
+ *count += (ubi->fm_size / ubi->leb_size) * 2;
+ INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
+}
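+/*
+ * Worked example (illustrative numbers only): with a 128 KiB LEB size and a
+ * fastmap occupying 256 KiB, fm_size / leb_size = 2, so 2 * 2 = 4 LEBs are
+ * added to @count to hold the two fastmap copies.
+ */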
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+ struct ubi_wl_entry *e,
+ struct rb_root *root);
+#else /* !CONFIG_MTD_UBI_FASTMAP */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static inline void ubi_fastmap_close(struct ubi_device *ubi) { }
+static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count) { }
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+ struct ubi_wl_entry *e,
+ struct rb_root *root)
+{
+ return e;
+}
+#endif /* CONFIG_MTD_UBI_FASTMAP */
+#endif /* UBI_WL_H */