Diffstat (limited to 'drivers/mtd/spi-nor')
30 files changed, 11464 insertions, 0 deletions
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
new file mode 100644
index 0000000000..24cd25de2b
--- /dev/null
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig MTD_SPI_NOR
+	tristate "SPI NOR device support"
+	depends on MTD
+	depends on MTD && SPI_MASTER
+	select SPI_MEM
+	help
+	  This is the framework for the SPI NOR which can be used by the SPI
+	  device drivers and the SPI NOR device driver.
+
+if MTD_SPI_NOR
+
+config MTD_SPI_NOR_USE_4K_SECTORS
+	bool "Use small 4096 B erase sectors"
+	default y
+	help
+	  Many flash memories support erasing small (4096 B) sectors. Depending
+	  on the usage this feature may provide a performance gain in comparison
+	  to erasing whole blocks (32/64 KiB).
+	  Changing a small part of the flash's contents is usually faster with
+	  small sectors. On the other hand erasing should be faster when using
+	  a 64 KiB block instead of 16 × 4 KiB sectors.
+
+	  Please note that some tools/drivers/filesystems may not work with
+	  4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
+
+choice
+	prompt "Software write protection at boot"
+	default MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE
+
+config MTD_SPI_NOR_SWP_DISABLE
+	bool "Disable SWP on any flashes (legacy behavior)"
+	help
+	  This option disables the software write protection on any SPI
+	  flashes at boot-up.
+
+	  Depending on the flash chip this either clears the block protection
+	  bits or does a "Global Unprotect" command.
+
+	  Don't use this if you intend to use the software write protection
+	  of your SPI flash. This is only to keep backwards compatibility.
+
+config MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE
+	bool "Disable SWP on flashes w/ volatile protection bits"
+	help
+	  Some SPI flashes have volatile block protection bits, i.e. after a
+	  power-up or a reset the flash is software write protected by
+	  default.
+
+	  This option disables the software write protection for these kinds
+	  of flashes while keeping it enabled for any other SPI flashes
+	  which have non-volatile write protection bits.
+
+	  If the software write protection is to be disabled, then depending
+	  on the flash either the block protection bits are cleared or a
+	  "Global Unprotect" command is issued.
+
+	  If you are unsure, select this option.
+
+config MTD_SPI_NOR_SWP_KEEP
+	bool "Keep software write protection as is"
+	help
+	  If you select this option the software write protection of any
+	  SPI flashes will not be changed. If your flash is software write
+	  protected or will be automatically software write protected after
+	  power-up you have to manually unlock it before you are able to
+	  write to it.
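[Editorial aside, not part of this patch: the MTD_SPI_NOR_SWP_KEEP help above implies that a flash which powers up write protected must be unlocked by hand before it can be written. A minimal userspace sketch, assuming the chip is registered as /dev/mtd0 and the flash advertises lock/unlock support, unlocks the whole array through the standard MTD MEMUNLOCK ioctl:]

/* Hypothetical userspace helper, not part of this diff: unlock all of /dev/mtd0. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/mtd-user.h>

int main(void)
{
	struct mtd_info_user info;
	struct erase_info_user unlock_range;
	int fd = open("/dev/mtd0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, MEMGETINFO, &info)) {
		perror("MEMGETINFO");
		close(fd);
		return 1;
	}

	/* Lift the software write protection over the whole device. */
	unlock_range.start = 0;
	unlock_range.length = info.size;
	if (ioctl(fd, MEMUNLOCK, &unlock_range))
		perror("MEMUNLOCK");

	close(fd);
	return 0;
}

[The flash_unlock tool from mtd-utils issues essentially the same ioctl.]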
+ +endchoice + +source "drivers/mtd/spi-nor/controllers/Kconfig" + +endif # MTD_SPI_NOR diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile new file mode 100644 index 0000000000..e347b435a0 --- /dev/null +++ b/drivers/mtd/spi-nor/Makefile @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0 + +spi-nor-objs := core.o sfdp.o swp.o otp.o sysfs.o +spi-nor-objs += atmel.o +spi-nor-objs += catalyst.o +spi-nor-objs += eon.o +spi-nor-objs += esmt.o +spi-nor-objs += everspin.o +spi-nor-objs += fujitsu.o +spi-nor-objs += gigadevice.o +spi-nor-objs += intel.o +spi-nor-objs += issi.o +spi-nor-objs += macronix.o +spi-nor-objs += micron-st.o +spi-nor-objs += spansion.o +spi-nor-objs += sst.o +spi-nor-objs += winbond.o +spi-nor-objs += xilinx.o +spi-nor-objs += xmc.o +spi-nor-$(CONFIG_DEBUG_FS) += debugfs.o +obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o + +obj-$(CONFIG_MTD_SPI_NOR) += controllers/ diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c new file mode 100644 index 0000000000..58968c1e7d --- /dev/null +++ b/drivers/mtd/spi-nor/atmel.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +#define ATMEL_SR_GLOBAL_PROTECT_MASK GENMASK(5, 2) + +/* + * The Atmel AT25FS010/AT25FS040 parts have some weird configuration for the + * block protection bits. We don't support them. But legacy behavior in linux + * is to unlock the whole flash array on startup. Therefore, we have to support + * exactly this operation. + */ +static int at25fs_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + return -EOPNOTSUPP; +} + +static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + int ret; + + /* We only support unlocking the whole flash array */ + if (ofs || len != nor->params->size) + return -EINVAL; + + /* Write 0x00 to the status register to disable write protection */ + ret = spi_nor_write_sr_and_check(nor, 0); + if (ret) + dev_dbg(nor->dev, "unable to clear BP bits, WP# asserted?\n"); + + return ret; +} + +static int at25fs_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + return -EOPNOTSUPP; +} + +static const struct spi_nor_locking_ops at25fs_nor_locking_ops = { + .lock = at25fs_nor_lock, + .unlock = at25fs_nor_unlock, + .is_locked = at25fs_nor_is_locked, +}; + +static int at25fs_nor_late_init(struct spi_nor *nor) +{ + nor->params->locking_ops = &at25fs_nor_locking_ops; + + return 0; +} + +static const struct spi_nor_fixups at25fs_nor_fixups = { + .late_init = at25fs_nor_late_init, +}; + +/** + * atmel_nor_set_global_protection - Do a Global Protect or Unprotect command + * @nor: pointer to 'struct spi_nor' + * @ofs: offset in bytes + * @len: len in bytes + * @is_protect: if true do a Global Protect otherwise it is a Global Unprotect + * + * Return: 0 on success, -error otherwise. 
+ */ +static int atmel_nor_set_global_protection(struct spi_nor *nor, loff_t ofs, + uint64_t len, bool is_protect) +{ + int ret; + u8 sr; + + /* We only support locking the whole flash array */ + if (ofs || len != nor->params->size) + return -EINVAL; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + sr = nor->bouncebuf[0]; + + /* SRWD bit needs to be cleared, otherwise the protection doesn't change */ + if (sr & SR_SRWD) { + sr &= ~SR_SRWD; + ret = spi_nor_write_sr_and_check(nor, sr); + if (ret) { + dev_dbg(nor->dev, "unable to clear SRWD bit, WP# asserted?\n"); + return ret; + } + } + + if (is_protect) { + sr |= ATMEL_SR_GLOBAL_PROTECT_MASK; + /* + * Set the SRWD bit again as soon as we are protecting + * anything. This will ensure that the WP# pin is working + * correctly. By doing this we also behave the same as + * spi_nor_sr_lock(), which sets SRWD if any block protection + * is active. + */ + sr |= SR_SRWD; + } else { + sr &= ~ATMEL_SR_GLOBAL_PROTECT_MASK; + } + + nor->bouncebuf[0] = sr; + + /* + * We cannot use the spi_nor_write_sr_and_check() because this command + * isn't really setting any bits, instead it is an pseudo command for + * "Global Unprotect" or "Global Protect" + */ + return spi_nor_write_sr(nor, nor->bouncebuf, 1); +} + +static int atmel_nor_global_protect(struct spi_nor *nor, loff_t ofs, + uint64_t len) +{ + return atmel_nor_set_global_protection(nor, ofs, len, true); +} + +static int atmel_nor_global_unprotect(struct spi_nor *nor, loff_t ofs, + uint64_t len) +{ + return atmel_nor_set_global_protection(nor, ofs, len, false); +} + +static int atmel_nor_is_global_protected(struct spi_nor *nor, loff_t ofs, + uint64_t len) +{ + int ret; + + if (ofs >= nor->params->size || (ofs + len) > nor->params->size) + return -EINVAL; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + return ((nor->bouncebuf[0] & ATMEL_SR_GLOBAL_PROTECT_MASK) == ATMEL_SR_GLOBAL_PROTECT_MASK); +} + +static const struct spi_nor_locking_ops atmel_nor_global_protection_ops = { + .lock = atmel_nor_global_protect, + .unlock = atmel_nor_global_unprotect, + .is_locked = atmel_nor_is_global_protected, +}; + +static int atmel_nor_global_protection_late_init(struct spi_nor *nor) +{ + nor->params->locking_ops = &atmel_nor_global_protection_ops; + + return 0; +} + +static const struct spi_nor_fixups atmel_nor_global_protection_fixups = { + .late_init = atmel_nor_global_protection_late_init, +}; + +static const struct flash_info atmel_nor_parts[] = { + /* Atmel -- some are (confusingly) marketed as "DataFlash" */ + { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4) + FLAGS(SPI_NOR_HAS_LOCK) + NO_SFDP_FLAGS(SECT_4K) + .fixups = &at25fs_nor_fixups }, + { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8) + FLAGS(SPI_NOR_HAS_LOCK) + NO_SFDP_FLAGS(SECT_4K) + .fixups = &at25fs_nor_fixups }, + { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + .fixups = &atmel_nor_global_protection_fixups }, + { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + .fixups = &atmel_nor_global_protection_fixups }, + { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + .fixups = &atmel_nor_global_protection_fixups }, + { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + .fixups = 
&atmel_nor_global_protection_fixups }, + { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8) + NO_SFDP_FLAGS(SECT_4K) }, + { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + .fixups = &atmel_nor_global_protection_fixups }, + { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + .fixups = &atmel_nor_global_protection_fixups }, + { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + .fixups = &atmel_nor_global_protection_fixups }, + { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16) + NO_SFDP_FLAGS(SECT_4K) }, +}; + +const struct spi_nor_manufacturer spi_nor_atmel = { + .name = "atmel", + .parts = atmel_nor_parts, + .nparts = ARRAY_SIZE(atmel_nor_parts), +}; diff --git a/drivers/mtd/spi-nor/catalyst.c b/drivers/mtd/spi-nor/catalyst.c new file mode 100644 index 0000000000..6d310815fb --- /dev/null +++ b/drivers/mtd/spi-nor/catalyst.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static const struct flash_info catalyst_nor_parts[] = { + /* Catalyst / On Semiconductor -- non-JEDEC */ + { "cat25c11", CAT25_INFO(16, 8, 16, 1) }, + { "cat25c03", CAT25_INFO(32, 8, 16, 2) }, + { "cat25c09", CAT25_INFO(128, 8, 32, 2) }, + { "cat25c17", CAT25_INFO(256, 8, 32, 2) }, + { "cat25128", CAT25_INFO(2048, 8, 64, 2) }, +}; + +const struct spi_nor_manufacturer spi_nor_catalyst = { + .name = "catalyst", + .parts = catalyst_nor_parts, + .nparts = ARRAY_SIZE(catalyst_nor_parts), +}; diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig new file mode 100644 index 0000000000..ca45dcd3ff --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/Kconfig @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only +config SPI_HISI_SFC + tristate "Hisilicon FMC SPI NOR Flash Controller(SFC)" + depends on ARCH_HISI || COMPILE_TEST + depends on HAS_IOMEM + help + This enables support for HiSilicon FMC SPI NOR flash controller. + +config SPI_NXP_SPIFI + tristate "NXP SPI Flash Interface (SPIFI)" + depends on OF && (ARCH_LPC18XX || COMPILE_TEST) + depends on HAS_IOMEM + help + Enable support for the NXP LPC SPI Flash Interface controller. + + SPIFI is a specialized controller for connecting serial SPI + Flash. Enable this option if you have a device with a SPIFI + controller and want to access the Flash as a mtd device. diff --git a/drivers/mtd/spi-nor/controllers/Makefile b/drivers/mtd/spi-nor/controllers/Makefile new file mode 100644 index 0000000000..0b8e1d5309 --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o +obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c new file mode 100644 index 0000000000..5070d72835 --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c @@ -0,0 +1,497 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * HiSilicon FMC SPI NOR flash controller driver + * + * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd. 
+ */ +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/spi-nor.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* Hardware register offsets and field definitions */ +#define FMC_CFG 0x00 +#define FMC_CFG_OP_MODE_MASK BIT_MASK(0) +#define FMC_CFG_OP_MODE_BOOT 0 +#define FMC_CFG_OP_MODE_NORMAL 1 +#define FMC_CFG_FLASH_SEL(type) (((type) & 0x3) << 1) +#define FMC_CFG_FLASH_SEL_MASK 0x6 +#define FMC_ECC_TYPE(type) (((type) & 0x7) << 5) +#define FMC_ECC_TYPE_MASK GENMASK(7, 5) +#define SPI_NOR_ADDR_MODE_MASK BIT_MASK(10) +#define SPI_NOR_ADDR_MODE_3BYTES (0x0 << 10) +#define SPI_NOR_ADDR_MODE_4BYTES (0x1 << 10) +#define FMC_GLOBAL_CFG 0x04 +#define FMC_GLOBAL_CFG_WP_ENABLE BIT(6) +#define FMC_SPI_TIMING_CFG 0x08 +#define TIMING_CFG_TCSH(nr) (((nr) & 0xf) << 8) +#define TIMING_CFG_TCSS(nr) (((nr) & 0xf) << 4) +#define TIMING_CFG_TSHSL(nr) ((nr) & 0xf) +#define CS_HOLD_TIME 0x6 +#define CS_SETUP_TIME 0x6 +#define CS_DESELECT_TIME 0xf +#define FMC_INT 0x18 +#define FMC_INT_OP_DONE BIT(0) +#define FMC_INT_CLR 0x20 +#define FMC_CMD 0x24 +#define FMC_CMD_CMD1(cmd) ((cmd) & 0xff) +#define FMC_ADDRL 0x2c +#define FMC_OP_CFG 0x30 +#define OP_CFG_FM_CS(cs) ((cs) << 11) +#define OP_CFG_MEM_IF_TYPE(type) (((type) & 0x7) << 7) +#define OP_CFG_ADDR_NUM(addr) (((addr) & 0x7) << 4) +#define OP_CFG_DUMMY_NUM(dummy) ((dummy) & 0xf) +#define FMC_DATA_NUM 0x38 +#define FMC_DATA_NUM_CNT(cnt) ((cnt) & GENMASK(13, 0)) +#define FMC_OP 0x3c +#define FMC_OP_DUMMY_EN BIT(8) +#define FMC_OP_CMD1_EN BIT(7) +#define FMC_OP_ADDR_EN BIT(6) +#define FMC_OP_WRITE_DATA_EN BIT(5) +#define FMC_OP_READ_DATA_EN BIT(2) +#define FMC_OP_READ_STATUS_EN BIT(1) +#define FMC_OP_REG_OP_START BIT(0) +#define FMC_DMA_LEN 0x40 +#define FMC_DMA_LEN_SET(len) ((len) & GENMASK(27, 0)) +#define FMC_DMA_SADDR_D0 0x4c +#define HIFMC_DMA_MAX_LEN (4096) +#define HIFMC_DMA_MASK (HIFMC_DMA_MAX_LEN - 1) +#define FMC_OP_DMA 0x68 +#define OP_CTRL_RD_OPCODE(code) (((code) & 0xff) << 16) +#define OP_CTRL_WR_OPCODE(code) (((code) & 0xff) << 8) +#define OP_CTRL_RW_OP(op) ((op) << 1) +#define OP_CTRL_DMA_OP_READY BIT(0) +#define FMC_OP_READ 0x0 +#define FMC_OP_WRITE 0x1 +#define FMC_WAIT_TIMEOUT 1000000 + +enum hifmc_iftype { + IF_TYPE_STD, + IF_TYPE_DUAL, + IF_TYPE_DIO, + IF_TYPE_QUAD, + IF_TYPE_QIO, +}; + +struct hifmc_priv { + u32 chipselect; + u32 clkrate; + struct hifmc_host *host; +}; + +#define HIFMC_MAX_CHIP_NUM 2 +struct hifmc_host { + struct device *dev; + struct mutex lock; + + void __iomem *regbase; + void __iomem *iobase; + struct clk *clk; + void *buffer; + dma_addr_t dma_buffer; + + struct spi_nor *nor[HIFMC_MAX_CHIP_NUM]; + u32 num_chip; +}; + +static inline int hisi_spi_nor_wait_op_finish(struct hifmc_host *host) +{ + u32 reg; + + return readl_poll_timeout(host->regbase + FMC_INT, reg, + (reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT); +} + +static int hisi_spi_nor_get_if_type(enum spi_nor_protocol proto) +{ + enum hifmc_iftype if_type; + + switch (proto) { + case SNOR_PROTO_1_1_2: + if_type = IF_TYPE_DUAL; + break; + case SNOR_PROTO_1_2_2: + if_type = IF_TYPE_DIO; + break; + case SNOR_PROTO_1_1_4: + if_type = IF_TYPE_QUAD; + break; + case SNOR_PROTO_1_4_4: + if_type = IF_TYPE_QIO; + break; + case SNOR_PROTO_1_1_1: + default: + if_type = IF_TYPE_STD; + break; + } + + return if_type; +} + +static void hisi_spi_nor_init(struct hifmc_host *host) +{ + u32 reg; + + reg = 
TIMING_CFG_TCSH(CS_HOLD_TIME) + | TIMING_CFG_TCSS(CS_SETUP_TIME) + | TIMING_CFG_TSHSL(CS_DESELECT_TIME); + writel(reg, host->regbase + FMC_SPI_TIMING_CFG); +} + +static int hisi_spi_nor_prep(struct spi_nor *nor) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + int ret; + + mutex_lock(&host->lock); + + ret = clk_set_rate(host->clk, priv->clkrate); + if (ret) + goto out; + + ret = clk_prepare_enable(host->clk); + if (ret) + goto out; + + return 0; + +out: + mutex_unlock(&host->lock); + return ret; +} + +static void hisi_spi_nor_unprep(struct spi_nor *nor) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + + clk_disable_unprepare(host->clk); + mutex_unlock(&host->lock); +} + +static int hisi_spi_nor_op_reg(struct spi_nor *nor, + u8 opcode, size_t len, u8 optype) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + u32 reg; + + reg = FMC_CMD_CMD1(opcode); + writel(reg, host->regbase + FMC_CMD); + + reg = FMC_DATA_NUM_CNT(len); + writel(reg, host->regbase + FMC_DATA_NUM); + + reg = OP_CFG_FM_CS(priv->chipselect); + writel(reg, host->regbase + FMC_OP_CFG); + + writel(0xff, host->regbase + FMC_INT_CLR); + reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START | optype; + writel(reg, host->regbase + FMC_OP); + + return hisi_spi_nor_wait_op_finish(host); +} + +static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, + size_t len) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + int ret; + + ret = hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_READ_DATA_EN); + if (ret) + return ret; + + memcpy_fromio(buf, host->iobase, len); + return 0; +} + +static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode, + const u8 *buf, size_t len) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + + if (len) + memcpy_toio(host->iobase, buf, len); + + return hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_WRITE_DATA_EN); +} + +static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off, + dma_addr_t dma_buf, size_t len, u8 op_type) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + u8 if_type = 0; + u32 reg; + + reg = readl(host->regbase + FMC_CFG); + reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK); + reg |= FMC_CFG_OP_MODE_NORMAL; + reg |= (nor->addr_nbytes == 4) ? SPI_NOR_ADDR_MODE_4BYTES + : SPI_NOR_ADDR_MODE_3BYTES; + writel(reg, host->regbase + FMC_CFG); + + writel(start_off, host->regbase + FMC_ADDRL); + writel(dma_buf, host->regbase + FMC_DMA_SADDR_D0); + writel(FMC_DMA_LEN_SET(len), host->regbase + FMC_DMA_LEN); + + reg = OP_CFG_FM_CS(priv->chipselect); + if (op_type == FMC_OP_READ) + if_type = hisi_spi_nor_get_if_type(nor->read_proto); + else + if_type = hisi_spi_nor_get_if_type(nor->write_proto); + reg |= OP_CFG_MEM_IF_TYPE(if_type); + if (op_type == FMC_OP_READ) + reg |= OP_CFG_DUMMY_NUM(nor->read_dummy >> 3); + writel(reg, host->regbase + FMC_OP_CFG); + + writel(0xff, host->regbase + FMC_INT_CLR); + reg = OP_CTRL_RW_OP(op_type) | OP_CTRL_DMA_OP_READY; + reg |= (op_type == FMC_OP_READ) + ? 
OP_CTRL_RD_OPCODE(nor->read_opcode) + : OP_CTRL_WR_OPCODE(nor->program_opcode); + writel(reg, host->regbase + FMC_OP_DMA); + + return hisi_spi_nor_wait_op_finish(host); +} + +static ssize_t hisi_spi_nor_read(struct spi_nor *nor, loff_t from, size_t len, + u_char *read_buf) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + size_t offset; + int ret; + + for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) { + size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset); + + ret = hisi_spi_nor_dma_transfer(nor, + from + offset, host->dma_buffer, trans, FMC_OP_READ); + if (ret) { + dev_warn(nor->dev, "DMA read timeout\n"); + return ret; + } + memcpy(read_buf + offset, host->buffer, trans); + } + + return len; +} + +static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to, + size_t len, const u_char *write_buf) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + size_t offset; + int ret; + + for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) { + size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset); + + memcpy(host->buffer, write_buf + offset, trans); + ret = hisi_spi_nor_dma_transfer(nor, + to + offset, host->dma_buffer, trans, FMC_OP_WRITE); + if (ret) { + dev_warn(nor->dev, "DMA write timeout\n"); + return ret; + } + } + + return len; +} + +static const struct spi_nor_controller_ops hisi_controller_ops = { + .prepare = hisi_spi_nor_prep, + .unprepare = hisi_spi_nor_unprep, + .read_reg = hisi_spi_nor_read_reg, + .write_reg = hisi_spi_nor_write_reg, + .read = hisi_spi_nor_read, + .write = hisi_spi_nor_write, +}; + +/* + * Get spi flash device information and register it as a mtd device. + */ +static int hisi_spi_nor_register(struct device_node *np, + struct hifmc_host *host) +{ + const struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_READ_1_1_2 | + SNOR_HWCAPS_READ_1_1_4 | + SNOR_HWCAPS_PP, + }; + struct device *dev = host->dev; + struct spi_nor *nor; + struct hifmc_priv *priv; + struct mtd_info *mtd; + int ret; + + nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL); + if (!nor) + return -ENOMEM; + + nor->dev = dev; + spi_nor_set_flash_node(nor, np); + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = of_property_read_u32(np, "reg", &priv->chipselect); + if (ret) { + dev_err(dev, "There's no reg property for %pOF\n", + np); + return ret; + } + + ret = of_property_read_u32(np, "spi-max-frequency", + &priv->clkrate); + if (ret) { + dev_err(dev, "There's no spi-max-frequency property for %pOF\n", + np); + return ret; + } + priv->host = host; + nor->priv = priv; + nor->controller_ops = &hisi_controller_ops; + + ret = spi_nor_scan(nor, NULL, &hwcaps); + if (ret) + return ret; + + mtd = &nor->mtd; + mtd->name = np->name; + ret = mtd_device_register(mtd, NULL, 0); + if (ret) + return ret; + + host->nor[host->num_chip] = nor; + host->num_chip++; + return 0; +} + +static void hisi_spi_nor_unregister_all(struct hifmc_host *host) +{ + int i; + + for (i = 0; i < host->num_chip; i++) + mtd_device_unregister(&host->nor[i]->mtd); +} + +static int hisi_spi_nor_register_all(struct hifmc_host *host) +{ + struct device *dev = host->dev; + struct device_node *np; + int ret; + + for_each_available_child_of_node(dev->of_node, np) { + ret = hisi_spi_nor_register(np, host); + if (ret) { + of_node_put(np); + goto fail; + } + + if (host->num_chip == HIFMC_MAX_CHIP_NUM) { + dev_warn(dev, "Flash device number exceeds the 
maximum chipselect number\n"); + of_node_put(np); + break; + } + } + + return 0; + +fail: + hisi_spi_nor_unregister_all(host); + return ret; +} + +static int hisi_spi_nor_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct hifmc_host *host; + int ret; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; + + platform_set_drvdata(pdev, host); + host->dev = dev; + + host->regbase = devm_platform_ioremap_resource_byname(pdev, "control"); + if (IS_ERR(host->regbase)) + return PTR_ERR(host->regbase); + + host->iobase = devm_platform_ioremap_resource_byname(pdev, "memory"); + if (IS_ERR(host->iobase)) + return PTR_ERR(host->iobase); + + host->clk = devm_clk_get(dev, NULL); + if (IS_ERR(host->clk)) + return PTR_ERR(host->clk); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_warn(dev, "Unable to set dma mask\n"); + return ret; + } + + host->buffer = dmam_alloc_coherent(dev, HIFMC_DMA_MAX_LEN, + &host->dma_buffer, GFP_KERNEL); + if (!host->buffer) + return -ENOMEM; + + ret = clk_prepare_enable(host->clk); + if (ret) + return ret; + + mutex_init(&host->lock); + hisi_spi_nor_init(host); + ret = hisi_spi_nor_register_all(host); + if (ret) + mutex_destroy(&host->lock); + + clk_disable_unprepare(host->clk); + return ret; +} + +static int hisi_spi_nor_remove(struct platform_device *pdev) +{ + struct hifmc_host *host = platform_get_drvdata(pdev); + + hisi_spi_nor_unregister_all(host); + mutex_destroy(&host->lock); + return 0; +} + +static const struct of_device_id hisi_spi_nor_dt_ids[] = { + { .compatible = "hisilicon,fmc-spi-nor"}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, hisi_spi_nor_dt_ids); + +static struct platform_driver hisi_spi_nor_driver = { + .driver = { + .name = "hisi-sfc", + .of_match_table = hisi_spi_nor_dt_ids, + }, + .probe = hisi_spi_nor_probe, + .remove = hisi_spi_nor_remove, +}; +module_platform_driver(hisi_spi_nor_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("HiSilicon SPI Nor Flash Controller Driver"); diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c new file mode 100644 index 0000000000..5d8f47ab14 --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SPI NOR driver for NXP SPI Flash Interface (SPIFI) + * + * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com> + * + * Based on Freescale QuadSPI driver: + * Copyright (C) 2013 Freescale Semiconductor, Inc. 
+ */ + +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/mtd/spi-nor.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> + +/* NXP SPIFI registers, bits and macros */ +#define SPIFI_CTRL 0x000 +#define SPIFI_CTRL_TIMEOUT(timeout) (timeout) +#define SPIFI_CTRL_CSHIGH(cshigh) ((cshigh) << 16) +#define SPIFI_CTRL_MODE3 BIT(23) +#define SPIFI_CTRL_DUAL BIT(28) +#define SPIFI_CTRL_FBCLK BIT(30) +#define SPIFI_CMD 0x004 +#define SPIFI_CMD_DATALEN(dlen) ((dlen) & 0x3fff) +#define SPIFI_CMD_DOUT BIT(15) +#define SPIFI_CMD_INTLEN(ilen) ((ilen) << 16) +#define SPIFI_CMD_FIELDFORM(field) ((field) << 19) +#define SPIFI_CMD_FIELDFORM_ALL_SERIAL SPIFI_CMD_FIELDFORM(0x0) +#define SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA SPIFI_CMD_FIELDFORM(0x1) +#define SPIFI_CMD_FRAMEFORM(frame) ((frame) << 21) +#define SPIFI_CMD_FRAMEFORM_OPCODE_ONLY SPIFI_CMD_FRAMEFORM(0x1) +#define SPIFI_CMD_OPCODE(op) ((op) << 24) +#define SPIFI_ADDR 0x008 +#define SPIFI_IDATA 0x00c +#define SPIFI_CLIMIT 0x010 +#define SPIFI_DATA 0x014 +#define SPIFI_MCMD 0x018 +#define SPIFI_STAT 0x01c +#define SPIFI_STAT_MCINIT BIT(0) +#define SPIFI_STAT_CMD BIT(1) +#define SPIFI_STAT_RESET BIT(4) + +#define SPI_NOR_MAX_ID_LEN 6 + +struct nxp_spifi { + struct device *dev; + struct clk *clk_spifi; + struct clk *clk_reg; + void __iomem *io_base; + void __iomem *flash_base; + struct spi_nor nor; + bool memory_mode; + u32 mcmd; +}; + +static int nxp_spifi_wait_for_cmd(struct nxp_spifi *spifi) +{ + u8 stat; + int ret; + + ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat, + !(stat & SPIFI_STAT_CMD), 10, 30); + if (ret) + dev_warn(spifi->dev, "command timed out\n"); + + return ret; +} + +static int nxp_spifi_reset(struct nxp_spifi *spifi) +{ + u8 stat; + int ret; + + writel(SPIFI_STAT_RESET, spifi->io_base + SPIFI_STAT); + ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat, + !(stat & SPIFI_STAT_RESET), 10, 30); + if (ret) + dev_warn(spifi->dev, "state reset timed out\n"); + + return ret; +} + +static int nxp_spifi_set_memory_mode_off(struct nxp_spifi *spifi) +{ + int ret; + + if (!spifi->memory_mode) + return 0; + + ret = nxp_spifi_reset(spifi); + if (ret) + dev_err(spifi->dev, "unable to enter command mode\n"); + else + spifi->memory_mode = false; + + return ret; +} + +static int nxp_spifi_set_memory_mode_on(struct nxp_spifi *spifi) +{ + u8 stat; + int ret; + + if (spifi->memory_mode) + return 0; + + writel(spifi->mcmd, spifi->io_base + SPIFI_MCMD); + ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat, + stat & SPIFI_STAT_MCINIT, 10, 30); + if (ret) + dev_err(spifi->dev, "unable to enter memory mode\n"); + else + spifi->memory_mode = true; + + return ret; +} + +static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, + size_t len) +{ + struct nxp_spifi *spifi = nor->priv; + u32 cmd; + int ret; + + ret = nxp_spifi_set_memory_mode_off(spifi); + if (ret) + return ret; + + cmd = SPIFI_CMD_DATALEN(len) | + SPIFI_CMD_OPCODE(opcode) | + SPIFI_CMD_FIELDFORM_ALL_SERIAL | + SPIFI_CMD_FRAMEFORM_OPCODE_ONLY; + writel(cmd, spifi->io_base + SPIFI_CMD); + + while (len--) + *buf++ = readb(spifi->io_base + SPIFI_DATA); + + return nxp_spifi_wait_for_cmd(spifi); +} + +static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, + size_t len) +{ + struct nxp_spifi *spifi = nor->priv; + u32 cmd; + int ret; + + ret = 
nxp_spifi_set_memory_mode_off(spifi); + if (ret) + return ret; + + cmd = SPIFI_CMD_DOUT | + SPIFI_CMD_DATALEN(len) | + SPIFI_CMD_OPCODE(opcode) | + SPIFI_CMD_FIELDFORM_ALL_SERIAL | + SPIFI_CMD_FRAMEFORM_OPCODE_ONLY; + writel(cmd, spifi->io_base + SPIFI_CMD); + + while (len--) + writeb(*buf++, spifi->io_base + SPIFI_DATA); + + return nxp_spifi_wait_for_cmd(spifi); +} + +static ssize_t nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len, + u_char *buf) +{ + struct nxp_spifi *spifi = nor->priv; + int ret; + + ret = nxp_spifi_set_memory_mode_on(spifi); + if (ret) + return ret; + + memcpy_fromio(buf, spifi->flash_base + from, len); + + return len; +} + +static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len, + const u_char *buf) +{ + struct nxp_spifi *spifi = nor->priv; + u32 cmd; + int ret; + size_t i; + + ret = nxp_spifi_set_memory_mode_off(spifi); + if (ret) + return ret; + + writel(to, spifi->io_base + SPIFI_ADDR); + + cmd = SPIFI_CMD_DOUT | + SPIFI_CMD_DATALEN(len) | + SPIFI_CMD_FIELDFORM_ALL_SERIAL | + SPIFI_CMD_OPCODE(nor->program_opcode) | + SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1); + writel(cmd, spifi->io_base + SPIFI_CMD); + + for (i = 0; i < len; i++) + writeb(buf[i], spifi->io_base + SPIFI_DATA); + + ret = nxp_spifi_wait_for_cmd(spifi); + if (ret) + return ret; + + return len; +} + +static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs) +{ + struct nxp_spifi *spifi = nor->priv; + u32 cmd; + int ret; + + ret = nxp_spifi_set_memory_mode_off(spifi); + if (ret) + return ret; + + writel(offs, spifi->io_base + SPIFI_ADDR); + + cmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL | + SPIFI_CMD_OPCODE(nor->erase_opcode) | + SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1); + writel(cmd, spifi->io_base + SPIFI_CMD); + + return nxp_spifi_wait_for_cmd(spifi); +} + +static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi) +{ + switch (spifi->nor.read_proto) { + case SNOR_PROTO_1_1_1: + spifi->mcmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL; + break; + case SNOR_PROTO_1_1_2: + case SNOR_PROTO_1_1_4: + spifi->mcmd = SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA; + break; + default: + dev_err(spifi->dev, "unsupported SPI read mode\n"); + return -EINVAL; + } + + /* Memory mode supports address length between 1 and 4 */ + if (spifi->nor.addr_nbytes < 1 || spifi->nor.addr_nbytes > 4) + return -EINVAL; + + spifi->mcmd |= SPIFI_CMD_OPCODE(spifi->nor.read_opcode) | + SPIFI_CMD_INTLEN(spifi->nor.read_dummy / 8) | + SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1); + + return 0; +} + +static void nxp_spifi_dummy_id_read(struct spi_nor *nor) +{ + u8 id[SPI_NOR_MAX_ID_LEN]; + nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id, + SPI_NOR_MAX_ID_LEN); +} + +static const struct spi_nor_controller_ops nxp_spifi_controller_ops = { + .read_reg = nxp_spifi_read_reg, + .write_reg = nxp_spifi_write_reg, + .read = nxp_spifi_read, + .write = nxp_spifi_write, + .erase = nxp_spifi_erase, +}; + +static int nxp_spifi_setup_flash(struct nxp_spifi *spifi, + struct device_node *np) +{ + struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_PP, + }; + u32 ctrl, property; + u16 mode = 0; + int ret; + + if (!of_property_read_u32(np, "spi-rx-bus-width", &property)) { + switch (property) { + case 1: + break; + case 2: + mode |= SPI_RX_DUAL; + break; + case 4: + mode |= SPI_RX_QUAD; + break; + default: + dev_err(spifi->dev, "unsupported rx-bus-width\n"); + return -EINVAL; + } + } + + if (of_property_read_bool(np, "spi-cpha")) + mode |= SPI_CPHA; + + if 
(of_property_read_bool(np, "spi-cpol")) + mode |= SPI_CPOL; + + /* Setup control register defaults */ + ctrl = SPIFI_CTRL_TIMEOUT(1000) | + SPIFI_CTRL_CSHIGH(15) | + SPIFI_CTRL_FBCLK; + + if (mode & SPI_RX_DUAL) { + ctrl |= SPIFI_CTRL_DUAL; + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; + } else if (mode & SPI_RX_QUAD) { + ctrl &= ~SPIFI_CTRL_DUAL; + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; + } else { + ctrl |= SPIFI_CTRL_DUAL; + } + + switch (mode & SPI_MODE_X_MASK) { + case SPI_MODE_0: + ctrl &= ~SPIFI_CTRL_MODE3; + break; + case SPI_MODE_3: + ctrl |= SPIFI_CTRL_MODE3; + break; + default: + dev_err(spifi->dev, "only mode 0 and 3 supported\n"); + return -EINVAL; + } + + writel(ctrl, spifi->io_base + SPIFI_CTRL); + + spifi->nor.dev = spifi->dev; + spi_nor_set_flash_node(&spifi->nor, np); + spifi->nor.priv = spifi; + spifi->nor.controller_ops = &nxp_spifi_controller_ops; + + /* + * The first read on a hard reset isn't reliable so do a + * dummy read of the id before calling spi_nor_scan(). + * The reason for this problem is unknown. + * + * The official NXP spifilib uses more or less the same + * workaround that is applied here by reading the device + * id multiple times. + */ + nxp_spifi_dummy_id_read(&spifi->nor); + + ret = spi_nor_scan(&spifi->nor, NULL, &hwcaps); + if (ret) { + dev_err(spifi->dev, "device scan failed\n"); + return ret; + } + + ret = nxp_spifi_setup_memory_cmd(spifi); + if (ret) { + dev_err(spifi->dev, "memory command setup failed\n"); + return ret; + } + + ret = mtd_device_register(&spifi->nor.mtd, NULL, 0); + if (ret) { + dev_err(spifi->dev, "mtd device parse failed\n"); + return ret; + } + + return 0; +} + +static int nxp_spifi_probe(struct platform_device *pdev) +{ + struct device_node *flash_np; + struct nxp_spifi *spifi; + int ret; + + spifi = devm_kzalloc(&pdev->dev, sizeof(*spifi), GFP_KERNEL); + if (!spifi) + return -ENOMEM; + + spifi->io_base = devm_platform_ioremap_resource_byname(pdev, "spifi"); + if (IS_ERR(spifi->io_base)) + return PTR_ERR(spifi->io_base); + + spifi->flash_base = devm_platform_ioremap_resource_byname(pdev, "flash"); + if (IS_ERR(spifi->flash_base)) + return PTR_ERR(spifi->flash_base); + + spifi->clk_spifi = devm_clk_get_enabled(&pdev->dev, "spifi"); + if (IS_ERR(spifi->clk_spifi)) { + dev_err(&pdev->dev, "spifi clock not found or unable to enable\n"); + return PTR_ERR(spifi->clk_spifi); + } + + spifi->clk_reg = devm_clk_get_enabled(&pdev->dev, "reg"); + if (IS_ERR(spifi->clk_reg)) { + dev_err(&pdev->dev, "reg clock not found or unable to enable\n"); + return PTR_ERR(spifi->clk_reg); + } + + spifi->dev = &pdev->dev; + platform_set_drvdata(pdev, spifi); + + /* Initialize and reset device */ + nxp_spifi_reset(spifi); + writel(0, spifi->io_base + SPIFI_IDATA); + writel(0, spifi->io_base + SPIFI_MCMD); + nxp_spifi_reset(spifi); + + flash_np = of_get_next_available_child(pdev->dev.of_node, NULL); + if (!flash_np) { + dev_err(&pdev->dev, "no SPI flash device to configure\n"); + return -ENODEV; + } + + ret = nxp_spifi_setup_flash(spifi, flash_np); + of_node_put(flash_np); + if (ret) { + dev_err(&pdev->dev, "unable to setup flash chip\n"); + return ret; + } + + return 0; +} + +static int nxp_spifi_remove(struct platform_device *pdev) +{ + struct nxp_spifi *spifi = platform_get_drvdata(pdev); + + mtd_device_unregister(&spifi->nor.mtd); + + return 0; +} + +static const struct of_device_id nxp_spifi_match[] = { + {.compatible = "nxp,lpc1773-spifi"}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, nxp_spifi_match); + +static struct platform_driver 
nxp_spifi_driver = { + .probe = nxp_spifi_probe, + .remove = nxp_spifi_remove, + .driver = { + .name = "nxp-spifi", + .of_match_table = nxp_spifi_match, + }, +}; +module_platform_driver(nxp_spifi_driver); + +MODULE_DESCRIPTION("NXP SPI Flash Interface driver"); +MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c new file mode 100644 index 0000000000..1b0c6770c1 --- /dev/null +++ b/drivers/mtd/spi-nor/core.c @@ -0,0 +1,3813 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with + * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c + * + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/math64.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/spi-nor.h> +#include <linux/mutex.h> +#include <linux/of_platform.h> +#include <linux/sched/task_stack.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/spi/flash.h> + +#include "core.h" + +/* Define max times to check status register before we give up. */ + +/* + * For everything but full-chip erase; probably could be much smaller, but kept + * around for safety for now + */ +#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ) + +/* + * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up + * for larger flash + */ +#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ) + +#define SPI_NOR_MAX_ADDR_NBYTES 4 + +#define SPI_NOR_SRST_SLEEP_MIN 200 +#define SPI_NOR_SRST_SLEEP_MAX 400 + +/** + * spi_nor_get_cmd_ext() - Get the command opcode extension based on the + * extension type. + * @nor: pointer to a 'struct spi_nor' + * @op: pointer to the 'struct spi_mem_op' whose properties + * need to be initialized. + * + * Right now, only "repeat" and "invert" are supported. + * + * Return: The opcode extension. + */ +static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor, + const struct spi_mem_op *op) +{ + switch (nor->cmd_ext_type) { + case SPI_NOR_EXT_INVERT: + return ~op->cmd.opcode; + + case SPI_NOR_EXT_REPEAT: + return op->cmd.opcode; + + default: + dev_err(nor->dev, "Unknown command extension type\n"); + return 0; + } +} + +/** + * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op. + * @nor: pointer to a 'struct spi_nor' + * @op: pointer to the 'struct spi_mem_op' whose properties + * need to be initialized. + * @proto: the protocol from which the properties need to be set. + */ +void spi_nor_spimem_setup_op(const struct spi_nor *nor, + struct spi_mem_op *op, + const enum spi_nor_protocol proto) +{ + u8 ext; + + op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto); + + if (op->addr.nbytes) + op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto); + + if (op->dummy.nbytes) + op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto); + + if (op->data.nbytes) + op->data.buswidth = spi_nor_get_protocol_data_nbits(proto); + + if (spi_nor_protocol_is_dtr(proto)) { + /* + * SPIMEM supports mixed DTR modes, but right now we can only + * have all phases either DTR or STR. IOW, SPIMEM can have + * something like 4S-4D-4D, but SPI NOR can't. So, set all 4 + * phases to either DTR or STR. 
+ */ + op->cmd.dtr = true; + op->addr.dtr = true; + op->dummy.dtr = true; + op->data.dtr = true; + + /* 2 bytes per clock cycle in DTR mode. */ + op->dummy.nbytes *= 2; + + ext = spi_nor_get_cmd_ext(nor, op); + op->cmd.opcode = (op->cmd.opcode << 8) | ext; + op->cmd.nbytes = 2; + } +} + +/** + * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data + * transfer + * @nor: pointer to 'struct spi_nor' + * @op: pointer to 'struct spi_mem_op' template for transfer + * + * If we have to use the bounce buffer, the data field in @op will be updated. + * + * Return: true if the bounce buffer is needed, false if not + */ +static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op) +{ + /* op->data.buf.in occupies the same memory as op->data.buf.out */ + if (object_is_on_stack(op->data.buf.in) || + !virt_addr_valid(op->data.buf.in)) { + if (op->data.nbytes > nor->bouncebuf_size) + op->data.nbytes = nor->bouncebuf_size; + op->data.buf.in = nor->bouncebuf; + return true; + } + + return false; +} + +/** + * spi_nor_spimem_exec_op() - execute a memory operation + * @nor: pointer to 'struct spi_nor' + * @op: pointer to 'struct spi_mem_op' template for transfer + * + * Return: 0 on success, -error otherwise. + */ +static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op) +{ + int error; + + error = spi_mem_adjust_op_size(nor->spimem, op); + if (error) + return error; + + return spi_mem_exec_op(nor->spimem, op); +} + +int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode, + u8 *buf, size_t len) +{ + if (spi_nor_protocol_is_dtr(nor->reg_proto)) + return -EOPNOTSUPP; + + return nor->controller_ops->read_reg(nor, opcode, buf, len); +} + +int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode, + const u8 *buf, size_t len) +{ + if (spi_nor_protocol_is_dtr(nor->reg_proto)) + return -EOPNOTSUPP; + + return nor->controller_ops->write_reg(nor, opcode, buf, len); +} + +static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs) +{ + if (spi_nor_protocol_is_dtr(nor->reg_proto)) + return -EOPNOTSUPP; + + return nor->controller_ops->erase(nor, offs); +} + +/** + * spi_nor_spimem_read_data() - read data from flash's memory region via + * spi-mem + * @nor: pointer to 'struct spi_nor' + * @from: offset to read from + * @len: number of bytes to read + * @buf: pointer to dst buffer + * + * Return: number of bytes read successfully, -errno otherwise + */ +static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from, + size_t len, u8 *buf) +{ + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0), + SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0), + SPI_MEM_OP_DUMMY(nor->read_dummy, 0), + SPI_MEM_OP_DATA_IN(len, buf, 0)); + bool usebouncebuf; + ssize_t nbytes; + int error; + + spi_nor_spimem_setup_op(nor, &op, nor->read_proto); + + /* convert the dummy cycles to the number of bytes */ + op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8; + if (spi_nor_protocol_is_dtr(nor->read_proto)) + op.dummy.nbytes *= 2; + + usebouncebuf = spi_nor_spimem_bounce(nor, &op); + + if (nor->dirmap.rdesc) { + nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val, + op.data.nbytes, op.data.buf.in); + } else { + error = spi_nor_spimem_exec_op(nor, &op); + if (error) + return error; + nbytes = op.data.nbytes; + } + + if (usebouncebuf && nbytes > 0) + memcpy(buf, op.data.buf.in, nbytes); + + return nbytes; +} + +/** + * spi_nor_read_data() - read data from flash memory + * @nor: pointer to 'struct spi_nor' + * 
@from: offset to read from + * @len: number of bytes to read + * @buf: pointer to dst buffer + * + * Return: number of bytes read successfully, -errno otherwise + */ +ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf) +{ + if (nor->spimem) + return spi_nor_spimem_read_data(nor, from, len, buf); + + return nor->controller_ops->read(nor, from, len, buf); +} + +/** + * spi_nor_spimem_write_data() - write data to flash memory via + * spi-mem + * @nor: pointer to 'struct spi_nor' + * @to: offset to write to + * @len: number of bytes to write + * @buf: pointer to src buffer + * + * Return: number of bytes written successfully, -errno otherwise + */ +static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to, + size_t len, const u8 *buf) +{ + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0), + SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(len, buf, 0)); + ssize_t nbytes; + int error; + + if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second) + op.addr.nbytes = 0; + + spi_nor_spimem_setup_op(nor, &op, nor->write_proto); + + if (spi_nor_spimem_bounce(nor, &op)) + memcpy(nor->bouncebuf, buf, op.data.nbytes); + + if (nor->dirmap.wdesc) { + nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val, + op.data.nbytes, op.data.buf.out); + } else { + error = spi_nor_spimem_exec_op(nor, &op); + if (error) + return error; + nbytes = op.data.nbytes; + } + + return nbytes; +} + +/** + * spi_nor_write_data() - write data to flash memory + * @nor: pointer to 'struct spi_nor' + * @to: offset to write to + * @len: number of bytes to write + * @buf: pointer to src buffer + * + * Return: number of bytes written successfully, -errno otherwise + */ +ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len, + const u8 *buf) +{ + if (nor->spimem) + return spi_nor_spimem_write_data(nor, to, len, buf); + + return nor->controller_ops->write(nor, to, len, buf); +} + +/** + * spi_nor_read_any_reg() - read any register from flash memory, nonvolatile or + * volatile. + * @nor: pointer to 'struct spi_nor'. + * @op: SPI memory operation. op->data.buf must be DMA-able. + * @proto: SPI protocol to use for the register operation. + * + * Return: zero on success, -errno otherwise + */ +int spi_nor_read_any_reg(struct spi_nor *nor, struct spi_mem_op *op, + enum spi_nor_protocol proto) +{ + if (!nor->spimem) + return -EOPNOTSUPP; + + spi_nor_spimem_setup_op(nor, op, proto); + return spi_nor_spimem_exec_op(nor, op); +} + +/** + * spi_nor_write_any_volatile_reg() - write any volatile register to flash + * memory. + * @nor: pointer to 'struct spi_nor' + * @op: SPI memory operation. op->data.buf must be DMA-able. + * @proto: SPI protocol to use for the register operation. + * + * Writing volatile registers are instant according to some manufacturers + * (Cypress, Micron) and do not need any status polling. + * + * Return: zero on success, -errno otherwise + */ +int spi_nor_write_any_volatile_reg(struct spi_nor *nor, struct spi_mem_op *op, + enum spi_nor_protocol proto) +{ + int ret; + + if (!nor->spimem) + return -EOPNOTSUPP; + + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + spi_nor_spimem_setup_op(nor, op, proto); + return spi_nor_spimem_exec_op(nor, op); +} + +/** + * spi_nor_write_enable() - Set write enable latch with Write Enable command. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 0 on success, -errno otherwise. 
+ */ +int spi_nor_write_enable(struct spi_nor *nor) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = SPI_NOR_WREN_OP; + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN, + NULL, 0); + } + + if (ret) + dev_dbg(nor->dev, "error %d on Write Enable\n", ret); + + return ret; +} + +/** + * spi_nor_write_disable() - Send Write Disable instruction to the chip. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_write_disable(struct spi_nor *nor) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = SPI_NOR_WRDI_OP; + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI, + NULL, 0); + } + + if (ret) + dev_dbg(nor->dev, "error %d on Write Disable\n", ret); + + return ret; +} + +/** + * spi_nor_read_id() - Read the JEDEC ID. + * @nor: pointer to 'struct spi_nor'. + * @naddr: number of address bytes to send. Can be zero if the operation + * does not need to send an address. + * @ndummy: number of dummy bytes to send after an opcode or address. Can + * be zero if the operation does not require dummy bytes. + * @id: pointer to a DMA-able buffer where the value of the JEDEC ID + * will be written. + * @proto: the SPI protocol for register operation. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_read_id(struct spi_nor *nor, u8 naddr, u8 ndummy, u8 *id, + enum spi_nor_protocol proto) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_NOR_READID_OP(naddr, ndummy, id, SPI_NOR_MAX_ID_LEN); + + spi_nor_spimem_setup_op(nor, &op, proto); + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id, + SPI_NOR_MAX_ID_LEN); + } + return ret; +} + +/** + * spi_nor_read_sr() - Read the Status Register. + * @nor: pointer to 'struct spi_nor'. + * @sr: pointer to a DMA-able buffer where the value of the + * Status Register will be written. Should be at least 2 bytes. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_read_sr(struct spi_nor *nor, u8 *sr) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = SPI_NOR_RDSR_OP(sr); + + if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) { + op.addr.nbytes = nor->params->rdsr_addr_nbytes; + op.dummy.nbytes = nor->params->rdsr_dummy; + /* + * We don't want to read only one byte in DTR mode. So, + * read 2 and then discard the second byte. + */ + op.data.nbytes = 2; + } + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr, + 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d reading SR\n", ret); + + return ret; +} + +/** + * spi_nor_read_cr() - Read the Configuration Register using the + * SPINOR_OP_RDCR (35h) command. + * @nor: pointer to 'struct spi_nor' + * @cr: pointer to a DMA-able buffer where the value of the + * Configuration Register will be written. + * + * Return: 0 on success, -errno otherwise. 
+ */ +int spi_nor_read_cr(struct spi_nor *nor, u8 *cr) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = SPI_NOR_RDCR_OP(cr); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr, + 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d reading CR\n", ret); + + return ret; +} + +/** + * spi_nor_set_4byte_addr_mode_en4b_ex4b() - Enter/Exit 4-byte address mode + * using SPINOR_OP_EN4B/SPINOR_OP_EX4B. Typically used by + * Winbond and Macronix. + * @nor: pointer to 'struct spi_nor'. + * @enable: true to enter the 4-byte address mode, false to exit the 4-byte + * address mode. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_set_4byte_addr_mode_en4b_ex4b(struct spi_nor *nor, bool enable) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = SPI_NOR_EN4B_EX4B_OP(enable); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_write_reg(nor, + enable ? SPINOR_OP_EN4B : + SPINOR_OP_EX4B, + NULL, 0); + } + + if (ret) + dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret); + + return ret; +} + +/** + * spi_nor_set_4byte_addr_mode_wren_en4b_ex4b() - Set 4-byte address mode using + * SPINOR_OP_WREN followed by SPINOR_OP_EN4B or SPINOR_OP_EX4B. Typically used + * by ST and Micron flashes. + * @nor: pointer to 'struct spi_nor'. + * @enable: true to enter the 4-byte address mode, false to exit the 4-byte + * address mode. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_set_4byte_addr_mode_wren_en4b_ex4b(struct spi_nor *nor, bool enable) +{ + int ret; + + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + + ret = spi_nor_set_4byte_addr_mode_en4b_ex4b(nor, enable); + if (ret) + return ret; + + return spi_nor_write_disable(nor); +} + +/** + * spi_nor_set_4byte_addr_mode_brwr() - Set 4-byte address mode using + * SPINOR_OP_BRWR. Typically used by Spansion flashes. + * @nor: pointer to 'struct spi_nor'. + * @enable: true to enter the 4-byte address mode, false to exit the 4-byte + * address mode. + * + * 8-bit volatile bank register used to define A[30:A24] bits. MSB (bit[7]) is + * used to enable/disable 4-byte address mode. When MSB is set to ‘1’, 4-byte + * address mode is active and A[30:24] bits are don’t care. Write instruction is + * SPINOR_OP_BRWR(17h) with 1 byte of data. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_set_4byte_addr_mode_brwr(struct spi_nor *nor, bool enable) +{ + int ret; + + nor->bouncebuf[0] = enable << 7; + + if (nor->spimem) { + struct spi_mem_op op = SPI_NOR_BRWR_OP(nor->bouncebuf); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR, + nor->bouncebuf, 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret); + + return ret; +} + +/** + * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready + * for new commands. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 1 if ready, 0 if not ready, -errno on errors. + */ +int spi_nor_sr_ready(struct spi_nor *nor) +{ + int ret; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + return !(nor->bouncebuf[0] & SR_WIP); +} + +/** + * spi_nor_use_parallel_locking() - Checks if RWW locking scheme shall be used + * @nor: pointer to 'struct spi_nor'. 
+ * + * Return: true if parallel locking is enabled, false otherwise. + */ +static bool spi_nor_use_parallel_locking(struct spi_nor *nor) +{ + return nor->flags & SNOR_F_RWW; +} + +/* Locking helpers for status read operations */ +static int spi_nor_rww_start_rdst(struct spi_nor *nor) +{ + struct spi_nor_rww *rww = &nor->rww; + int ret = -EAGAIN; + + mutex_lock(&nor->lock); + + if (rww->ongoing_io || rww->ongoing_rd) + goto busy; + + rww->ongoing_io = true; + rww->ongoing_rd = true; + ret = 0; + +busy: + mutex_unlock(&nor->lock); + return ret; +} + +static void spi_nor_rww_end_rdst(struct spi_nor *nor) +{ + struct spi_nor_rww *rww = &nor->rww; + + mutex_lock(&nor->lock); + + rww->ongoing_io = false; + rww->ongoing_rd = false; + + mutex_unlock(&nor->lock); +} + +static int spi_nor_lock_rdst(struct spi_nor *nor) +{ + if (spi_nor_use_parallel_locking(nor)) + return spi_nor_rww_start_rdst(nor); + + return 0; +} + +static void spi_nor_unlock_rdst(struct spi_nor *nor) +{ + if (spi_nor_use_parallel_locking(nor)) { + spi_nor_rww_end_rdst(nor); + wake_up(&nor->rww.wait); + } +} + +/** + * spi_nor_ready() - Query the flash to see if it is ready for new commands. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 1 if ready, 0 if not ready, -errno on errors. + */ +static int spi_nor_ready(struct spi_nor *nor) +{ + int ret; + + ret = spi_nor_lock_rdst(nor); + if (ret) + return 0; + + /* Flashes might override the standard routine. */ + if (nor->params->ready) + ret = nor->params->ready(nor); + else + ret = spi_nor_sr_ready(nor); + + spi_nor_unlock_rdst(nor); + + return ret; +} + +/** + * spi_nor_wait_till_ready_with_timeout() - Service routine to read the + * Status Register until ready, or timeout occurs. + * @nor: pointer to "struct spi_nor". + * @timeout_jiffies: jiffies to wait until timeout. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor, + unsigned long timeout_jiffies) +{ + unsigned long deadline; + int timeout = 0, ret; + + deadline = jiffies + timeout_jiffies; + + while (!timeout) { + if (time_after_eq(jiffies, deadline)) + timeout = 1; + + ret = spi_nor_ready(nor); + if (ret < 0) + return ret; + if (ret) + return 0; + + cond_resched(); + } + + dev_dbg(nor->dev, "flash operation timed out\n"); + + return -ETIMEDOUT; +} + +/** + * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the + * flash to be ready, or timeout occurs. + * @nor: pointer to "struct spi_nor". + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_wait_till_ready(struct spi_nor *nor) +{ + return spi_nor_wait_till_ready_with_timeout(nor, + DEFAULT_READY_WAIT_JIFFIES); +} + +/** + * spi_nor_global_block_unlock() - Unlock Global Block Protection. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_global_block_unlock(struct spi_nor *nor) +{ + int ret; + + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + + if (nor->spimem) { + struct spi_mem_op op = SPI_NOR_GBULK_OP; + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK, + NULL, 0); + } + + if (ret) { + dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret); + return ret; + } + + return spi_nor_wait_till_ready(nor); +} + +/** + * spi_nor_write_sr() - Write the Status Register. + * @nor: pointer to 'struct spi_nor'. + * @sr: pointer to DMA-able buffer to write to the Status Register. 
+ * @len: number of bytes to write to the Status Register. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len) +{ + int ret; + + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + + if (nor->spimem) { + struct spi_mem_op op = SPI_NOR_WRSR_OP(sr, len); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr, + len); + } + + if (ret) { + dev_dbg(nor->dev, "error %d writing SR\n", ret); + return ret; + } + + return spi_nor_wait_till_ready(nor); +} + +/** + * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and + * ensure that the byte written match the received value. + * @nor: pointer to a 'struct spi_nor'. + * @sr1: byte value to be written to the Status Register. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1) +{ + int ret; + + nor->bouncebuf[0] = sr1; + + ret = spi_nor_write_sr(nor, nor->bouncebuf, 1); + if (ret) + return ret; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + if (nor->bouncebuf[0] != sr1) { + dev_dbg(nor->dev, "SR1: read back test failed\n"); + return -EIO; + } + + return 0; +} + +/** + * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the + * Status Register 2 in one shot. Ensure that the byte written in the Status + * Register 1 match the received value, and that the 16-bit Write did not + * affect what was already in the Status Register 2. + * @nor: pointer to a 'struct spi_nor'. + * @sr1: byte value to be written to the Status Register 1. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1) +{ + int ret; + u8 *sr_cr = nor->bouncebuf; + u8 cr_written; + + /* Make sure we don't overwrite the contents of Status Register 2. */ + if (!(nor->flags & SNOR_F_NO_READ_CR)) { + ret = spi_nor_read_cr(nor, &sr_cr[1]); + if (ret) + return ret; + } else if (spi_nor_get_protocol_width(nor->read_proto) == 4 && + spi_nor_get_protocol_width(nor->write_proto) == 4 && + nor->params->quad_enable) { + /* + * If the Status Register 2 Read command (35h) is not + * supported, we should at least be sure we don't + * change the value of the SR2 Quad Enable bit. + * + * When the Quad Enable method is set and the buswidth is 4, we + * can safely assume that the value of the QE bit is one, as a + * consequence of the nor->params->quad_enable() call. + * + * According to the JESD216 revB standard, BFPT DWORDS[15], + * bits 22:20, the 16-bit Write Status (01h) command is + * available just for the cases in which the QE bit is + * described in SR2 at BIT(1). + */ + sr_cr[1] = SR2_QUAD_EN_BIT1; + } else { + sr_cr[1] = 0; + } + + sr_cr[0] = sr1; + + ret = spi_nor_write_sr(nor, sr_cr, 2); + if (ret) + return ret; + + ret = spi_nor_read_sr(nor, sr_cr); + if (ret) + return ret; + + if (sr1 != sr_cr[0]) { + dev_dbg(nor->dev, "SR: Read back test failed\n"); + return -EIO; + } + + if (nor->flags & SNOR_F_NO_READ_CR) + return 0; + + cr_written = sr_cr[1]; + + ret = spi_nor_read_cr(nor, &sr_cr[1]); + if (ret) + return ret; + + if (cr_written != sr_cr[1]) { + dev_dbg(nor->dev, "CR: read back test failed\n"); + return -EIO; + } + + return 0; +} + +/** + * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the + * Configuration Register in one shot. 
Ensure that the byte written in the + * Configuration Register match the received value, and that the 16-bit Write + * did not affect what was already in the Status Register 1. + * @nor: pointer to a 'struct spi_nor'. + * @cr: byte value to be written to the Configuration Register. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr) +{ + int ret; + u8 *sr_cr = nor->bouncebuf; + u8 sr_written; + + /* Keep the current value of the Status Register 1. */ + ret = spi_nor_read_sr(nor, sr_cr); + if (ret) + return ret; + + sr_cr[1] = cr; + + ret = spi_nor_write_sr(nor, sr_cr, 2); + if (ret) + return ret; + + sr_written = sr_cr[0]; + + ret = spi_nor_read_sr(nor, sr_cr); + if (ret) + return ret; + + if (sr_written != sr_cr[0]) { + dev_dbg(nor->dev, "SR: Read back test failed\n"); + return -EIO; + } + + if (nor->flags & SNOR_F_NO_READ_CR) + return 0; + + ret = spi_nor_read_cr(nor, &sr_cr[1]); + if (ret) + return ret; + + if (cr != sr_cr[1]) { + dev_dbg(nor->dev, "CR: read back test failed\n"); + return -EIO; + } + + return 0; +} + +/** + * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that + * the byte written match the received value without affecting other bits in the + * Status Register 1 and 2. + * @nor: pointer to a 'struct spi_nor'. + * @sr1: byte value to be written to the Status Register. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1) +{ + if (nor->flags & SNOR_F_HAS_16BIT_SR) + return spi_nor_write_16bit_sr_and_check(nor, sr1); + + return spi_nor_write_sr1_and_check(nor, sr1); +} + +/** + * spi_nor_write_sr2() - Write the Status Register 2 using the + * SPINOR_OP_WRSR2 (3eh) command. + * @nor: pointer to 'struct spi_nor'. + * @sr2: pointer to DMA-able buffer to write to the Status Register 2. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2) +{ + int ret; + + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + + if (nor->spimem) { + struct spi_mem_op op = SPI_NOR_WRSR2_OP(sr2); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2, + sr2, 1); + } + + if (ret) { + dev_dbg(nor->dev, "error %d writing SR2\n", ret); + return ret; + } + + return spi_nor_wait_till_ready(nor); +} + +/** + * spi_nor_read_sr2() - Read the Status Register 2 using the + * SPINOR_OP_RDSR2 (3fh) command. + * @nor: pointer to 'struct spi_nor'. + * @sr2: pointer to DMA-able buffer where the value of the + * Status Register 2 will be written. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = SPI_NOR_RDSR2_OP(sr2); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2, + 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d reading SR2\n", ret); + + return ret; +} + +/** + * spi_nor_erase_chip() - Erase the entire flash memory. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 0 on success, -errno otherwise. 
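
The status-register write-and-verify helpers above all share one idea: capture whatever must be preserved (here Status Register 2 and its Quad Enable bit), issue the combined write, then read back and compare. A small stand-alone model of that read-modify-write-verify flow; the in-memory "flash" registers are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define SR2_QUAD_EN_BIT1  (1u << 1)

/* Toy register file standing in for the flash; purely illustrative. */
static uint8_t flash_sr1, flash_sr2 = SR2_QUAD_EN_BIT1;

static void write_sr_16bit(const uint8_t sr_cr[2])
{
	flash_sr1 = sr_cr[0];
	flash_sr2 = sr_cr[1];
}

/* Mirrors spi_nor_write_16bit_sr_and_check(): preserve SR2, then verify. */
static int write_sr1_keep_sr2(uint8_t new_sr1)
{
	uint8_t sr_cr[2];

	sr_cr[1] = flash_sr2;	/* read CR/SR2 first so the 16-bit write keeps it */
	sr_cr[0] = new_sr1;

	write_sr_16bit(sr_cr);

	if (flash_sr1 != new_sr1 || flash_sr2 != sr_cr[1])
		return -1;	/* read-back test failed */

	return 0;
}

int main(void)
{
	int ret = write_sr1_keep_sr2(0x1c);

	printf("ret=%d SR1=0x%02x SR2=0x%02x (QE still set: %d)\n",
	       ret, flash_sr1, flash_sr2, !!(flash_sr2 & SR2_QUAD_EN_BIT1));
	return 0;
}
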
+ */ +static int spi_nor_erase_chip(struct spi_nor *nor) +{ + int ret; + + dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10)); + + if (nor->spimem) { + struct spi_mem_op op = SPI_NOR_CHIP_ERASE_OP; + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_write_reg(nor, + SPINOR_OP_CHIP_ERASE, + NULL, 0); + } + + if (ret) + dev_dbg(nor->dev, "error %d erasing chip\n", ret); + + return ret; +} + +static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size) +{ + size_t i; + + for (i = 0; i < size; i++) + if (table[i][0] == opcode) + return table[i][1]; + + /* No conversion found, keep input op code. */ + return opcode; +} + +u8 spi_nor_convert_3to4_read(u8 opcode) +{ + static const u8 spi_nor_3to4_read[][2] = { + { SPINOR_OP_READ, SPINOR_OP_READ_4B }, + { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B }, + { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B }, + { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B }, + { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B }, + { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B }, + { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B }, + { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B }, + + { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B }, + { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B }, + { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B }, + }; + + return spi_nor_convert_opcode(opcode, spi_nor_3to4_read, + ARRAY_SIZE(spi_nor_3to4_read)); +} + +static u8 spi_nor_convert_3to4_program(u8 opcode) +{ + static const u8 spi_nor_3to4_program[][2] = { + { SPINOR_OP_PP, SPINOR_OP_PP_4B }, + { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B }, + { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B }, + { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B }, + { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B }, + }; + + return spi_nor_convert_opcode(opcode, spi_nor_3to4_program, + ARRAY_SIZE(spi_nor_3to4_program)); +} + +static u8 spi_nor_convert_3to4_erase(u8 opcode) +{ + static const u8 spi_nor_3to4_erase[][2] = { + { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B }, + { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B }, + { SPINOR_OP_SE, SPINOR_OP_SE_4B }, + }; + + return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase, + ARRAY_SIZE(spi_nor_3to4_erase)); +} + +static bool spi_nor_has_uniform_erase(const struct spi_nor *nor) +{ + return !!nor->params->erase_map.uniform_erase_type; +} + +static void spi_nor_set_4byte_opcodes(struct spi_nor *nor) +{ + nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode); + nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode); + nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode); + + if (!spi_nor_has_uniform_erase(nor)) { + struct spi_nor_erase_map *map = &nor->params->erase_map; + struct spi_nor_erase_type *erase; + int i; + + for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) { + erase = &map->erase_type[i]; + erase->opcode = + spi_nor_convert_3to4_erase(erase->opcode); + } + } +} + +static int spi_nor_prep(struct spi_nor *nor) +{ + int ret = 0; + + if (nor->controller_ops && nor->controller_ops->prepare) + ret = nor->controller_ops->prepare(nor); + + return ret; +} + +static void spi_nor_unprep(struct spi_nor *nor) +{ + if (nor->controller_ops && nor->controller_ops->unprepare) + nor->controller_ops->unprepare(nor); +} + +static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len, + u8 *first, u8 *last) +{ + /* This is currently safe, the number of banks being very small */ + *first = 
DIV_ROUND_DOWN_ULL(start, bank_size); + *last = DIV_ROUND_DOWN_ULL(start + len - 1, bank_size); +} + +/* Generic helpers for internal locking and serialization */ +static bool spi_nor_rww_start_io(struct spi_nor *nor) +{ + struct spi_nor_rww *rww = &nor->rww; + bool start = false; + + mutex_lock(&nor->lock); + + if (rww->ongoing_io) + goto busy; + + rww->ongoing_io = true; + start = true; + +busy: + mutex_unlock(&nor->lock); + return start; +} + +static void spi_nor_rww_end_io(struct spi_nor *nor) +{ + mutex_lock(&nor->lock); + nor->rww.ongoing_io = false; + mutex_unlock(&nor->lock); +} + +static int spi_nor_lock_device(struct spi_nor *nor) +{ + if (!spi_nor_use_parallel_locking(nor)) + return 0; + + return wait_event_killable(nor->rww.wait, spi_nor_rww_start_io(nor)); +} + +static void spi_nor_unlock_device(struct spi_nor *nor) +{ + if (spi_nor_use_parallel_locking(nor)) { + spi_nor_rww_end_io(nor); + wake_up(&nor->rww.wait); + } +} + +/* Generic helpers for internal locking and serialization */ +static bool spi_nor_rww_start_exclusive(struct spi_nor *nor) +{ + struct spi_nor_rww *rww = &nor->rww; + bool start = false; + + mutex_lock(&nor->lock); + + if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe) + goto busy; + + rww->ongoing_io = true; + rww->ongoing_rd = true; + rww->ongoing_pe = true; + start = true; + +busy: + mutex_unlock(&nor->lock); + return start; +} + +static void spi_nor_rww_end_exclusive(struct spi_nor *nor) +{ + struct spi_nor_rww *rww = &nor->rww; + + mutex_lock(&nor->lock); + rww->ongoing_io = false; + rww->ongoing_rd = false; + rww->ongoing_pe = false; + mutex_unlock(&nor->lock); +} + +int spi_nor_prep_and_lock(struct spi_nor *nor) +{ + int ret; + + ret = spi_nor_prep(nor); + if (ret) + return ret; + + if (!spi_nor_use_parallel_locking(nor)) + mutex_lock(&nor->lock); + else + ret = wait_event_killable(nor->rww.wait, + spi_nor_rww_start_exclusive(nor)); + + return ret; +} + +void spi_nor_unlock_and_unprep(struct spi_nor *nor) +{ + if (!spi_nor_use_parallel_locking(nor)) { + mutex_unlock(&nor->lock); + } else { + spi_nor_rww_end_exclusive(nor); + wake_up(&nor->rww.wait); + } + + spi_nor_unprep(nor); +} + +/* Internal locking helpers for program and erase operations */ +static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len) +{ + struct spi_nor_rww *rww = &nor->rww; + unsigned int used_banks = 0; + bool started = false; + u8 first, last; + int bank; + + mutex_lock(&nor->lock); + + if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe) + goto busy; + + spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last); + for (bank = first; bank <= last; bank++) { + if (rww->used_banks & BIT(bank)) + goto busy; + + used_banks |= BIT(bank); + } + + rww->used_banks |= used_banks; + rww->ongoing_pe = true; + started = true; + +busy: + mutex_unlock(&nor->lock); + return started; +} + +static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len) +{ + struct spi_nor_rww *rww = &nor->rww; + u8 first, last; + int bank; + + mutex_lock(&nor->lock); + + spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last); + for (bank = first; bank <= last; bank++) + rww->used_banks &= ~BIT(bank); + + rww->ongoing_pe = false; + + mutex_unlock(&nor->lock); +} + +static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len) +{ + int ret; + + ret = spi_nor_prep(nor); + if (ret) + return ret; + + if (!spi_nor_use_parallel_locking(nor)) + mutex_lock(&nor->lock); + else + ret = 
wait_event_killable(nor->rww.wait, + spi_nor_rww_start_pe(nor, start, len)); + + return ret; +} + +static void spi_nor_unlock_and_unprep_pe(struct spi_nor *nor, loff_t start, size_t len) +{ + if (!spi_nor_use_parallel_locking(nor)) { + mutex_unlock(&nor->lock); + } else { + spi_nor_rww_end_pe(nor, start, len); + wake_up(&nor->rww.wait); + } + + spi_nor_unprep(nor); +} + +/* Internal locking helpers for read operations */ +static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len) +{ + struct spi_nor_rww *rww = &nor->rww; + unsigned int used_banks = 0; + bool started = false; + u8 first, last; + int bank; + + mutex_lock(&nor->lock); + + if (rww->ongoing_io || rww->ongoing_rd) + goto busy; + + spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last); + for (bank = first; bank <= last; bank++) { + if (rww->used_banks & BIT(bank)) + goto busy; + + used_banks |= BIT(bank); + } + + rww->used_banks |= used_banks; + rww->ongoing_io = true; + rww->ongoing_rd = true; + started = true; + +busy: + mutex_unlock(&nor->lock); + return started; +} + +static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len) +{ + struct spi_nor_rww *rww = &nor->rww; + u8 first, last; + int bank; + + mutex_lock(&nor->lock); + + spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last); + for (bank = first; bank <= last; bank++) + nor->rww.used_banks &= ~BIT(bank); + + rww->ongoing_io = false; + rww->ongoing_rd = false; + + mutex_unlock(&nor->lock); +} + +static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len) +{ + int ret; + + ret = spi_nor_prep(nor); + if (ret) + return ret; + + if (!spi_nor_use_parallel_locking(nor)) + mutex_lock(&nor->lock); + else + ret = wait_event_killable(nor->rww.wait, + spi_nor_rww_start_rd(nor, start, len)); + + return ret; +} + +static void spi_nor_unlock_and_unprep_rd(struct spi_nor *nor, loff_t start, size_t len) +{ + if (!spi_nor_use_parallel_locking(nor)) { + mutex_unlock(&nor->lock); + } else { + spi_nor_rww_end_rd(nor, start, len); + wake_up(&nor->rww.wait); + } + + spi_nor_unprep(nor); +} + +static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr) +{ + if (!nor->params->convert_addr) + return addr; + + return nor->params->convert_addr(nor, addr); +} + +/* + * Initiate the erasure of a single sector + */ +int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) +{ + int i; + + addr = spi_nor_convert_addr(nor, addr); + + if (nor->spimem) { + struct spi_mem_op op = + SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode, + nor->addr_nbytes, addr); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + return spi_mem_exec_op(nor->spimem, &op); + } else if (nor->controller_ops->erase) { + return spi_nor_controller_ops_erase(nor, addr); + } + + /* + * Default implementation, if driver doesn't have a specialized HW + * control + */ + for (i = nor->addr_nbytes - 1; i >= 0; i--) { + nor->bouncebuf[i] = addr & 0xff; + addr >>= 8; + } + + return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode, + nor->bouncebuf, nor->addr_nbytes); +} + +/** + * spi_nor_div_by_erase_size() - calculate remainder and update new dividend + * @erase: pointer to a structure that describes a SPI NOR erase type + * @dividend: dividend value + * @remainder: pointer to u32 remainder (will be updated) + * + * Return: the result of the division + */ +static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase, + u64 dividend, u32 *remainder) +{ + /* JEDEC JESD216B Standard imposes erase sizes to be power 
of 2. */ + *remainder = (u32)dividend & erase->size_mask; + return dividend >> erase->size_shift; +} + +/** + * spi_nor_find_best_erase_type() - find the best erase type for the given + * offset in the serial flash memory and the + * number of bytes to erase. The region in + * which the address fits is expected to be + * provided. + * @map: the erase map of the SPI NOR + * @region: pointer to a structure that describes a SPI NOR erase region + * @addr: offset in the serial flash memory + * @len: number of bytes to erase + * + * Return: a pointer to the best fitted erase type, NULL otherwise. + */ +static const struct spi_nor_erase_type * +spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map, + const struct spi_nor_erase_region *region, + u64 addr, u32 len) +{ + const struct spi_nor_erase_type *erase; + u32 rem; + int i; + u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK; + + /* + * Erase types are ordered by size, with the smallest erase type at + * index 0. + */ + for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { + /* Does the erase region support the tested erase type? */ + if (!(erase_mask & BIT(i))) + continue; + + erase = &map->erase_type[i]; + if (!erase->size) + continue; + + /* Alignment is not mandatory for overlaid regions */ + if (region->offset & SNOR_OVERLAID_REGION && + region->size <= len) + return erase; + + /* Don't erase more than what the user has asked for. */ + if (erase->size > len) + continue; + + spi_nor_div_by_erase_size(erase, addr, &rem); + if (!rem) + return erase; + } + + return NULL; +} + +static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region) +{ + return region->offset & SNOR_LAST_REGION; +} + +static u64 spi_nor_region_end(const struct spi_nor_erase_region *region) +{ + return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size; +} + +/** + * spi_nor_region_next() - get the next spi nor region + * @region: pointer to a structure that describes a SPI NOR erase region + * + * Return: the next spi nor region or NULL if last region. + */ +struct spi_nor_erase_region * +spi_nor_region_next(struct spi_nor_erase_region *region) +{ + if (spi_nor_region_is_last(region)) + return NULL; + region++; + return region; +} + +/** + * spi_nor_find_erase_region() - find the region of the serial flash memory in + * which the offset fits + * @map: the erase map of the SPI NOR + * @addr: offset in the serial flash memory + * + * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno) + * otherwise. + */ +static struct spi_nor_erase_region * +spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr) +{ + struct spi_nor_erase_region *region = map->regions; + u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK; + u64 region_end = region_start + region->size; + + while (addr < region_start || addr >= region_end) { + region = spi_nor_region_next(region); + if (!region) + return ERR_PTR(-EINVAL); + + region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK; + region_end = region_start + region->size; + } + + return region; +} + +/** + * spi_nor_init_erase_cmd() - initialize an erase command + * @region: pointer to a structure that describes a SPI NOR erase region + * @erase: pointer to a structure that describes a SPI NOR erase type + * + * Return: the pointer to the allocated erase command, ERR_PTR(-errno) + * otherwise. 
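
Because JESD216 guarantees power-of-two erase sizes, the divisibility test in spi_nor_div_by_erase_size() needs no 64-bit division: the remainder is a mask and the quotient a shift. A compact sketch of that arithmetic, deriving size_shift and size_mask the same way spi_nor_set_erase_type() does; __builtin_ctz (a GCC/Clang builtin) stands in for the kernel's ffs() - 1.

#include <stdint.h>
#include <stdio.h>

struct erase_type {
	uint32_t size;
	uint32_t size_shift;	/* ffs(size) - 1 */
	uint32_t size_mask;	/* size - 1, valid for power-of-two sizes */
};

static void set_erase_type(struct erase_type *e, uint32_t size)
{
	e->size = size;
	e->size_shift = (uint32_t)__builtin_ctz(size);
	e->size_mask = (1u << e->size_shift) - 1;
}

/* Mirrors spi_nor_div_by_erase_size(): quotient via shift, remainder via mask. */
static uint64_t div_by_erase_size(const struct erase_type *e, uint64_t dividend,
				  uint32_t *remainder)
{
	*remainder = (uint32_t)dividend & e->size_mask;
	return dividend >> e->size_shift;
}

int main(void)
{
	struct erase_type e;
	uint32_t rem;
	uint64_t q;

	set_erase_type(&e, 64 * 1024);
	q = div_by_erase_size(&e, 0x150000, &rem);
	printf("0x150000 / 64KiB = %llu, rem = %u\n",
	       (unsigned long long)q, rem);
	return 0;
}
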
+ */ +static struct spi_nor_erase_command * +spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region, + const struct spi_nor_erase_type *erase) +{ + struct spi_nor_erase_command *cmd; + + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&cmd->list); + cmd->opcode = erase->opcode; + cmd->count = 1; + + if (region->offset & SNOR_OVERLAID_REGION) + cmd->size = region->size; + else + cmd->size = erase->size; + + return cmd; +} + +/** + * spi_nor_destroy_erase_cmd_list() - destroy erase command list + * @erase_list: list of erase commands + */ +static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list) +{ + struct spi_nor_erase_command *cmd, *next; + + list_for_each_entry_safe(cmd, next, erase_list, list) { + list_del(&cmd->list); + kfree(cmd); + } +} + +/** + * spi_nor_init_erase_cmd_list() - initialize erase command list + * @nor: pointer to a 'struct spi_nor' + * @erase_list: list of erase commands to be executed once we validate that the + * erase can be performed + * @addr: offset in the serial flash memory + * @len: number of bytes to erase + * + * Builds the list of best fitted erase commands and verifies if the erase can + * be performed. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_init_erase_cmd_list(struct spi_nor *nor, + struct list_head *erase_list, + u64 addr, u32 len) +{ + const struct spi_nor_erase_map *map = &nor->params->erase_map; + const struct spi_nor_erase_type *erase, *prev_erase = NULL; + struct spi_nor_erase_region *region; + struct spi_nor_erase_command *cmd = NULL; + u64 region_end; + int ret = -EINVAL; + + region = spi_nor_find_erase_region(map, addr); + if (IS_ERR(region)) + return PTR_ERR(region); + + region_end = spi_nor_region_end(region); + + while (len) { + erase = spi_nor_find_best_erase_type(map, region, addr, len); + if (!erase) + goto destroy_erase_cmd_list; + + if (prev_erase != erase || + erase->size != cmd->size || + region->offset & SNOR_OVERLAID_REGION) { + cmd = spi_nor_init_erase_cmd(region, erase); + if (IS_ERR(cmd)) { + ret = PTR_ERR(cmd); + goto destroy_erase_cmd_list; + } + + list_add_tail(&cmd->list, erase_list); + } else { + cmd->count++; + } + + addr += cmd->size; + len -= cmd->size; + + if (len && addr >= region_end) { + region = spi_nor_region_next(region); + if (!region) + goto destroy_erase_cmd_list; + region_end = spi_nor_region_end(region); + } + + prev_erase = erase; + } + + return 0; + +destroy_erase_cmd_list: + spi_nor_destroy_erase_cmd_list(erase_list); + return ret; +} + +/** + * spi_nor_erase_multi_sectors() - perform a non-uniform erase + * @nor: pointer to a 'struct spi_nor' + * @addr: offset in the serial flash memory + * @len: number of bytes to erase + * + * Build a list of best fitted erase commands and execute it once we validate + * that the erase can be performed. + * + * Return: 0 on success, -errno otherwise. 
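
spi_nor_init_erase_cmd_list() avoids one list entry per sector: consecutive erases that reuse the same opcode and size only bump the previous command's count. A simplified sketch of that coalescing step, ignoring overlaid regions and region boundaries; the opcode values are shown purely for illustration.

#include <stdint.h>
#include <stdio.h>

struct erase_cmd {
	uint8_t opcode;
	uint32_t size;
	uint32_t count;
};

/*
 * Fold consecutive erases with the same opcode and size into a single
 * command with an incremented count, as the list builder above does.
 */
static size_t build_cmd_list(const uint8_t *opcodes, const uint32_t *sizes,
			     size_t n, struct erase_cmd *out)
{
	size_t i, used = 0;

	for (i = 0; i < n; i++) {
		if (used && out[used - 1].opcode == opcodes[i] &&
		    out[used - 1].size == sizes[i]) {
			out[used - 1].count++;
			continue;
		}
		out[used].opcode = opcodes[i];
		out[used].size = sizes[i];
		out[used].count = 1;
		used++;
	}

	return used;
}

int main(void)
{
	/* e.g. two 4 KiB erases followed by three 64 KiB erases (made-up plan) */
	uint8_t ops[] = { 0x20, 0x20, 0xd8, 0xd8, 0xd8 };
	uint32_t sizes[] = { 4096, 4096, 65536, 65536, 65536 };
	struct erase_cmd cmds[5];
	size_t i, n = build_cmd_list(ops, sizes, 5, cmds);

	for (i = 0; i < n; i++)
		printf("opcode 0x%02x, size %u, count %u\n",
		       cmds[i].opcode, (unsigned)cmds[i].size,
		       (unsigned)cmds[i].count);
	return 0;
}
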
+ */ +static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len) +{ + LIST_HEAD(erase_list); + struct spi_nor_erase_command *cmd, *next; + int ret; + + ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len); + if (ret) + return ret; + + list_for_each_entry_safe(cmd, next, &erase_list, list) { + nor->erase_opcode = cmd->opcode; + while (cmd->count) { + dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n", + cmd->size, cmd->opcode, cmd->count); + + ret = spi_nor_lock_device(nor); + if (ret) + goto destroy_erase_cmd_list; + + ret = spi_nor_write_enable(nor); + if (ret) { + spi_nor_unlock_device(nor); + goto destroy_erase_cmd_list; + } + + ret = spi_nor_erase_sector(nor, addr); + spi_nor_unlock_device(nor); + if (ret) + goto destroy_erase_cmd_list; + + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto destroy_erase_cmd_list; + + addr += cmd->size; + cmd->count--; + } + list_del(&cmd->list); + kfree(cmd); + } + + return 0; + +destroy_erase_cmd_list: + spi_nor_destroy_erase_cmd_list(&erase_list); + return ret; +} + +/* + * Erase an address range on the nor chip. The address range may extend + * one or more erase sectors. Return an error if there is a problem erasing. + */ +static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + u32 addr, len; + uint32_t rem; + int ret; + + dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr, + (long long)instr->len); + + if (spi_nor_has_uniform_erase(nor)) { + div_u64_rem(instr->len, mtd->erasesize, &rem); + if (rem) + return -EINVAL; + } + + addr = instr->addr; + len = instr->len; + + ret = spi_nor_prep_and_lock_pe(nor, instr->addr, instr->len); + if (ret) + return ret; + + /* whole-chip erase? */ + if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) { + unsigned long timeout; + + ret = spi_nor_lock_device(nor); + if (ret) + goto erase_err; + + ret = spi_nor_write_enable(nor); + if (ret) { + spi_nor_unlock_device(nor); + goto erase_err; + } + + ret = spi_nor_erase_chip(nor); + spi_nor_unlock_device(nor); + if (ret) + goto erase_err; + + /* + * Scale the timeout linearly with the size of the flash, with + * a minimum calibrated to an old 2MB flash. We could try to + * pull these from CFI/SFDP, but these values should be good + * enough for now. + */ + timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES, + CHIP_ERASE_2MB_READY_WAIT_JIFFIES * + (unsigned long)(mtd->size / SZ_2M)); + ret = spi_nor_wait_till_ready_with_timeout(nor, timeout); + if (ret) + goto erase_err; + + /* REVISIT in some cases we could speed up erasing large regions + * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up + * to use "small sector erase", but that's not always optimal. 
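
The chip-erase timeout above scales linearly with the flash size while never dropping below the value calibrated for a 2 MiB part. A small sketch of that arithmetic; the base of 40 time units is an arbitrary illustrative number, not the driver's CHIP_ERASE_2MB_READY_WAIT_JIFFIES.

#include <stdint.h>
#include <stdio.h>

#define SZ_2M  (2u * 1024 * 1024)

static uint64_t max_u64(uint64_t a, uint64_t b)
{
	return a > b ? a : b;
}

/*
 * Scale a base timeout (calibrated for a 2 MiB part) linearly with the flash
 * size, never going below the base value.
 */
static uint64_t chip_erase_timeout(uint64_t flash_size, uint64_t base)
{
	return max_u64(base, base * (flash_size / SZ_2M));
}

int main(void)
{
	printf("1 MiB:  %llu\n", (unsigned long long)chip_erase_timeout(1u << 20, 40));
	printf("64 MiB: %llu\n", (unsigned long long)chip_erase_timeout(64u << 20, 40));
	return 0;
}
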
+ */ + + /* "sector"-at-a-time erase */ + } else if (spi_nor_has_uniform_erase(nor)) { + while (len) { + ret = spi_nor_lock_device(nor); + if (ret) + goto erase_err; + + ret = spi_nor_write_enable(nor); + if (ret) { + spi_nor_unlock_device(nor); + goto erase_err; + } + + ret = spi_nor_erase_sector(nor, addr); + spi_nor_unlock_device(nor); + if (ret) + goto erase_err; + + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto erase_err; + + addr += mtd->erasesize; + len -= mtd->erasesize; + } + + /* erase multiple sectors */ + } else { + ret = spi_nor_erase_multi_sectors(nor, addr, len); + if (ret) + goto erase_err; + } + + ret = spi_nor_write_disable(nor); + +erase_err: + spi_nor_unlock_and_unprep_pe(nor, instr->addr, instr->len); + + return ret; +} + +/** + * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status + * Register 1. + * @nor: pointer to a 'struct spi_nor' + * + * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor) +{ + int ret; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6) + return 0; + + nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6; + + return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]); +} + +/** + * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status + * Register 2. + * @nor: pointer to a 'struct spi_nor'. + * + * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor) +{ + int ret; + + if (nor->flags & SNOR_F_NO_READ_CR) + return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1); + + ret = spi_nor_read_cr(nor, nor->bouncebuf); + if (ret) + return ret; + + if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1) + return 0; + + nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1; + + return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]); +} + +/** + * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2. + * @nor: pointer to a 'struct spi_nor' + * + * Set the Quad Enable (QE) bit in the Status Register 2. + * + * This is one of the procedures to set the QE bit described in the SFDP + * (JESD216 rev B) specification but no manufacturer using this procedure has + * been identified yet, hence the name of the function. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor) +{ + u8 *sr2 = nor->bouncebuf; + int ret; + u8 sr2_written; + + /* Check current Quad Enable bit value. */ + ret = spi_nor_read_sr2(nor, sr2); + if (ret) + return ret; + if (*sr2 & SR2_QUAD_EN_BIT7) + return 0; + + /* Update the Quad Enable bit. */ + *sr2 |= SR2_QUAD_EN_BIT7; + + ret = spi_nor_write_sr2(nor, sr2); + if (ret) + return ret; + + sr2_written = *sr2; + + /* Read back and check it. 
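
All three Quad Enable helpers are idempotent: they return early when the bit is already set, otherwise set it and confirm the change by reading back. A toy model of that pattern, roughly following the SR2-bit-7 variant; the register variable stands in for the real WRSR2/RDSR2 cycle.

#include <stdint.h>
#include <stdio.h>

#define QE_BIT  (1u << 7)	/* bit position used by the SR2-bit-7 variant */

/* Toy register standing in for Status Register 2; illustrative only. */
static uint8_t sr2;

/* Skip the write when QE is already set, otherwise set it and verify. */
static int quad_enable(void)
{
	uint8_t want;

	if (sr2 & QE_BIT)
		return 0;	/* already enabled, nothing to write */

	want = sr2 | QE_BIT;
	sr2 = want;		/* stands in for the WRSR2 cycle */

	return (sr2 == want) ? 0 : -1;	/* read-back test */
}

int main(void)
{
	printf("first call:  %d (SR2=0x%02x)\n", quad_enable(), sr2);
	printf("second call: %d (SR2=0x%02x)\n", quad_enable(), sr2);
	return 0;
}
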
*/ + ret = spi_nor_read_sr2(nor, sr2); + if (ret) + return ret; + + if (*sr2 != sr2_written) { + dev_dbg(nor->dev, "SR2: Read back test failed\n"); + return -EIO; + } + + return 0; +} + +static const struct spi_nor_manufacturer *manufacturers[] = { + &spi_nor_atmel, + &spi_nor_catalyst, + &spi_nor_eon, + &spi_nor_esmt, + &spi_nor_everspin, + &spi_nor_fujitsu, + &spi_nor_gigadevice, + &spi_nor_intel, + &spi_nor_issi, + &spi_nor_macronix, + &spi_nor_micron, + &spi_nor_st, + &spi_nor_spansion, + &spi_nor_sst, + &spi_nor_winbond, + &spi_nor_xilinx, + &spi_nor_xmc, +}; + +static const struct flash_info spi_nor_generic_flash = { + .name = "spi-nor-generic", + .n_banks = 1, + /* + * JESD216 rev A doesn't specify the page size, therefore we need a + * sane default. + */ + .page_size = 256, + .parse_sfdp = true, +}; + +static const struct flash_info *spi_nor_match_id(struct spi_nor *nor, + const u8 *id) +{ + const struct flash_info *part; + unsigned int i, j; + + for (i = 0; i < ARRAY_SIZE(manufacturers); i++) { + for (j = 0; j < manufacturers[i]->nparts; j++) { + part = &manufacturers[i]->parts[j]; + if (part->id_len && + !memcmp(part->id, id, part->id_len)) { + nor->manufacturer = manufacturers[i]; + return part; + } + } + } + + return NULL; +} + +static const struct flash_info *spi_nor_detect(struct spi_nor *nor) +{ + const struct flash_info *info; + u8 *id = nor->bouncebuf; + int ret; + + ret = spi_nor_read_id(nor, 0, 0, id, nor->reg_proto); + if (ret) { + dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret); + return ERR_PTR(ret); + } + + /* Cache the complete flash ID. */ + nor->id = devm_kmemdup(nor->dev, id, SPI_NOR_MAX_ID_LEN, GFP_KERNEL); + if (!nor->id) + return ERR_PTR(-ENOMEM); + + info = spi_nor_match_id(nor, id); + + /* Fallback to a generic flash described only by its SFDP data. */ + if (!info) { + ret = spi_nor_check_sfdp_signature(nor); + if (!ret) + info = &spi_nor_generic_flash; + } + + if (!info) { + dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n", + SPI_NOR_MAX_ID_LEN, id); + return ERR_PTR(-ENODEV); + } + return info; +} + +static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + loff_t from_lock = from; + size_t len_lock = len; + ssize_t ret; + + dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len); + + ret = spi_nor_prep_and_lock_rd(nor, from_lock, len_lock); + if (ret) + return ret; + + while (len) { + loff_t addr = from; + + addr = spi_nor_convert_addr(nor, addr); + + ret = spi_nor_read_data(nor, addr, len, buf); + if (ret == 0) { + /* We shouldn't see 0-length reads */ + ret = -EIO; + goto read_err; + } + if (ret < 0) + goto read_err; + + WARN_ON(ret > len); + *retlen += ret; + buf += ret; + from += ret; + len -= ret; + } + ret = 0; + +read_err: + spi_nor_unlock_and_unprep_rd(nor, from_lock, len_lock); + + return ret; +} + +/* + * Write an address range to the nor chip. Data must be written in + * FLASH_PAGESIZE chunks. The address range may be any size provided + * it is within the physical boundaries. 
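
As the comment above says, programming happens in page-sized chunks. The chunking arithmetic on its own is easy to model: the in-page offset comes from a mask when the page size is a power of two (a modulo otherwise), and each chunk is capped by both the space left in the page and the data left to write. A sketch of just that split, with made-up addresses.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_power_of_2(uint32_t n)
{
	return n && !(n & (n - 1));
}

/*
 * Print the (addr, length) pairs a buffer of 'len' bytes starting at 'to'
 * would be programmed with, mirroring the chunking loop in spi_nor_write().
 */
static void split_into_pages(uint64_t to, size_t len, uint32_t page_size)
{
	size_t i = 0;

	while (i < len) {
		uint64_t addr = to + i;
		uint32_t page_offset = is_power_of_2(page_size) ?
				       (uint32_t)(addr & (page_size - 1)) :
				       (uint32_t)(addr % page_size);
		size_t page_remain = page_size - page_offset;

		if (page_remain > len - i)
			page_remain = len - i;

		printf("program 0x%08llx, %zu bytes\n",
		       (unsigned long long)addr, page_remain);
		i += page_remain;
	}
}

int main(void)
{
	split_into_pages(0x1f0, 600, 256);	/* unaligned start address */
	return 0;
}
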
+ */ +static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + size_t page_offset, page_remain, i; + ssize_t ret; + u32 page_size = nor->params->page_size; + + dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); + + ret = spi_nor_prep_and_lock_pe(nor, to, len); + if (ret) + return ret; + + for (i = 0; i < len; ) { + ssize_t written; + loff_t addr = to + i; + + /* + * If page_size is a power of two, the offset can be quickly + * calculated with an AND operation. On the other cases we + * need to do a modulus operation (more expensive). + */ + if (is_power_of_2(page_size)) { + page_offset = addr & (page_size - 1); + } else { + uint64_t aux = addr; + + page_offset = do_div(aux, page_size); + } + /* the size of data remaining on the first page */ + page_remain = min_t(size_t, page_size - page_offset, len - i); + + addr = spi_nor_convert_addr(nor, addr); + + ret = spi_nor_lock_device(nor); + if (ret) + goto write_err; + + ret = spi_nor_write_enable(nor); + if (ret) { + spi_nor_unlock_device(nor); + goto write_err; + } + + ret = spi_nor_write_data(nor, addr, page_remain, buf + i); + spi_nor_unlock_device(nor); + if (ret < 0) + goto write_err; + written = ret; + + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto write_err; + *retlen += written; + i += written; + } + +write_err: + spi_nor_unlock_and_unprep_pe(nor, to, len); + + return ret; +} + +static int spi_nor_check(struct spi_nor *nor) +{ + if (!nor->dev || + (!nor->spimem && !nor->controller_ops) || + (!nor->spimem && nor->controller_ops && + (!nor->controller_ops->read || + !nor->controller_ops->write || + !nor->controller_ops->read_reg || + !nor->controller_ops->write_reg))) { + pr_err("spi-nor: please fill all the necessary fields!\n"); + return -EINVAL; + } + + if (nor->spimem && nor->controller_ops) { + dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n"); + return -EINVAL; + } + + return 0; +} + +void +spi_nor_set_read_settings(struct spi_nor_read_command *read, + u8 num_mode_clocks, + u8 num_wait_states, + u8 opcode, + enum spi_nor_protocol proto) +{ + read->num_mode_clocks = num_mode_clocks; + read->num_wait_states = num_wait_states; + read->opcode = opcode; + read->proto = proto; +} + +void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode, + enum spi_nor_protocol proto) +{ + pp->opcode = opcode; + pp->proto = proto; +} + +static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size) +{ + size_t i; + + for (i = 0; i < size; i++) + if (table[i][0] == (int)hwcaps) + return table[i][1]; + + return -EINVAL; +} + +int spi_nor_hwcaps_read2cmd(u32 hwcaps) +{ + static const int hwcaps_read2cmd[][2] = { + { SNOR_HWCAPS_READ, SNOR_CMD_READ }, + { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST }, + { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR }, + { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 }, + { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 }, + { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 }, + { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR }, + { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 }, + { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 }, + { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 }, + { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR }, + { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 }, + { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 }, + { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 }, + { SNOR_HWCAPS_READ_1_8_8_DTR, 
SNOR_CMD_READ_1_8_8_DTR }, + { SNOR_HWCAPS_READ_8_8_8_DTR, SNOR_CMD_READ_8_8_8_DTR }, + }; + + return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd, + ARRAY_SIZE(hwcaps_read2cmd)); +} + +int spi_nor_hwcaps_pp2cmd(u32 hwcaps) +{ + static const int hwcaps_pp2cmd[][2] = { + { SNOR_HWCAPS_PP, SNOR_CMD_PP }, + { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 }, + { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 }, + { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 }, + { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 }, + { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 }, + { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 }, + { SNOR_HWCAPS_PP_8_8_8_DTR, SNOR_CMD_PP_8_8_8_DTR }, + }; + + return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd, + ARRAY_SIZE(hwcaps_pp2cmd)); +} + +/** + * spi_nor_spimem_check_op - check if the operation is supported + * by controller + *@nor: pointer to a 'struct spi_nor' + *@op: pointer to op template to be checked + * + * Returns 0 if operation is supported, -EOPNOTSUPP otherwise. + */ +static int spi_nor_spimem_check_op(struct spi_nor *nor, + struct spi_mem_op *op) +{ + /* + * First test with 4 address bytes. The opcode itself might + * be a 3B addressing opcode but we don't care, because + * SPI controller implementation should not check the opcode, + * but just the sequence. + */ + op->addr.nbytes = 4; + if (!spi_mem_supports_op(nor->spimem, op)) { + if (nor->params->size > SZ_16M) + return -EOPNOTSUPP; + + /* If flash size <= 16MB, 3 address bytes are sufficient */ + op->addr.nbytes = 3; + if (!spi_mem_supports_op(nor->spimem, op)) + return -EOPNOTSUPP; + } + + return 0; +} + +/** + * spi_nor_spimem_check_readop - check if the read op is supported + * by controller + *@nor: pointer to a 'struct spi_nor' + *@read: pointer to op template to be checked + * + * Returns 0 if operation is supported, -EOPNOTSUPP otherwise. + */ +static int spi_nor_spimem_check_readop(struct spi_nor *nor, + const struct spi_nor_read_command *read) +{ + struct spi_mem_op op = SPI_NOR_READ_OP(read->opcode); + + spi_nor_spimem_setup_op(nor, &op, read->proto); + + /* convert the dummy cycles to the number of bytes */ + op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) * + op.dummy.buswidth / 8; + if (spi_nor_protocol_is_dtr(nor->read_proto)) + op.dummy.nbytes *= 2; + + return spi_nor_spimem_check_op(nor, &op); +} + +/** + * spi_nor_spimem_check_pp - check if the page program op is supported + * by controller + *@nor: pointer to a 'struct spi_nor' + *@pp: pointer to op template to be checked + * + * Returns 0 if operation is supported, -EOPNOTSUPP otherwise. + */ +static int spi_nor_spimem_check_pp(struct spi_nor *nor, + const struct spi_nor_pp_command *pp) +{ + struct spi_mem_op op = SPI_NOR_PP_OP(pp->opcode); + + spi_nor_spimem_setup_op(nor, &op, pp->proto); + + return spi_nor_spimem_check_op(nor, &op); +} + +/** + * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol + * based on SPI controller capabilities + * @nor: pointer to a 'struct spi_nor' + * @hwcaps: pointer to resulting capabilities after adjusting + * according to controller and flash's capability + */ +static void +spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps) +{ + struct spi_nor_flash_parameter *params = nor->params; + unsigned int cap; + + /* X-X-X modes are not supported yet, mask them all. */ + *hwcaps &= ~SNOR_HWCAPS_X_X_X; + + /* + * If the reset line is broken, we do not want to enter a stateful + * mode. 
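
Before asking the controller whether a read template is supported, the mode clocks and wait states are folded into dummy bytes: cycles times the dummy bus width divided by eight, doubled for DTR because each cycle carries two bits per line. A tiny sketch of that conversion with example cycle counts.

#include <stdbool.h>
#include <stdio.h>

/*
 * Express (mode clocks + wait states) cycles as bytes on the dummy bus width,
 * doubling for DTR, as spi_nor_spimem_check_readop() does above.
 */
static unsigned int dummy_nbytes(unsigned int mode_clocks,
				 unsigned int wait_states,
				 unsigned int dummy_buswidth, bool dtr)
{
	unsigned int nbytes = (mode_clocks + wait_states) * dummy_buswidth / 8;

	return dtr ? nbytes * 2 : nbytes;
}

int main(void)
{
	/* e.g. 8 dummy cycles on a 1-bit line vs. 20 cycles on an 8-bit DTR bus */
	printf("1S-1S-1S, 8 cycles:  %u byte(s)\n", dummy_nbytes(0, 8, 1, false));
	printf("8D-8D-8D, 20 cycles: %u byte(s)\n", dummy_nbytes(0, 20, 8, true));
	return 0;
}
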
+ */ + if (nor->flags & SNOR_F_BROKEN_RESET) + *hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR); + + for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) { + int rdidx, ppidx; + + if (!(*hwcaps & BIT(cap))) + continue; + + rdidx = spi_nor_hwcaps_read2cmd(BIT(cap)); + if (rdidx >= 0 && + spi_nor_spimem_check_readop(nor, ¶ms->reads[rdidx])) + *hwcaps &= ~BIT(cap); + + ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap)); + if (ppidx < 0) + continue; + + if (spi_nor_spimem_check_pp(nor, + ¶ms->page_programs[ppidx])) + *hwcaps &= ~BIT(cap); + } +} + +/** + * spi_nor_set_erase_type() - set a SPI NOR erase type + * @erase: pointer to a structure that describes a SPI NOR erase type + * @size: the size of the sector/block erased by the erase type + * @opcode: the SPI command op code to erase the sector/block + */ +void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size, + u8 opcode) +{ + erase->size = size; + erase->opcode = opcode; + /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */ + erase->size_shift = ffs(erase->size) - 1; + erase->size_mask = (1 << erase->size_shift) - 1; +} + +/** + * spi_nor_mask_erase_type() - mask out a SPI NOR erase type + * @erase: pointer to a structure that describes a SPI NOR erase type + */ +void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase) +{ + erase->size = 0; +} + +/** + * spi_nor_init_uniform_erase_map() - Initialize uniform erase map + * @map: the erase map of the SPI NOR + * @erase_mask: bitmask encoding erase types that can erase the entire + * flash memory + * @flash_size: the spi nor flash memory size + */ +void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map, + u8 erase_mask, u64 flash_size) +{ + /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */ + map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) | + SNOR_LAST_REGION; + map->uniform_region.size = flash_size; + map->regions = &map->uniform_region; + map->uniform_erase_type = erase_mask; +} + +int spi_nor_post_bfpt_fixups(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt) +{ + int ret; + + if (nor->manufacturer && nor->manufacturer->fixups && + nor->manufacturer->fixups->post_bfpt) { + ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header, + bfpt); + if (ret) + return ret; + } + + if (nor->info->fixups && nor->info->fixups->post_bfpt) + return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt); + + return 0; +} + +static int spi_nor_select_read(struct spi_nor *nor, + u32 shared_hwcaps) +{ + int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1; + const struct spi_nor_read_command *read; + + if (best_match < 0) + return -EINVAL; + + cmd = spi_nor_hwcaps_read2cmd(BIT(best_match)); + if (cmd < 0) + return -EINVAL; + + read = &nor->params->reads[cmd]; + nor->read_opcode = read->opcode; + nor->read_proto = read->proto; + + /* + * In the SPI NOR framework, we don't need to make the difference + * between mode clock cycles and wait state clock cycles. + * Indeed, the value of the mode clock cycles is used by a QSPI + * flash memory to know whether it should enter or leave its 0-4-4 + * (Continuous Read / XIP) mode. + * eXecution In Place is out of the scope of the mtd sub-system. + * Hence we choose to merge both mode and wait state clock cycles + * into the so called dummy clock cycles. 
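
spi_nor_select_read() and spi_nor_select_pp() pick the "best" shared capability simply by taking the highest set bit of the shared mask, which works because the hwcaps bits are ordered with the faster protocols at higher positions. A minimal model of that selection; __builtin_clz stands in for the kernel's fls().

#include <stdio.h>

/* fls(): index of the highest set bit, 1-based; 0 when no bit is set. */
static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* Highest shared capability bit, or -1 when controller and flash share none. */
static int best_capability(unsigned int shared_mask)
{
	return fls32(shared_mask) - 1;
}

int main(void)
{
	/* made-up mask with capabilities at bits 0, 3 and 6 */
	printf("best bit: %d\n", best_capability((1u << 0) | (1u << 3) | (1u << 6)));
	printf("no caps:  %d\n", best_capability(0));
	return 0;
}
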
+ */ + nor->read_dummy = read->num_mode_clocks + read->num_wait_states; + return 0; +} + +static int spi_nor_select_pp(struct spi_nor *nor, + u32 shared_hwcaps) +{ + int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1; + const struct spi_nor_pp_command *pp; + + if (best_match < 0) + return -EINVAL; + + cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match)); + if (cmd < 0) + return -EINVAL; + + pp = &nor->params->page_programs[cmd]; + nor->program_opcode = pp->opcode; + nor->write_proto = pp->proto; + return 0; +} + +/** + * spi_nor_select_uniform_erase() - select optimum uniform erase type + * @map: the erase map of the SPI NOR + * @wanted_size: the erase type size to search for. Contains the value of + * info->sector_size, the "small sector" size in case + * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined or 0 if + * there is no information about the sector size. The + * latter is the case if the flash parameters are parsed + * solely by SFDP, then the largest supported erase type + * is selected. + * + * Once the optimum uniform sector erase command is found, disable all the + * other. + * + * Return: pointer to erase type on success, NULL otherwise. + */ +static const struct spi_nor_erase_type * +spi_nor_select_uniform_erase(struct spi_nor_erase_map *map, + const u32 wanted_size) +{ + const struct spi_nor_erase_type *tested_erase, *erase = NULL; + int i; + u8 uniform_erase_type = map->uniform_erase_type; + + for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { + if (!(uniform_erase_type & BIT(i))) + continue; + + tested_erase = &map->erase_type[i]; + + /* Skip masked erase types. */ + if (!tested_erase->size) + continue; + + /* + * If the current erase size is the one, stop here: + * we have found the right uniform Sector Erase command. + */ + if (tested_erase->size == wanted_size) { + erase = tested_erase; + break; + } + + /* + * Otherwise, the current erase size is still a valid candidate. + * Select the biggest valid candidate. + */ + if (!erase && tested_erase->size) + erase = tested_erase; + /* keep iterating to find the wanted_size */ + } + + if (!erase) + return NULL; + + /* Disable all other Sector Erase commands. */ + map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK; + map->uniform_erase_type |= BIT(erase - map->erase_type); + return erase; +} + +static int spi_nor_select_erase(struct spi_nor *nor) +{ + struct spi_nor_erase_map *map = &nor->params->erase_map; + const struct spi_nor_erase_type *erase = NULL; + struct mtd_info *mtd = &nor->mtd; + u32 wanted_size = nor->info->sector_size; + int i; + + /* + * The previous implementation handling Sector Erase commands assumed + * that the SPI flash memory has an uniform layout then used only one + * of the supported erase sizes for all Sector Erase commands. + * So to be backward compatible, the new implementation also tries to + * manage the SPI flash memory as uniform with a single erase sector + * size, when possible. + */ +#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS + /* prefer "small sector" erase if possible */ + wanted_size = 4096u; +#endif + + if (spi_nor_has_uniform_erase(nor)) { + erase = spi_nor_select_uniform_erase(map, wanted_size); + if (!erase) + return -EINVAL; + nor->erase_opcode = erase->opcode; + mtd->erasesize = erase->size; + return 0; + } + + /* + * For non-uniform SPI flash memory, set mtd->erasesize to the + * maximum erase sector size. No need to set nor->erase_opcode. 
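
spi_nor_select_uniform_erase() walks the erase types from largest to smallest, returns the one matching the wanted size if it exists, and otherwise settles for the largest non-masked type (which is also what happens when SFDP gives no wanted size at all). A condensed sketch of that choice over a plain array of sizes.

#include <stdint.h>
#include <stdio.h>

/*
 * Walk erase sizes from largest to smallest; prefer an exact match for the
 * wanted size, otherwise keep the largest usable one. Zero means "masked".
 */
static uint32_t select_uniform_erase(const uint32_t *sizes, int n,
				     uint32_t wanted)
{
	uint32_t best = 0;
	int i;

	for (i = n - 1; i >= 0; i--) {
		if (!sizes[i])
			continue;
		if (sizes[i] == wanted)
			return sizes[i];
		if (!best)
			best = sizes[i];
	}

	return best;	/* 0 when no erase type is usable */
}

int main(void)
{
	/* example map: 4 KiB, 32 KiB and 64 KiB erase types, smallest first */
	uint32_t sizes[] = { 4096, 32768, 65536 };

	printf("want 4 KiB -> %u\n",
	       (unsigned)select_uniform_erase(sizes, 3, 4096));
	printf("want 0 (SFDP-only) -> %u\n",
	       (unsigned)select_uniform_erase(sizes, 3, 0));
	return 0;
}
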
+ */ + for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { + if (map->erase_type[i].size) { + erase = &map->erase_type[i]; + break; + } + } + + if (!erase) + return -EINVAL; + + mtd->erasesize = erase->size; + return 0; +} + +static int spi_nor_default_setup(struct spi_nor *nor, + const struct spi_nor_hwcaps *hwcaps) +{ + struct spi_nor_flash_parameter *params = nor->params; + u32 ignored_mask, shared_mask; + int err; + + /* + * Keep only the hardware capabilities supported by both the SPI + * controller and the SPI flash memory. + */ + shared_mask = hwcaps->mask & params->hwcaps.mask; + + if (nor->spimem) { + /* + * When called from spi_nor_probe(), all caps are set and we + * need to discard some of them based on what the SPI + * controller actually supports (using spi_mem_supports_op()). + */ + spi_nor_spimem_adjust_hwcaps(nor, &shared_mask); + } else { + /* + * SPI n-n-n protocols are not supported when the SPI + * controller directly implements the spi_nor interface. + * Yet another reason to switch to spi-mem. + */ + ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR; + if (shared_mask & ignored_mask) { + dev_dbg(nor->dev, + "SPI n-n-n protocols are not supported.\n"); + shared_mask &= ~ignored_mask; + } + } + + /* Select the (Fast) Read command. */ + err = spi_nor_select_read(nor, shared_mask); + if (err) { + dev_dbg(nor->dev, + "can't select read settings supported by both the SPI controller and memory.\n"); + return err; + } + + /* Select the Page Program command. */ + err = spi_nor_select_pp(nor, shared_mask); + if (err) { + dev_dbg(nor->dev, + "can't select write settings supported by both the SPI controller and memory.\n"); + return err; + } + + /* Select the Sector Erase command. */ + err = spi_nor_select_erase(nor); + if (err) { + dev_dbg(nor->dev, + "can't select erase settings supported by both the SPI controller and memory.\n"); + return err; + } + + return 0; +} + +static int spi_nor_set_addr_nbytes(struct spi_nor *nor) +{ + if (nor->params->addr_nbytes) { + nor->addr_nbytes = nor->params->addr_nbytes; + } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) { + /* + * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So + * in this protocol an odd addr_nbytes cannot be used because + * then the address phase would only span a cycle and a half. + * Half a cycle would be left over. We would then have to start + * the dummy phase in the middle of a cycle and so too the data + * phase, and we will end the transaction with half a cycle left + * over. + * + * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to + * avoid this situation. + */ + nor->addr_nbytes = 4; + } else if (nor->info->addr_nbytes) { + nor->addr_nbytes = nor->info->addr_nbytes; + } else { + nor->addr_nbytes = 3; + } + + if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) { + /* enable 4-byte addressing if the device exceeds 16MiB */ + nor->addr_nbytes = 4; + } + + if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) { + dev_dbg(nor->dev, "The number of address bytes is too large: %u\n", + nor->addr_nbytes); + return -EINVAL; + } + + /* Set 4byte opcodes when possible. 
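
The address-width decision just above follows a fixed priority: an explicit parameter wins, 8D-8D-8D forces four bytes, then the flash_info value, then the three-byte default, and any three-byte result is bumped to four bytes once the part is larger than 16 MiB. A condensed stand-alone model of that decision; the boolean and integer inputs are simplified stand-ins for the driver's fields.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_16M  0x1000000ull

/*
 * 0 means "not specified" for the params/info inputs; octal_dtr stands for
 * the 8D-8D-8D read protocol, which cannot use an odd address length.
 */
static int pick_addr_nbytes(int params_nbytes, bool octal_dtr, int info_nbytes,
			    uint64_t flash_size)
{
	int n;

	if (params_nbytes)
		n = params_nbytes;
	else if (octal_dtr)
		n = 4;
	else if (info_nbytes)
		n = info_nbytes;
	else
		n = 3;

	if (n == 3 && flash_size > SZ_16M)
		n = 4;		/* 3 bytes can only address 16 MiB */

	return n;
}

int main(void)
{
	printf("8 MiB legacy flash:  %d address bytes\n",
	       pick_addr_nbytes(0, false, 0, 8ull << 20));
	printf("32 MiB legacy flash: %d address bytes\n",
	       pick_addr_nbytes(0, false, 0, 32ull << 20));
	return 0;
}
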
*/ + if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES && + !(nor->flags & SNOR_F_HAS_4BAIT)) + spi_nor_set_4byte_opcodes(nor); + + return 0; +} + +static int spi_nor_setup(struct spi_nor *nor, + const struct spi_nor_hwcaps *hwcaps) +{ + int ret; + + if (nor->params->setup) + ret = nor->params->setup(nor, hwcaps); + else + ret = spi_nor_default_setup(nor, hwcaps); + if (ret) + return ret; + + return spi_nor_set_addr_nbytes(nor); +} + +/** + * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and + * settings based on MFR register and ->default_init() hook. + * @nor: pointer to a 'struct spi_nor'. + */ +static void spi_nor_manufacturer_init_params(struct spi_nor *nor) +{ + if (nor->manufacturer && nor->manufacturer->fixups && + nor->manufacturer->fixups->default_init) + nor->manufacturer->fixups->default_init(nor); + + if (nor->info->fixups && nor->info->fixups->default_init) + nor->info->fixups->default_init(nor); +} + +/** + * spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and + * settings based on nor->info->sfdp_flags. This method should be called only by + * flashes that do not define SFDP tables. If the flash supports SFDP but the + * information is wrong and the settings from this function can not be retrieved + * by parsing SFDP, one should instead use the fixup hooks and update the wrong + * bits. + * @nor: pointer to a 'struct spi_nor'. + */ +static void spi_nor_no_sfdp_init_params(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter *params = nor->params; + struct spi_nor_erase_map *map = ¶ms->erase_map; + const u8 no_sfdp_flags = nor->info->no_sfdp_flags; + u8 i, erase_mask; + + if (no_sfdp_flags & SPI_NOR_DUAL_READ) { + params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_2], + 0, 8, SPINOR_OP_READ_1_1_2, + SNOR_PROTO_1_1_2); + } + + if (no_sfdp_flags & SPI_NOR_QUAD_READ) { + params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_4], + 0, 8, SPINOR_OP_READ_1_1_4, + SNOR_PROTO_1_1_4); + } + + if (no_sfdp_flags & SPI_NOR_OCTAL_READ) { + params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_8], + 0, 8, SPINOR_OP_READ_1_1_8, + SNOR_PROTO_1_1_8); + } + + if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) { + params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_8_8_8_DTR], + 0, 20, SPINOR_OP_READ_FAST, + SNOR_PROTO_8_8_8_DTR); + } + + if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) { + params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR; + /* + * Since xSPI Page Program opcode is backward compatible with + * Legacy SPI, use Legacy SPI opcode there as well. + */ + spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP_8_8_8_DTR], + SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR); + } + + /* + * Sector Erase settings. Sort Erase Types in ascending order, with the + * smallest erase size starting at BIT(0). + */ + erase_mask = 0; + i = 0; + if (no_sfdp_flags & SECT_4K) { + erase_mask |= BIT(i); + spi_nor_set_erase_type(&map->erase_type[i], 4096u, + SPINOR_OP_BE_4K); + i++; + } + erase_mask |= BIT(i); + spi_nor_set_erase_type(&map->erase_type[i], nor->info->sector_size, + SPINOR_OP_SE); + spi_nor_init_uniform_erase_map(map, erase_mask, params->size); +} + +/** + * spi_nor_init_flags() - Initialize NOR flags for settings that are not defined + * in the JESD216 SFDP standard, thus can not be retrieved when parsing SFDP. 
+ * @nor: pointer to a 'struct spi_nor' + */ +static void spi_nor_init_flags(struct spi_nor *nor) +{ + struct device_node *np = spi_nor_get_flash_node(nor); + const u16 flags = nor->info->flags; + + if (of_property_read_bool(np, "broken-flash-reset")) + nor->flags |= SNOR_F_BROKEN_RESET; + + if (of_property_read_bool(np, "no-wp")) + nor->flags |= SNOR_F_NO_WP; + + if (flags & SPI_NOR_SWP_IS_VOLATILE) + nor->flags |= SNOR_F_SWP_IS_VOLATILE; + + if (flags & SPI_NOR_HAS_LOCK) + nor->flags |= SNOR_F_HAS_LOCK; + + if (flags & SPI_NOR_HAS_TB) { + nor->flags |= SNOR_F_HAS_SR_TB; + if (flags & SPI_NOR_TB_SR_BIT6) + nor->flags |= SNOR_F_HAS_SR_TB_BIT6; + } + + if (flags & SPI_NOR_4BIT_BP) { + nor->flags |= SNOR_F_HAS_4BIT_BP; + if (flags & SPI_NOR_BP3_SR_BIT6) + nor->flags |= SNOR_F_HAS_SR_BP3_BIT6; + } + + if (flags & NO_CHIP_ERASE) + nor->flags |= SNOR_F_NO_OP_CHIP_ERASE; + + if (flags & SPI_NOR_RWW && nor->info->n_banks > 1 && + !nor->controller_ops) + nor->flags |= SNOR_F_RWW; +} + +/** + * spi_nor_init_fixup_flags() - Initialize NOR flags for settings that can not + * be discovered by SFDP for this particular flash because the SFDP table that + * indicates this support is not defined in the flash. In case the table for + * this support is defined but has wrong values, one should instead use a + * post_sfdp() hook to set the SNOR_F equivalent flag. + * @nor: pointer to a 'struct spi_nor' + */ +static void spi_nor_init_fixup_flags(struct spi_nor *nor) +{ + const u8 fixup_flags = nor->info->fixup_flags; + + if (fixup_flags & SPI_NOR_4B_OPCODES) + nor->flags |= SNOR_F_4B_OPCODES; + + if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE) + nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE; +} + +/** + * spi_nor_late_init_params() - Late initialization of default flash parameters. + * @nor: pointer to a 'struct spi_nor' + * + * Used to initialize flash parameters that are not declared in the JESD216 + * SFDP standard, or where SFDP tables are not defined at all. + * Will replace the spi_nor_manufacturer_init_params() method. + */ +static int spi_nor_late_init_params(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter *params = nor->params; + int ret; + + if (nor->manufacturer && nor->manufacturer->fixups && + nor->manufacturer->fixups->late_init) { + ret = nor->manufacturer->fixups->late_init(nor); + if (ret) + return ret; + } + + if (nor->info->fixups && nor->info->fixups->late_init) { + ret = nor->info->fixups->late_init(nor); + if (ret) + return ret; + } + + /* Default method kept for backward compatibility. */ + if (!params->set_4byte_addr_mode) + params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr; + + spi_nor_init_flags(nor); + spi_nor_init_fixup_flags(nor); + + /* + * NOR protection support. When locking_ops are not provided, we pick + * the default ones. + */ + if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops) + spi_nor_init_default_locking_ops(nor); + + if (nor->info->n_banks > 1) + params->bank_size = div64_u64(params->size, nor->info->n_banks); + + return 0; +} + +/** + * spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash + * parameters and settings based on JESD216 SFDP standard. + * @nor: pointer to a 'struct spi_nor'. + * + * The method has a roll-back mechanism: in case the SFDP parsing fails, the + * legacy flash parameters and settings will be restored. 
+ */ +static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter sfdp_params; + + memcpy(&sfdp_params, nor->params, sizeof(sfdp_params)); + + if (spi_nor_parse_sfdp(nor)) { + memcpy(nor->params, &sfdp_params, sizeof(*nor->params)); + nor->flags &= ~SNOR_F_4B_OPCODES; + } +} + +/** + * spi_nor_init_params_deprecated() - Deprecated way of initializing flash + * parameters and settings. + * @nor: pointer to a 'struct spi_nor'. + * + * The method assumes that flash doesn't support SFDP so it initializes flash + * parameters in spi_nor_no_sfdp_init_params() which later on can be overwritten + * when parsing SFDP, if supported. + */ +static void spi_nor_init_params_deprecated(struct spi_nor *nor) +{ + spi_nor_no_sfdp_init_params(nor); + + spi_nor_manufacturer_init_params(nor); + + if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ | + SPI_NOR_OCTAL_READ | + SPI_NOR_OCTAL_DTR_READ)) + spi_nor_sfdp_init_params_deprecated(nor); +} + +/** + * spi_nor_init_default_params() - Default initialization of flash parameters + * and settings. Done for all flashes, regardless is they define SFDP tables + * or not. + * @nor: pointer to a 'struct spi_nor'. + */ +static void spi_nor_init_default_params(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter *params = nor->params; + const struct flash_info *info = nor->info; + struct device_node *np = spi_nor_get_flash_node(nor); + + params->quad_enable = spi_nor_sr2_bit1_quad_enable; + params->otp.org = &info->otp_org; + + /* Default to 16-bit Write Status (01h) Command */ + nor->flags |= SNOR_F_HAS_16BIT_SR; + + /* Set SPI NOR sizes. */ + params->writesize = 1; + params->size = (u64)info->sector_size * info->n_sectors; + params->bank_size = params->size; + params->page_size = info->page_size; + + if (!(info->flags & SPI_NOR_NO_FR)) { + /* Default to Fast Read for DT and non-DT platform devices. */ + params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST; + + /* Mask out Fast Read if not requested at DT instantiation. */ + if (np && !of_property_read_bool(np, "m25p,fast-read")) + params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST; + } + + /* (Fast) Read settings. */ + params->hwcaps.mask |= SNOR_HWCAPS_READ; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ], + 0, 0, SPINOR_OP_READ, + SNOR_PROTO_1_1_1); + + if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST) + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_FAST], + 0, 8, SPINOR_OP_READ_FAST, + SNOR_PROTO_1_1_1); + /* Page Program settings. */ + params->hwcaps.mask |= SNOR_HWCAPS_PP; + spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP], + SPINOR_OP_PP, SNOR_PROTO_1_1_1); + + if (info->flags & SPI_NOR_QUAD_PP) { + params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4; + spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP_1_1_4], + SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4); + } +} + +/** + * spi_nor_init_params() - Initialize the flash's parameters and settings. + * @nor: pointer to a 'struct spi_nor'. + * + * The flash parameters and settings are initialized based on a sequence of + * calls that are ordered by priority: + * + * 1/ Default flash parameters initialization. The initializations are done + * based on nor->info data: + * spi_nor_info_init_params() + * + * which can be overwritten by: + * 2/ Manufacturer flash parameters initialization. 
The initializations are
+ * done based on MFR register, or when the decisions cannot be done solely
+ * based on MFR, by using specific flash_info tweaks, ->default_init():
+ * spi_nor_manufacturer_init_params()
+ *
+ * which can be overwritten by:
+ * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
+ * should be more accurate than the above.
+ * spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params()
+ *
+ * Please note that there is a ->post_bfpt() fixup hook that can overwrite
+ * the flash parameters and settings immediately after parsing the Basic
+ * Flash Parameter Table.
+ * spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed.
+ * It is used to tweak various flash parameters when information provided
+ * by the SFDP tables is wrong.
+ *
+ * which can be overwritten by:
+ * 4/ Late flash parameters initialization, used to initialize flash
+ * parameters that are not declared in the JESD216 SFDP standard, or where SFDP
+ * tables are not defined at all.
+ * spi_nor_late_init_params()
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_init_params(struct spi_nor *nor)
+{
+	int ret;
+
+	nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
+	if (!nor->params)
+		return -ENOMEM;
+
+	spi_nor_init_default_params(nor);
+
+	if (nor->info->parse_sfdp) {
+		ret = spi_nor_parse_sfdp(nor);
+		if (ret) {
+			dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
+			return ret;
+		}
+	} else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) {
+		spi_nor_no_sfdp_init_params(nor);
+	} else {
+		spi_nor_init_params_deprecated(nor);
+	}
+
+	return spi_nor_late_init_params(nor);
+}
+
+/**
+ * spi_nor_set_octal_dtr() - enable or disable Octal DTR I/O.
+ * @nor: pointer to a 'struct spi_nor'
+ * @enable: whether to enable or disable Octal DTR
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
+{
+	int ret;
+
+	if (!nor->params->set_octal_dtr)
+		return 0;
+
+	if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
+	      nor->write_proto == SNOR_PROTO_8_8_8_DTR))
+		return 0;
+
+	if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
+		return 0;
+
+	ret = nor->params->set_octal_dtr(nor, enable);
+	if (ret)
+		return ret;
+
+	if (enable)
+		nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
+	else
+		nor->reg_proto = SNOR_PROTO_1_1_1;
+
+	return 0;
+}
+
+/**
+ * spi_nor_quad_enable() - enable Quad I/O if needed.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_quad_enable(struct spi_nor *nor)
+{
+	if (!nor->params->quad_enable)
+		return 0;
+
+	if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
+	      spi_nor_get_protocol_width(nor->write_proto) == 4))
+		return 0;
+
+	return nor->params->quad_enable(nor);
+}
+
+/**
+ * spi_nor_set_4byte_addr_mode() - Set address mode.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @enable: enable/disable 4 byte address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
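+ *
+ * A minimal usage sketch (mirroring the callers later in this file, such as
+ * spi_nor_init() and spi_nor_restore(); not additional API):
+ *
+ *	if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
+ *		err = spi_nor_set_4byte_addr_mode(nor, true);
+ *		if (err)
+ *			return err;
+ *	}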
+ */ +int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable) +{ + struct spi_nor_flash_parameter *params = nor->params; + int ret; + + ret = params->set_4byte_addr_mode(nor, enable); + if (ret && ret != -ENOTSUPP) + return ret; + + if (enable) { + params->addr_nbytes = 4; + params->addr_mode_nbytes = 4; + } else { + params->addr_nbytes = 3; + params->addr_mode_nbytes = 3; + } + + return 0; +} + +static int spi_nor_init(struct spi_nor *nor) +{ + int err; + + err = spi_nor_set_octal_dtr(nor, true); + if (err) { + dev_dbg(nor->dev, "octal mode not supported\n"); + return err; + } + + err = spi_nor_quad_enable(nor); + if (err) { + dev_dbg(nor->dev, "quad mode not supported\n"); + return err; + } + + /* + * Some SPI NOR flashes are write protected by default after a power-on + * reset cycle, in order to avoid inadvertent writes during power-up. + * Backward compatibility imposes to unlock the entire flash memory + * array at power-up by default. Depending on the kernel configuration + * (1) do nothing, (2) always unlock the entire flash array or (3) + * unlock the entire flash array only when the software write + * protection bits are volatile. The latter is indicated by + * SNOR_F_SWP_IS_VOLATILE. + */ + if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) || + (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) && + nor->flags & SNOR_F_SWP_IS_VOLATILE)) + spi_nor_try_unlock_all(nor); + + if (nor->addr_nbytes == 4 && + nor->read_proto != SNOR_PROTO_8_8_8_DTR && + !(nor->flags & SNOR_F_4B_OPCODES)) { + /* + * If the RESET# pin isn't hooked up properly, or the system + * otherwise doesn't perform a reset command in the boot + * sequence, it's impossible to 100% protect against unexpected + * reboots (e.g., crashes). Warn the user (or hopefully, system + * designer) that this is bad. + */ + WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET, + "enabling reset hack; may not recover from unexpected reboots\n"); + err = spi_nor_set_4byte_addr_mode(nor, true); + if (err) + return err; + } + + return 0; +} + +/** + * spi_nor_soft_reset() - Perform a software reset + * @nor: pointer to 'struct spi_nor' + * + * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets + * the device to its power-on-reset state. This is useful when the software has + * made some changes to device (volatile) registers and needs to reset it before + * shutting down, for example. + * + * Not every flash supports this sequence. The same set of opcodes might be used + * for some other operation on a flash that does not support this. Support for + * this sequence can be discovered via SFDP in the BFPT table. + * + * Return: 0 on success, -errno otherwise. + */ +static void spi_nor_soft_reset(struct spi_nor *nor) +{ + struct spi_mem_op op; + int ret; + + op = (struct spi_mem_op)SPINOR_SRSTEN_OP; + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + if (ret) { + dev_warn(nor->dev, "Software reset failed: %d\n", ret); + return; + } + + op = (struct spi_mem_op)SPINOR_SRST_OP; + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + if (ret) { + dev_warn(nor->dev, "Software reset failed: %d\n", ret); + return; + } + + /* + * Software Reset is not instant, and the delay varies from flash to + * flash. Looking at a few flashes, most range somewhere below 100 + * microseconds. So, sleep for a range of 200-400 us. 
+ */ + usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX); +} + +/* mtd suspend handler */ +static int spi_nor_suspend(struct mtd_info *mtd) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + int ret; + + /* Disable octal DTR mode if we enabled it. */ + ret = spi_nor_set_octal_dtr(nor, false); + if (ret) + dev_err(nor->dev, "suspend() failed\n"); + + return ret; +} + +/* mtd resume handler */ +static void spi_nor_resume(struct mtd_info *mtd) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + struct device *dev = nor->dev; + int ret; + + /* re-initialize the nor chip */ + ret = spi_nor_init(nor); + if (ret) + dev_err(dev, "resume() failed\n"); +} + +static int spi_nor_get_device(struct mtd_info *mtd) +{ + struct mtd_info *master = mtd_get_master(mtd); + struct spi_nor *nor = mtd_to_spi_nor(master); + struct device *dev; + + if (nor->spimem) + dev = nor->spimem->spi->controller->dev.parent; + else + dev = nor->dev; + + if (!try_module_get(dev->driver->owner)) + return -ENODEV; + + return 0; +} + +static void spi_nor_put_device(struct mtd_info *mtd) +{ + struct mtd_info *master = mtd_get_master(mtd); + struct spi_nor *nor = mtd_to_spi_nor(master); + struct device *dev; + + if (nor->spimem) + dev = nor->spimem->spi->controller->dev.parent; + else + dev = nor->dev; + + module_put(dev->driver->owner); +} + +static void spi_nor_restore(struct spi_nor *nor) +{ + int ret; + + /* restore the addressing mode */ + if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) && + nor->flags & SNOR_F_BROKEN_RESET) { + ret = spi_nor_set_4byte_addr_mode(nor, false); + if (ret) + /* + * Do not stop the execution in the hope that the flash + * will default to the 3-byte address mode after the + * software reset. + */ + dev_err(nor->dev, "Failed to exit 4-byte address mode, err = %d\n", ret); + } + + if (nor->flags & SNOR_F_SOFT_RESET) + spi_nor_soft_reset(nor); +} + +static const struct flash_info *spi_nor_match_name(struct spi_nor *nor, + const char *name) +{ + unsigned int i, j; + + for (i = 0; i < ARRAY_SIZE(manufacturers); i++) { + for (j = 0; j < manufacturers[i]->nparts; j++) { + if (!strcmp(name, manufacturers[i]->parts[j].name)) { + nor->manufacturer = manufacturers[i]; + return &manufacturers[i]->parts[j]; + } + } + } + + return NULL; +} + +static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor, + const char *name) +{ + const struct flash_info *info = NULL; + + if (name) + info = spi_nor_match_name(nor, name); + /* Try to auto-detect if chip name wasn't specified or not found */ + if (!info) + return spi_nor_detect(nor); + + /* + * If caller has specified name of flash model that can normally be + * detected using JEDEC, let's verify it. + */ + if (name && info->id_len) { + const struct flash_info *jinfo; + + jinfo = spi_nor_detect(nor); + if (IS_ERR(jinfo)) { + return jinfo; + } else if (jinfo != info) { + /* + * JEDEC knows better, so overwrite platform ID. We + * can't trust partitions any longer, but we'll let + * mtd apply them anyway, since some partitions may be + * marked read-only, and we don't want to loose that + * information, even if it's not 100% accurate. 
+ */ + dev_warn(nor->dev, "found %s, expected %s\n", + jinfo->name, info->name); + info = jinfo; + } + } + + return info; +} + +static void spi_nor_set_mtd_info(struct spi_nor *nor) +{ + struct mtd_info *mtd = &nor->mtd; + struct device *dev = nor->dev; + + spi_nor_set_mtd_locking_ops(nor); + spi_nor_set_mtd_otp_ops(nor); + + mtd->dev.parent = dev; + if (!mtd->name) + mtd->name = dev_name(dev); + mtd->type = MTD_NORFLASH; + mtd->flags = MTD_CAP_NORFLASH; + /* Unset BIT_WRITEABLE to enable JFFS2 write buffer for ECC'd NOR */ + if (nor->flags & SNOR_F_ECC) + mtd->flags &= ~MTD_BIT_WRITEABLE; + if (nor->info->flags & SPI_NOR_NO_ERASE) + mtd->flags |= MTD_NO_ERASE; + else + mtd->_erase = spi_nor_erase; + mtd->writesize = nor->params->writesize; + mtd->writebufsize = nor->params->page_size; + mtd->size = nor->params->size; + mtd->_read = spi_nor_read; + /* Might be already set by some SST flashes. */ + if (!mtd->_write) + mtd->_write = spi_nor_write; + mtd->_suspend = spi_nor_suspend; + mtd->_resume = spi_nor_resume; + mtd->_get_device = spi_nor_get_device; + mtd->_put_device = spi_nor_put_device; +} + +static int spi_nor_hw_reset(struct spi_nor *nor) +{ + struct gpio_desc *reset; + + reset = devm_gpiod_get_optional(nor->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR_OR_NULL(reset)) + return PTR_ERR_OR_ZERO(reset); + + /* + * Experimental delay values by looking at different flash device + * vendors datasheets. + */ + usleep_range(1, 5); + gpiod_set_value_cansleep(reset, 1); + usleep_range(100, 150); + gpiod_set_value_cansleep(reset, 0); + usleep_range(1000, 1200); + + return 0; +} + +int spi_nor_scan(struct spi_nor *nor, const char *name, + const struct spi_nor_hwcaps *hwcaps) +{ + const struct flash_info *info; + struct device *dev = nor->dev; + struct mtd_info *mtd = &nor->mtd; + int ret; + int i; + + ret = spi_nor_check(nor); + if (ret) + return ret; + + /* Reset SPI protocol for all commands. */ + nor->reg_proto = SNOR_PROTO_1_1_1; + nor->read_proto = SNOR_PROTO_1_1_1; + nor->write_proto = SNOR_PROTO_1_1_1; + + /* + * We need the bounce buffer early to read/write registers when going + * through the spi-mem layer (buffers have to be DMA-able). + * For spi-mem drivers, we'll reallocate a new buffer if + * nor->params->page_size turns out to be greater than PAGE_SIZE (which + * shouldn't happen before long since NOR pages are usually less + * than 1KB) after spi_nor_scan() returns. + */ + nor->bouncebuf_size = PAGE_SIZE; + nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size, + GFP_KERNEL); + if (!nor->bouncebuf) + return -ENOMEM; + + ret = spi_nor_hw_reset(nor); + if (ret) + return ret; + + info = spi_nor_get_flash_info(nor, name); + if (IS_ERR(info)) + return PTR_ERR(info); + + nor->info = info; + + mutex_init(&nor->lock); + + /* Init flash parameters based on flash_info struct and SFDP */ + ret = spi_nor_init_params(nor); + if (ret) + return ret; + + if (spi_nor_use_parallel_locking(nor)) + init_waitqueue_head(&nor->rww.wait); + + /* + * Configure the SPI memory: + * - select op codes for (Fast) Read, Page Program and Sector Erase. + * - set the number of dummy cycles (mode cycles + wait states). + * - set the SPI protocols for register and memory accesses. + * - set the number of address bytes. + */ + ret = spi_nor_setup(nor, hwcaps); + if (ret) + return ret; + + /* Send all the required SPI flash commands to initialize device */ + ret = spi_nor_init(nor); + if (ret) + return ret; + + /* No mtd_info fields should be used up to this point. 
*/ + spi_nor_set_mtd_info(nor); + + dev_info(dev, "%s (%lld Kbytes)\n", info->name, + (long long)mtd->size >> 10); + + dev_dbg(dev, + "mtd .name = %s, .size = 0x%llx (%lldMiB), " + ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", + mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20), + mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions); + + if (mtd->numeraseregions) + for (i = 0; i < mtd->numeraseregions; i++) + dev_dbg(dev, + "mtd.eraseregions[%d] = { .offset = 0x%llx, " + ".erasesize = 0x%.8x (%uKiB), " + ".numblocks = %d }\n", + i, (long long)mtd->eraseregions[i].offset, + mtd->eraseregions[i].erasesize, + mtd->eraseregions[i].erasesize / 1024, + mtd->eraseregions[i].numblocks); + return 0; +} +EXPORT_SYMBOL_GPL(spi_nor_scan); + +static int spi_nor_create_read_dirmap(struct spi_nor *nor) +{ + struct spi_mem_dirmap_info info = { + .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0), + SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0), + SPI_MEM_OP_DUMMY(nor->read_dummy, 0), + SPI_MEM_OP_DATA_IN(0, NULL, 0)), + .offset = 0, + .length = nor->params->size, + }; + struct spi_mem_op *op = &info.op_tmpl; + + spi_nor_spimem_setup_op(nor, op, nor->read_proto); + + /* convert the dummy cycles to the number of bytes */ + op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8; + if (spi_nor_protocol_is_dtr(nor->read_proto)) + op->dummy.nbytes *= 2; + + /* + * Since spi_nor_spimem_setup_op() only sets buswidth when the number + * of data bytes is non-zero, the data buswidth won't be set here. So, + * do it explicitly. + */ + op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto); + + nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem, + &info); + return PTR_ERR_OR_ZERO(nor->dirmap.rdesc); +} + +static int spi_nor_create_write_dirmap(struct spi_nor *nor) +{ + struct spi_mem_dirmap_info info = { + .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0), + SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(0, NULL, 0)), + .offset = 0, + .length = nor->params->size, + }; + struct spi_mem_op *op = &info.op_tmpl; + + if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second) + op->addr.nbytes = 0; + + spi_nor_spimem_setup_op(nor, op, nor->write_proto); + + /* + * Since spi_nor_spimem_setup_op() only sets buswidth when the number + * of data bytes is non-zero, the data buswidth won't be set here. So, + * do it explicitly. + */ + op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto); + + nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem, + &info); + return PTR_ERR_OR_ZERO(nor->dirmap.wdesc); +} + +static int spi_nor_probe(struct spi_mem *spimem) +{ + struct spi_device *spi = spimem->spi; + struct flash_platform_data *data = dev_get_platdata(&spi->dev); + struct spi_nor *nor; + /* + * Enable all caps by default. The core will mask them after + * checking what's really supported using spi_mem_supports_op(). + */ + const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL }; + char *flash_name; + int ret; + + nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL); + if (!nor) + return -ENOMEM; + + nor->spimem = spimem; + nor->dev = &spi->dev; + spi_nor_set_flash_node(nor, spi->dev.of_node); + + spi_mem_set_drvdata(spimem, nor); + + if (data && data->name) + nor->mtd.name = data->name; + + if (!nor->mtd.name) + nor->mtd.name = spi_mem_get_name(spimem); + + /* + * For some (historical?) 
reason many platforms provide two different + * names in flash_platform_data: "name" and "type". Quite often name is + * set to "m25p80" and then "type" provides a real chip name. + * If that's the case, respect "type" and ignore a "name". + */ + if (data && data->type) + flash_name = data->type; + else if (!strcmp(spi->modalias, "spi-nor")) + flash_name = NULL; /* auto-detect */ + else + flash_name = spi->modalias; + + ret = spi_nor_scan(nor, flash_name, &hwcaps); + if (ret) + return ret; + + spi_nor_debugfs_register(nor); + + /* + * None of the existing parts have > 512B pages, but let's play safe + * and add this logic so that if anyone ever adds support for such + * a NOR we don't end up with buffer overflows. + */ + if (nor->params->page_size > PAGE_SIZE) { + nor->bouncebuf_size = nor->params->page_size; + devm_kfree(nor->dev, nor->bouncebuf); + nor->bouncebuf = devm_kmalloc(nor->dev, + nor->bouncebuf_size, + GFP_KERNEL); + if (!nor->bouncebuf) + return -ENOMEM; + } + + ret = spi_nor_create_read_dirmap(nor); + if (ret) + return ret; + + ret = spi_nor_create_write_dirmap(nor); + if (ret) + return ret; + + return mtd_device_register(&nor->mtd, data ? data->parts : NULL, + data ? data->nr_parts : 0); +} + +static int spi_nor_remove(struct spi_mem *spimem) +{ + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + + spi_nor_restore(nor); + + /* Clean up MTD stuff. */ + return mtd_device_unregister(&nor->mtd); +} + +static void spi_nor_shutdown(struct spi_mem *spimem) +{ + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + + spi_nor_restore(nor); +} + +/* + * Do NOT add to this array without reading the following: + * + * Historically, many flash devices are bound to this driver by their name. But + * since most of these flash are compatible to some extent, and their + * differences can often be differentiated by the JEDEC read-ID command, we + * encourage new users to add support to the spi-nor library, and simply bind + * against a generic string here (e.g., "jedec,spi-nor"). + * + * Many flash names are kept here in this list to keep them available + * as module aliases for existing platforms. + */ +static const struct spi_device_id spi_nor_dev_ids[] = { + /* + * Allow non-DT platform devices to bind to the "spi-nor" modalias, and + * hack around the fact that the SPI core does not provide uevent + * matching for .of_match_table + */ + {"spi-nor"}, + + /* + * Entries not used in DTs that should be safe to drop after replacing + * them with "spi-nor" in platform data. + */ + {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"}, + + /* + * Entries that were used in DTs without "jedec,spi-nor" fallback and + * should be kept for backward compatibility. 
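+	 *
+	 * Either way, new users should not grow this table: non-DT boards
+	 * are expected to bind through the generic "spi-nor" modalias and,
+	 * if auto-detection is not wanted, pass the real chip name via
+	 * flash_platform_data. Roughly (sketch only, values are examples):
+	 *
+	 *	static struct flash_platform_data board_flash = {
+	 *		.type = "w25q128",
+	 *	};
+	 *
+	 *	static struct spi_board_info board_spi[] __initdata = {
+	 *		{
+	 *			.modalias	= "spi-nor",
+	 *			.platform_data	= &board_flash,
+	 *			.max_speed_hz	= 50000000,
+	 *			.bus_num	= 0,
+	 *			.chip_select	= 0,
+	 *		},
+	 *	};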
+ */ + {"at25df321a"}, {"at25df641"}, {"at26df081a"}, + {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"}, + {"mx25l25635e"},{"mx66l51235l"}, + {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"}, + {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"}, + {"s25fl064k"}, + {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"}, + {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"}, + {"m25p64"}, {"m25p128"}, + {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"}, + {"w25q80bl"}, {"w25q128"}, {"w25q256"}, + + /* Flashes that can't be detected using JEDEC */ + {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"}, + {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"}, + {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"}, + + /* Everspin MRAMs (non-JEDEC) */ + { "mr25h128" }, /* 128 Kib, 40 MHz */ + { "mr25h256" }, /* 256 Kib, 40 MHz */ + { "mr25h10" }, /* 1 Mib, 40 MHz */ + { "mr25h40" }, /* 4 Mib, 40 MHz */ + + { }, +}; +MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids); + +static const struct of_device_id spi_nor_of_table[] = { + /* + * Generic compatibility for SPI NOR that can be identified by the + * JEDEC READ ID opcode (0x9F). Use this, if possible. + */ + { .compatible = "jedec,spi-nor" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, spi_nor_of_table); + +/* + * REVISIT: many of these chips have deep power-down modes, which + * should clearly be entered on suspend() to minimize power use. + * And also when they're otherwise idle... + */ +static struct spi_mem_driver spi_nor_driver = { + .spidrv = { + .driver = { + .name = "spi-nor", + .of_match_table = spi_nor_of_table, + .dev_groups = spi_nor_sysfs_groups, + }, + .id_table = spi_nor_dev_ids, + }, + .probe = spi_nor_probe, + .remove = spi_nor_remove, + .shutdown = spi_nor_shutdown, +}; + +static int __init spi_nor_module_init(void) +{ + return spi_mem_driver_register(&spi_nor_driver); +} +module_init(spi_nor_module_init); + +static void __exit spi_nor_module_exit(void) +{ + spi_mem_driver_unregister(&spi_nor_driver); + spi_nor_debugfs_shutdown(); +} +module_exit(spi_nor_module_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>"); +MODULE_AUTHOR("Mike Lavender"); +MODULE_DESCRIPTION("framework for SPI NOR"); diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h new file mode 100644 index 0000000000..9217379b9c --- /dev/null +++ b/drivers/mtd/spi-nor/core.h @@ -0,0 +1,745 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#ifndef __LINUX_MTD_SPI_NOR_INTERNAL_H +#define __LINUX_MTD_SPI_NOR_INTERNAL_H + +#include "sfdp.h" + +#define SPI_NOR_MAX_ID_LEN 6 + +/* Standard SPI NOR flash operations. 
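+ *
+ * These op templates are consumed through the spi-mem layer; a typical
+ * register-access sequence looks roughly like this (sketch, mirroring
+ * e.g. spi_nor_soft_reset() in core.c):
+ *
+ *	struct spi_mem_op op;
+ *	int ret;
+ *
+ *	op = (struct spi_mem_op)SPI_NOR_WREN_OP;
+ *	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+ *	ret = spi_mem_exec_op(nor->spimem, &op);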
*/ +#define SPI_NOR_READID_OP(naddr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 0), \ + SPI_MEM_OP_ADDR(naddr, 0, 0), \ + SPI_MEM_OP_DUMMY(ndummy, 0), \ + SPI_MEM_OP_DATA_IN(len, buf, 0)) + +#define SPI_NOR_WREN_OP \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPI_NOR_WRDI_OP \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPI_NOR_RDSR_OP(buf) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_IN(1, buf, 0)) + +#define SPI_NOR_WRSR_OP(buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_OUT(len, buf, 0)) + +#define SPI_NOR_RDSR2_OP(buf) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_OUT(1, buf, 0)) + +#define SPI_NOR_WRSR2_OP(buf) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_OUT(1, buf, 0)) + +#define SPI_NOR_RDCR_OP(buf) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_IN(1, buf, 0)) + +#define SPI_NOR_EN4B_EX4B_OP(enable) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPI_NOR_BRWR_OP(buf) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_OUT(1, buf, 0)) + +#define SPI_NOR_GBULK_OP \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_GBULK, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPI_NOR_CHIP_ERASE_OP \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPI_NOR_SECTOR_ERASE_OP(opcode, addr_nbytes, addr) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \ + SPI_MEM_OP_ADDR(addr_nbytes, addr, 0), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPI_NOR_READ_OP(opcode) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \ + SPI_MEM_OP_ADDR(3, 0, 0), \ + SPI_MEM_OP_DUMMY(1, 0), \ + SPI_MEM_OP_DATA_IN(2, NULL, 0)) + +#define SPI_NOR_PP_OP(opcode) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \ + SPI_MEM_OP_ADDR(3, 0, 0), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_OUT(2, NULL, 0)) + +#define SPINOR_SRSTEN_OP \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRSTEN, 0), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DATA) + +#define SPINOR_SRST_OP \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRST, 0), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DATA) + +/* Keep these in sync with the list in debugfs.c */ +enum spi_nor_option_flags { + SNOR_F_HAS_SR_TB = BIT(0), + SNOR_F_NO_OP_CHIP_ERASE = BIT(1), + SNOR_F_BROKEN_RESET = BIT(2), + SNOR_F_4B_OPCODES = BIT(3), + SNOR_F_HAS_4BAIT = BIT(4), + SNOR_F_HAS_LOCK = BIT(5), + SNOR_F_HAS_16BIT_SR = BIT(6), + SNOR_F_NO_READ_CR = BIT(7), + SNOR_F_HAS_SR_TB_BIT6 = BIT(8), + SNOR_F_HAS_4BIT_BP = BIT(9), + SNOR_F_HAS_SR_BP3_BIT6 = BIT(10), + SNOR_F_IO_MODE_EN_VOLATILE = BIT(11), + SNOR_F_SOFT_RESET = BIT(12), + SNOR_F_SWP_IS_VOLATILE = BIT(13), + SNOR_F_RWW = BIT(14), + SNOR_F_ECC = BIT(15), + SNOR_F_NO_WP = BIT(16), +}; + +struct spi_nor_read_command { + u8 num_mode_clocks; + u8 num_wait_states; + 
u8 opcode; + enum spi_nor_protocol proto; +}; + +struct spi_nor_pp_command { + u8 opcode; + enum spi_nor_protocol proto; +}; + +enum spi_nor_read_command_index { + SNOR_CMD_READ, + SNOR_CMD_READ_FAST, + SNOR_CMD_READ_1_1_1_DTR, + + /* Dual SPI */ + SNOR_CMD_READ_1_1_2, + SNOR_CMD_READ_1_2_2, + SNOR_CMD_READ_2_2_2, + SNOR_CMD_READ_1_2_2_DTR, + + /* Quad SPI */ + SNOR_CMD_READ_1_1_4, + SNOR_CMD_READ_1_4_4, + SNOR_CMD_READ_4_4_4, + SNOR_CMD_READ_1_4_4_DTR, + + /* Octal SPI */ + SNOR_CMD_READ_1_1_8, + SNOR_CMD_READ_1_8_8, + SNOR_CMD_READ_8_8_8, + SNOR_CMD_READ_1_8_8_DTR, + SNOR_CMD_READ_8_8_8_DTR, + + SNOR_CMD_READ_MAX +}; + +enum spi_nor_pp_command_index { + SNOR_CMD_PP, + + /* Quad SPI */ + SNOR_CMD_PP_1_1_4, + SNOR_CMD_PP_1_4_4, + SNOR_CMD_PP_4_4_4, + + /* Octal SPI */ + SNOR_CMD_PP_1_1_8, + SNOR_CMD_PP_1_8_8, + SNOR_CMD_PP_8_8_8, + SNOR_CMD_PP_8_8_8_DTR, + + SNOR_CMD_PP_MAX +}; + +/** + * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type + * @size: the size of the sector/block erased by the erase type. + * JEDEC JESD216B imposes erase sizes to be a power of 2. + * @size_shift: @size is a power of 2, the shift is stored in + * @size_shift. + * @size_mask: the size mask based on @size_shift. + * @opcode: the SPI command op code to erase the sector/block. + * @idx: Erase Type index as sorted in the Basic Flash Parameter + * Table. It will be used to synchronize the supported + * Erase Types with the ones identified in the SFDP + * optional tables. + */ +struct spi_nor_erase_type { + u32 size; + u32 size_shift; + u32 size_mask; + u8 opcode; + u8 idx; +}; + +/** + * struct spi_nor_erase_command - Used for non-uniform erases + * The structure is used to describe a list of erase commands to be executed + * once we validate that the erase can be performed. The elements in the list + * are run-length encoded. + * @list: for inclusion into the list of erase commands. + * @count: how many times the same erase command should be + * consecutively used. + * @size: the size of the sector/block erased by the command. + * @opcode: the SPI command op code to erase the sector/block. + */ +struct spi_nor_erase_command { + struct list_head list; + u32 count; + u32 size; + u8 opcode; +}; + +/** + * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region + * @offset: the offset in the data array of erase region start. + * LSB bits are used as a bitmask encoding flags to + * determine if this region is overlaid, if this region is + * the last in the SPI NOR flash memory and to indicate + * all the supported erase commands inside this region. + * The erase types are sorted in ascending order with the + * smallest Erase Type size being at BIT(0). + * @size: the size of the region in bytes. + */ +struct spi_nor_erase_region { + u64 offset; + u64 size; +}; + +#define SNOR_ERASE_TYPE_MAX 4 +#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0) + +#define SNOR_LAST_REGION BIT(4) +#define SNOR_OVERLAID_REGION BIT(5) + +#define SNOR_ERASE_FLAGS_MAX 6 +#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0) + +/** + * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map + * @regions: array of erase regions. The regions are consecutive in + * address space. Walking through the regions is done + * incrementally. + * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform + * sector size (legacy implementation). + * @erase_type: an array of erase types shared by all the regions. 
+ * The erase types are sorted in ascending order, with the + * smallest Erase Type size being the first member in the + * erase_type array. + * @uniform_erase_type: bitmask encoding erase types that can erase the + * entire memory. This member is completed at init by + * uniform and non-uniform SPI NOR flash memories if they + * support at least one erase type that can erase the + * entire memory. + */ +struct spi_nor_erase_map { + struct spi_nor_erase_region *regions; + struct spi_nor_erase_region uniform_region; + struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX]; + u8 uniform_erase_type; +}; + +/** + * struct spi_nor_locking_ops - SPI NOR locking methods + * @lock: lock a region of the SPI NOR. + * @unlock: unlock a region of the SPI NOR. + * @is_locked: check if a region of the SPI NOR is completely locked + */ +struct spi_nor_locking_ops { + int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); +}; + +/** + * struct spi_nor_otp_organization - Structure to describe the SPI NOR OTP regions + * @len: size of one OTP region in bytes. + * @base: start address of the OTP area. + * @offset: offset between consecutive OTP regions if there are more + * than one. + * @n_regions: number of individual OTP regions. + */ +struct spi_nor_otp_organization { + size_t len; + loff_t base; + loff_t offset; + unsigned int n_regions; +}; + +/** + * struct spi_nor_otp_ops - SPI NOR OTP methods + * @read: read from the SPI NOR OTP area. + * @write: write to the SPI NOR OTP area. + * @lock: lock an OTP region. + * @erase: erase an OTP region. + * @is_locked: check if an OTP region of the SPI NOR is locked. + */ +struct spi_nor_otp_ops { + int (*read)(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf); + int (*write)(struct spi_nor *nor, loff_t addr, size_t len, + const u8 *buf); + int (*lock)(struct spi_nor *nor, unsigned int region); + int (*erase)(struct spi_nor *nor, loff_t addr); + int (*is_locked)(struct spi_nor *nor, unsigned int region); +}; + +/** + * struct spi_nor_otp - SPI NOR OTP grouping structure + * @org: OTP region organization + * @ops: OTP access ops + */ +struct spi_nor_otp { + const struct spi_nor_otp_organization *org; + const struct spi_nor_otp_ops *ops; +}; + +/** + * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings. + * Includes legacy flash parameters and settings that can be overwritten + * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216 + * Serial Flash Discoverable Parameters (SFDP) tables. + * + * @bank_size: the flash memory bank density in bytes. + * @size: the total flash memory density in bytes. + * @writesize Minimal writable flash unit size. Defaults to 1. Set to + * ECC unit size for ECC-ed flashes. + * @page_size: the page size of the SPI NOR flash memory. + * @addr_nbytes: number of address bytes to send. + * @addr_mode_nbytes: number of address bytes of current address mode. Useful + * when the flash operates with 4B opcodes but needs the + * internal address mode for opcodes that don't have a 4B + * opcode correspondent. + * @rdsr_dummy: dummy cycles needed for Read Status Register command + * in octal DTR mode. + * @rdsr_addr_nbytes: dummy address bytes needed for Read Status Register + * command in octal DTR mode. + * @n_dice: number of dice in the flash memory. + * @vreg_offset: volatile register offset for each die. 
+ * @hwcaps: describes the read and page program hardware + * capabilities. + * @reads: read capabilities ordered by priority: the higher index + * in the array, the higher priority. + * @page_programs: page program capabilities ordered by priority: the + * higher index in the array, the higher priority. + * @erase_map: the erase map parsed from the SFDP Sector Map Parameter + * Table. + * @otp: SPI NOR OTP info. + * @set_octal_dtr: enables or disables SPI NOR octal DTR mode. + * @quad_enable: enables SPI NOR quad mode. + * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode. + * @convert_addr: converts an absolute address into something the flash + * will understand. Particularly useful when pagesize is + * not a power-of-2. + * @setup: (optional) configures the SPI NOR memory. Useful for + * SPI NOR flashes that have peculiarities to the SPI NOR + * standard e.g. different opcodes, specific address + * calculation, page size, etc. + * @ready: (optional) flashes might use a different mechanism + * than reading the status register to indicate they + * are ready for a new command + * @locking_ops: SPI NOR locking methods. + * @priv: flash's private data. + */ +struct spi_nor_flash_parameter { + u64 bank_size; + u64 size; + u32 writesize; + u32 page_size; + u8 addr_nbytes; + u8 addr_mode_nbytes; + u8 rdsr_dummy; + u8 rdsr_addr_nbytes; + u8 n_dice; + u32 *vreg_offset; + + struct spi_nor_hwcaps hwcaps; + struct spi_nor_read_command reads[SNOR_CMD_READ_MAX]; + struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX]; + + struct spi_nor_erase_map erase_map; + struct spi_nor_otp otp; + + int (*set_octal_dtr)(struct spi_nor *nor, bool enable); + int (*quad_enable)(struct spi_nor *nor); + int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable); + u32 (*convert_addr)(struct spi_nor *nor, u32 addr); + int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps); + int (*ready)(struct spi_nor *nor); + + const struct spi_nor_locking_ops *locking_ops; + void *priv; +}; + +/** + * struct spi_nor_fixups - SPI NOR fixup hooks + * @default_init: called after default flash parameters init. Used to tweak + * flash parameters when information provided by the flash_info + * table is incomplete or wrong. + * @post_bfpt: called after the BFPT table has been parsed + * @post_sfdp: called after SFDP has been parsed (is also called for SPI NORs + * that do not support RDSFDP). Typically used to tweak various + * parameters that could not be extracted by other means (i.e. + * when information provided by the SFDP/flash_info tables are + * incomplete or wrong). + * @late_init: used to initialize flash parameters that are not declared in the + * JESD216 SFDP standard, or where SFDP tables not defined at all. + * Will replace the default_init() hook. + * + * Those hooks can be used to tweak the SPI NOR configuration when the SFDP + * table is broken or not available. + */ +struct spi_nor_fixups { + void (*default_init)(struct spi_nor *nor); + int (*post_bfpt)(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt); + int (*post_sfdp)(struct spi_nor *nor); + int (*late_init)(struct spi_nor *nor); +}; + +/** + * struct flash_info - SPI NOR flash_info entry. + * @name: the name of the flash. + * @id: the flash's ID bytes. The first three bytes are the + * JEDIC ID. JEDEC ID zero means "no ID" (mostly older chips). + * @id_len: the number of bytes of ID. 
+ * @sector_size: the size listed here is what works with SPINOR_OP_SE, which + * isn't necessarily called a "sector" by the vendor. + * @n_sectors: the number of sectors. + * @n_banks: the number of banks. + * @page_size: the flash's page size. + * @addr_nbytes: number of address bytes to send. + * + * @parse_sfdp: true when flash supports SFDP tables. The false value has no + * meaning. If one wants to skip the SFDP tables, one should + * instead use the SPI_NOR_SKIP_SFDP sfdp_flag. + * @flags: flags that indicate support that is not defined by the + * JESD216 standard in its SFDP tables. Flag meanings: + * SPI_NOR_HAS_LOCK: flash supports lock/unlock via SR + * SPI_NOR_HAS_TB: flash SR has Top/Bottom (TB) protect bit. Must be + * used with SPI_NOR_HAS_LOCK. + * SPI_NOR_TB_SR_BIT6: Top/Bottom (TB) is bit 6 of status register. + * Must be used with SPI_NOR_HAS_TB. + * SPI_NOR_4BIT_BP: flash SR has 4 bit fields (BP0-3) for block + * protection. + * SPI_NOR_BP3_SR_BIT6: BP3 is bit 6 of status register. Must be used with + * SPI_NOR_4BIT_BP. + * SPI_NOR_SWP_IS_VOLATILE: flash has volatile software write protection bits. + * Usually these will power-up in a write-protected + * state. + * SPI_NOR_NO_ERASE: no erase command needed. + * NO_CHIP_ERASE: chip does not support chip erase. + * SPI_NOR_NO_FR: can't do fastread. + * SPI_NOR_QUAD_PP: flash supports Quad Input Page Program. + * SPI_NOR_RWW: flash supports reads while write. + * + * @no_sfdp_flags: flags that indicate support that can be discovered via SFDP. + * Used when SFDP tables are not defined in the flash. These + * flags are used together with the SPI_NOR_SKIP_SFDP flag. + * SPI_NOR_SKIP_SFDP: skip parsing of SFDP tables. + * SECT_4K: SPINOR_OP_BE_4K works uniformly. + * SPI_NOR_DUAL_READ: flash supports Dual Read. + * SPI_NOR_QUAD_READ: flash supports Quad Read. + * SPI_NOR_OCTAL_READ: flash supports Octal Read. + * SPI_NOR_OCTAL_DTR_READ: flash supports octal DTR Read. + * SPI_NOR_OCTAL_DTR_PP: flash supports Octal DTR Page Program. + * + * @fixup_flags: flags that indicate support that can be discovered via SFDP + * ideally, but can not be discovered for this particular flash + * because the SFDP table that indicates this support is not + * defined by the flash. In case the table for this support is + * defined but has wrong values, one should instead use a + * post_sfdp() hook to set the SNOR_F equivalent flag. + * + * SPI_NOR_4B_OPCODES: use dedicated 4byte address op codes to support + * memory size above 128Mib. + * SPI_NOR_IO_MODE_EN_VOLATILE: flash enables the best available I/O mode + * via a volatile bit. + * @mfr_flags: manufacturer private flags. Used in the manufacturer fixup + * hooks to differentiate support between flashes of the same + * manufacturer. + * @otp_org: flash's OTP organization. + * @fixups: part specific fixup hooks. 
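+ *
+ * For reference, a table entry is typically composed from the helper macros
+ * defined below, along the lines of (illustrative values, not a real part):
+ *
+ *	{ "example25q64", INFO(0x123456, 0, 64 * 1024, 128)
+ *		FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ *		NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },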
+ */ +struct flash_info { + char *name; + u8 id[SPI_NOR_MAX_ID_LEN]; + u8 id_len; + unsigned sector_size; + u16 n_sectors; + u16 page_size; + u8 n_banks; + u8 addr_nbytes; + + bool parse_sfdp; + u16 flags; +#define SPI_NOR_HAS_LOCK BIT(0) +#define SPI_NOR_HAS_TB BIT(1) +#define SPI_NOR_TB_SR_BIT6 BIT(2) +#define SPI_NOR_4BIT_BP BIT(3) +#define SPI_NOR_BP3_SR_BIT6 BIT(4) +#define SPI_NOR_SWP_IS_VOLATILE BIT(5) +#define SPI_NOR_NO_ERASE BIT(6) +#define NO_CHIP_ERASE BIT(7) +#define SPI_NOR_NO_FR BIT(8) +#define SPI_NOR_QUAD_PP BIT(9) +#define SPI_NOR_RWW BIT(10) + + u8 no_sfdp_flags; +#define SPI_NOR_SKIP_SFDP BIT(0) +#define SECT_4K BIT(1) +#define SPI_NOR_DUAL_READ BIT(3) +#define SPI_NOR_QUAD_READ BIT(4) +#define SPI_NOR_OCTAL_READ BIT(5) +#define SPI_NOR_OCTAL_DTR_READ BIT(6) +#define SPI_NOR_OCTAL_DTR_PP BIT(7) + + u8 fixup_flags; +#define SPI_NOR_4B_OPCODES BIT(0) +#define SPI_NOR_IO_MODE_EN_VOLATILE BIT(1) + + u8 mfr_flags; + + const struct spi_nor_otp_organization otp_org; + const struct spi_nor_fixups *fixups; +}; + +#define SPI_NOR_ID_2ITEMS(_id) ((_id) >> 8) & 0xff, (_id) & 0xff +#define SPI_NOR_ID_3ITEMS(_id) ((_id) >> 16) & 0xff, SPI_NOR_ID_2ITEMS(_id) + +#define SPI_NOR_ID(_jedec_id, _ext_id) \ + .id = { SPI_NOR_ID_3ITEMS(_jedec_id), SPI_NOR_ID_2ITEMS(_ext_id) }, \ + .id_len = !(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0)) + +#define SPI_NOR_ID6(_jedec_id, _ext_id) \ + .id = { SPI_NOR_ID_3ITEMS(_jedec_id), SPI_NOR_ID_3ITEMS(_ext_id) }, \ + .id_len = 6 + +#define SPI_NOR_GEOMETRY(_sector_size, _n_sectors, _n_banks) \ + .sector_size = (_sector_size), \ + .n_sectors = (_n_sectors), \ + .page_size = 256, \ + .n_banks = (_n_banks) + +/* Used when the "_ext_id" is two bytes at most */ +#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors) \ + SPI_NOR_ID((_jedec_id), (_ext_id)), \ + SPI_NOR_GEOMETRY((_sector_size), (_n_sectors), 1), + +#define INFOB(_jedec_id, _ext_id, _sector_size, _n_sectors, _n_banks) \ + SPI_NOR_ID((_jedec_id), (_ext_id)), \ + SPI_NOR_GEOMETRY((_sector_size), (_n_sectors), (_n_banks)), + +#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors) \ + SPI_NOR_ID6((_jedec_id), (_ext_id)), \ + SPI_NOR_GEOMETRY((_sector_size), (_n_sectors), 1), + +#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_nbytes) \ + .sector_size = (_sector_size), \ + .n_sectors = (_n_sectors), \ + .page_size = (_page_size), \ + .n_banks = 1, \ + .addr_nbytes = (_addr_nbytes), \ + .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR, \ + +#define OTP_INFO(_len, _n_regions, _base, _offset) \ + .otp_org = { \ + .len = (_len), \ + .base = (_base), \ + .offset = (_offset), \ + .n_regions = (_n_regions), \ + }, + +#define PARSE_SFDP \ + .parse_sfdp = true, \ + +#define FLAGS(_flags) \ + .flags = (_flags), \ + +#define NO_SFDP_FLAGS(_no_sfdp_flags) \ + .no_sfdp_flags = (_no_sfdp_flags), \ + +#define FIXUP_FLAGS(_fixup_flags) \ + .fixup_flags = (_fixup_flags), \ + +#define MFR_FLAGS(_mfr_flags) \ + .mfr_flags = (_mfr_flags), \ + +/** + * struct spi_nor_manufacturer - SPI NOR manufacturer object + * @name: manufacturer name + * @parts: array of parts supported by this manufacturer + * @nparts: number of entries in the parts array + * @fixups: hooks called at various points in time during spi_nor_scan() + */ +struct spi_nor_manufacturer { + const char *name; + const struct flash_info *parts; + unsigned int nparts; + const struct spi_nor_fixups *fixups; +}; + +/** + * struct sfdp - SFDP data + * @num_dwords: number of entries in the dwords array + * @dwords: array of double words of the SFDP data 
+ */ +struct sfdp { + size_t num_dwords; + u32 *dwords; +}; + +/* Manufacturer drivers. */ +extern const struct spi_nor_manufacturer spi_nor_atmel; +extern const struct spi_nor_manufacturer spi_nor_catalyst; +extern const struct spi_nor_manufacturer spi_nor_eon; +extern const struct spi_nor_manufacturer spi_nor_esmt; +extern const struct spi_nor_manufacturer spi_nor_everspin; +extern const struct spi_nor_manufacturer spi_nor_fujitsu; +extern const struct spi_nor_manufacturer spi_nor_gigadevice; +extern const struct spi_nor_manufacturer spi_nor_intel; +extern const struct spi_nor_manufacturer spi_nor_issi; +extern const struct spi_nor_manufacturer spi_nor_macronix; +extern const struct spi_nor_manufacturer spi_nor_micron; +extern const struct spi_nor_manufacturer spi_nor_st; +extern const struct spi_nor_manufacturer spi_nor_spansion; +extern const struct spi_nor_manufacturer spi_nor_sst; +extern const struct spi_nor_manufacturer spi_nor_winbond; +extern const struct spi_nor_manufacturer spi_nor_xilinx; +extern const struct spi_nor_manufacturer spi_nor_xmc; + +extern const struct attribute_group *spi_nor_sysfs_groups[]; + +void spi_nor_spimem_setup_op(const struct spi_nor *nor, + struct spi_mem_op *op, + const enum spi_nor_protocol proto); +int spi_nor_write_enable(struct spi_nor *nor); +int spi_nor_write_disable(struct spi_nor *nor); +int spi_nor_set_4byte_addr_mode_en4b_ex4b(struct spi_nor *nor, bool enable); +int spi_nor_set_4byte_addr_mode_wren_en4b_ex4b(struct spi_nor *nor, + bool enable); +int spi_nor_set_4byte_addr_mode_brwr(struct spi_nor *nor, bool enable); +int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable); +int spi_nor_wait_till_ready(struct spi_nor *nor); +int spi_nor_global_block_unlock(struct spi_nor *nor); +int spi_nor_prep_and_lock(struct spi_nor *nor); +void spi_nor_unlock_and_unprep(struct spi_nor *nor); +int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor); +int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor); +int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor); +int spi_nor_read_id(struct spi_nor *nor, u8 naddr, u8 ndummy, u8 *id, + enum spi_nor_protocol reg_proto); +int spi_nor_read_sr(struct spi_nor *nor, u8 *sr); +int spi_nor_sr_ready(struct spi_nor *nor); +int spi_nor_read_cr(struct spi_nor *nor, u8 *cr); +int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len); +int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1); +int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr); + +ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, + u8 *buf); +ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len, + const u8 *buf); +int spi_nor_read_any_reg(struct spi_nor *nor, struct spi_mem_op *op, + enum spi_nor_protocol proto); +int spi_nor_write_any_volatile_reg(struct spi_nor *nor, struct spi_mem_op *op, + enum spi_nor_protocol proto); +int spi_nor_erase_sector(struct spi_nor *nor, u32 addr); + +int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf); +int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len, + const u8 *buf); +int spi_nor_otp_erase_secr(struct spi_nor *nor, loff_t addr); +int spi_nor_otp_lock_sr2(struct spi_nor *nor, unsigned int region); +int spi_nor_otp_is_locked_sr2(struct spi_nor *nor, unsigned int region); + +int spi_nor_hwcaps_read2cmd(u32 hwcaps); +int spi_nor_hwcaps_pp2cmd(u32 hwcaps); +u8 spi_nor_convert_3to4_read(u8 opcode); +void spi_nor_set_read_settings(struct spi_nor_read_command *read, + u8 num_mode_clocks, + u8 
num_wait_states, + u8 opcode, + enum spi_nor_protocol proto); +void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode, + enum spi_nor_protocol proto); + +void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size, + u8 opcode); +void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase); +struct spi_nor_erase_region * +spi_nor_region_next(struct spi_nor_erase_region *region); +void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map, + u8 erase_mask, u64 flash_size); + +int spi_nor_post_bfpt_fixups(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt); + +void spi_nor_init_default_locking_ops(struct spi_nor *nor); +void spi_nor_try_unlock_all(struct spi_nor *nor); +void spi_nor_set_mtd_locking_ops(struct spi_nor *nor); +void spi_nor_set_mtd_otp_ops(struct spi_nor *nor); + +int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode, + u8 *buf, size_t len); +int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode, + const u8 *buf, size_t len); + +int spi_nor_check_sfdp_signature(struct spi_nor *nor); +int spi_nor_parse_sfdp(struct spi_nor *nor); + +static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd) +{ + return container_of(mtd, struct spi_nor, mtd); +} + +#ifdef CONFIG_DEBUG_FS +void spi_nor_debugfs_register(struct spi_nor *nor); +void spi_nor_debugfs_shutdown(void); +#else +static inline void spi_nor_debugfs_register(struct spi_nor *nor) {} +static inline void spi_nor_debugfs_shutdown(void) {} +#endif + +#endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */ diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c new file mode 100644 index 0000000000..6e163cb5b4 --- /dev/null +++ b/drivers/mtd/spi-nor/debugfs.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/debugfs.h> +#include <linux/mtd/spi-nor.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi-mem.h> + +#include "core.h" + +#define SPI_NOR_DEBUGFS_ROOT "spi-nor" + +#define SNOR_F_NAME(name) [ilog2(SNOR_F_##name)] = #name +static const char *const snor_f_names[] = { + SNOR_F_NAME(HAS_SR_TB), + SNOR_F_NAME(NO_OP_CHIP_ERASE), + SNOR_F_NAME(BROKEN_RESET), + SNOR_F_NAME(4B_OPCODES), + SNOR_F_NAME(HAS_4BAIT), + SNOR_F_NAME(HAS_LOCK), + SNOR_F_NAME(HAS_16BIT_SR), + SNOR_F_NAME(NO_READ_CR), + SNOR_F_NAME(HAS_SR_TB_BIT6), + SNOR_F_NAME(HAS_4BIT_BP), + SNOR_F_NAME(HAS_SR_BP3_BIT6), + SNOR_F_NAME(IO_MODE_EN_VOLATILE), + SNOR_F_NAME(SOFT_RESET), + SNOR_F_NAME(SWP_IS_VOLATILE), + SNOR_F_NAME(RWW), + SNOR_F_NAME(ECC), + SNOR_F_NAME(NO_WP), +}; +#undef SNOR_F_NAME + +static const char *spi_nor_protocol_name(enum spi_nor_protocol proto) +{ + switch (proto) { + case SNOR_PROTO_1_1_1: return "1S-1S-1S"; + case SNOR_PROTO_1_1_2: return "1S-1S-2S"; + case SNOR_PROTO_1_1_4: return "1S-1S-4S"; + case SNOR_PROTO_1_1_8: return "1S-1S-8S"; + case SNOR_PROTO_1_2_2: return "1S-2S-2S"; + case SNOR_PROTO_1_4_4: return "1S-4S-4S"; + case SNOR_PROTO_1_8_8: return "1S-8S-8S"; + case SNOR_PROTO_2_2_2: return "2S-2S-2S"; + case SNOR_PROTO_4_4_4: return "4S-4S-4S"; + case SNOR_PROTO_8_8_8: return "8S-8S-8S"; + case SNOR_PROTO_1_1_1_DTR: return "1D-1D-1D"; + case SNOR_PROTO_1_2_2_DTR: return "1D-2D-2D"; + case SNOR_PROTO_1_4_4_DTR: return "1D-4D-4D"; + case SNOR_PROTO_1_8_8_DTR: return "1D-8D-8D"; + case SNOR_PROTO_8_8_8_DTR: return "8D-8D-8D"; + } + + return "<unknown>"; +} + +static void spi_nor_print_flags(struct seq_file *s, unsigned long flags, + const char *const *names, int names_len) +{ + bool sep = 
false; + int i; + + for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) { + if (!(flags & BIT(i))) + continue; + if (sep) + seq_puts(s, " | "); + sep = true; + if (i < names_len && names[i]) + seq_puts(s, names[i]); + else + seq_printf(s, "1<<%d", i); + } +} + +static int spi_nor_params_show(struct seq_file *s, void *data) +{ + struct spi_nor *nor = s->private; + struct spi_nor_flash_parameter *params = nor->params; + struct spi_nor_erase_map *erase_map = ¶ms->erase_map; + struct spi_nor_erase_region *region; + const struct flash_info *info = nor->info; + char buf[16], *str; + int i; + + seq_printf(s, "name\t\t%s\n", info->name); + seq_printf(s, "id\t\t%*ph\n", SPI_NOR_MAX_ID_LEN, nor->id); + string_get_size(params->size, 1, STRING_UNITS_2, buf, sizeof(buf)); + seq_printf(s, "size\t\t%s\n", buf); + seq_printf(s, "write size\t%u\n", params->writesize); + seq_printf(s, "page size\t%u\n", params->page_size); + seq_printf(s, "address nbytes\t%u\n", nor->addr_nbytes); + + seq_puts(s, "flags\t\t"); + spi_nor_print_flags(s, nor->flags, snor_f_names, sizeof(snor_f_names)); + seq_puts(s, "\n"); + + seq_puts(s, "\nopcodes\n"); + seq_printf(s, " read\t\t0x%02x\n", nor->read_opcode); + seq_printf(s, " dummy cycles\t%u\n", nor->read_dummy); + seq_printf(s, " erase\t\t0x%02x\n", nor->erase_opcode); + seq_printf(s, " program\t0x%02x\n", nor->program_opcode); + + switch (nor->cmd_ext_type) { + case SPI_NOR_EXT_NONE: + str = "none"; + break; + case SPI_NOR_EXT_REPEAT: + str = "repeat"; + break; + case SPI_NOR_EXT_INVERT: + str = "invert"; + break; + default: + str = "<unknown>"; + break; + } + seq_printf(s, " 8D extension\t%s\n", str); + + seq_puts(s, "\nprotocols\n"); + seq_printf(s, " read\t\t%s\n", + spi_nor_protocol_name(nor->read_proto)); + seq_printf(s, " write\t\t%s\n", + spi_nor_protocol_name(nor->write_proto)); + seq_printf(s, " register\t%s\n", + spi_nor_protocol_name(nor->reg_proto)); + + seq_puts(s, "\nerase commands\n"); + for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) { + struct spi_nor_erase_type *et = &erase_map->erase_type[i]; + + if (et->size) { + string_get_size(et->size, 1, STRING_UNITS_2, buf, + sizeof(buf)); + seq_printf(s, " %02x (%s) [%d]\n", et->opcode, buf, i); + } + } + + if (!(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) { + string_get_size(params->size, 1, STRING_UNITS_2, buf, sizeof(buf)); + seq_printf(s, " %02x (%s)\n", SPINOR_OP_CHIP_ERASE, buf); + } + + seq_puts(s, "\nsector map\n"); + seq_puts(s, " region (in hex) | erase mask | flags\n"); + seq_puts(s, " ------------------+------------+----------\n"); + for (region = erase_map->regions; + region; + region = spi_nor_region_next(region)) { + u64 start = region->offset & ~SNOR_ERASE_FLAGS_MASK; + u64 flags = region->offset & SNOR_ERASE_FLAGS_MASK; + u64 end = start + region->size - 1; + + seq_printf(s, " %08llx-%08llx | [%c%c%c%c] | %s\n", + start, end, + flags & BIT(0) ? '0' : ' ', + flags & BIT(1) ? '1' : ' ', + flags & BIT(2) ? '2' : ' ', + flags & BIT(3) ? '3' : ' ', + flags & SNOR_OVERLAID_REGION ? "overlaid" : ""); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(spi_nor_params); + +static void spi_nor_print_read_cmd(struct seq_file *s, u32 cap, + struct spi_nor_read_command *cmd) +{ + seq_printf(s, " %s%s\n", spi_nor_protocol_name(cmd->proto), + cap == SNOR_HWCAPS_READ_FAST ? 
" (fast read)" : ""); + seq_printf(s, " opcode\t0x%02x\n", cmd->opcode); + seq_printf(s, " mode cycles\t%u\n", cmd->num_mode_clocks); + seq_printf(s, " dummy cycles\t%u\n", cmd->num_wait_states); +} + +static void spi_nor_print_pp_cmd(struct seq_file *s, + struct spi_nor_pp_command *cmd) +{ + seq_printf(s, " %s\n", spi_nor_protocol_name(cmd->proto)); + seq_printf(s, " opcode\t0x%02x\n", cmd->opcode); +} + +static int spi_nor_capabilities_show(struct seq_file *s, void *data) +{ + struct spi_nor *nor = s->private; + struct spi_nor_flash_parameter *params = nor->params; + u32 hwcaps = params->hwcaps.mask; + int i, cmd; + + seq_puts(s, "Supported read modes by the flash\n"); + for (i = 0; i < sizeof(hwcaps) * BITS_PER_BYTE; i++) { + if (!(hwcaps & BIT(i))) + continue; + + cmd = spi_nor_hwcaps_read2cmd(BIT(i)); + if (cmd < 0) + continue; + + spi_nor_print_read_cmd(s, BIT(i), ¶ms->reads[cmd]); + hwcaps &= ~BIT(i); + } + + seq_puts(s, "\nSupported page program modes by the flash\n"); + for (i = 0; i < sizeof(hwcaps) * BITS_PER_BYTE; i++) { + if (!(hwcaps & BIT(i))) + continue; + + cmd = spi_nor_hwcaps_pp2cmd(BIT(i)); + if (cmd < 0) + continue; + + spi_nor_print_pp_cmd(s, ¶ms->page_programs[cmd]); + hwcaps &= ~BIT(i); + } + + if (hwcaps) + seq_printf(s, "\nunknown hwcaps 0x%x\n", hwcaps); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(spi_nor_capabilities); + +static void spi_nor_debugfs_unregister(void *data) +{ + struct spi_nor *nor = data; + + debugfs_remove(nor->debugfs_root); + nor->debugfs_root = NULL; +} + +static struct dentry *rootdir; + +void spi_nor_debugfs_register(struct spi_nor *nor) +{ + struct dentry *d; + int ret; + + if (!rootdir) + rootdir = debugfs_create_dir(SPI_NOR_DEBUGFS_ROOT, NULL); + + ret = devm_add_action(nor->dev, spi_nor_debugfs_unregister, nor); + if (ret) + return; + + d = debugfs_create_dir(dev_name(nor->dev), rootdir); + nor->debugfs_root = d; + + debugfs_create_file("params", 0444, d, nor, &spi_nor_params_fops); + debugfs_create_file("capabilities", 0444, d, nor, + &spi_nor_capabilities_fops); +} + +void spi_nor_debugfs_shutdown(void) +{ + debugfs_remove(rootdir); +} diff --git a/drivers/mtd/spi-nor/eon.c b/drivers/mtd/spi-nor/eon.c new file mode 100644 index 0000000000..50a1105371 --- /dev/null +++ b/drivers/mtd/spi-nor/eon.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. 
+ */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static const struct flash_info eon_nor_parts[] = { + /* EON -- en25xxx */ + { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SECT_4K) }, + { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64) }, + { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64) }, + { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128) }, + { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K) }, + { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) }, + { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) }, + { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64) }, + { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) }, + { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256) }, + { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512) + PARSE_SFDP }, + { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K) }, +}; + +const struct spi_nor_manufacturer spi_nor_eon = { + .name = "eon", + .parts = eon_nor_parts, + .nparts = ARRAY_SIZE(eon_nor_parts), +}; diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c new file mode 100644 index 0000000000..fcc3b0e7cd --- /dev/null +++ b/drivers/mtd/spi-nor/esmt.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static const struct flash_info esmt_nor_parts[] = { + /* ESMT */ + { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) }, + { "f25l32qa-2s", INFO(0x8c4116, 0, 64 * 1024, 64) + FLAGS(SPI_NOR_HAS_LOCK) + NO_SFDP_FLAGS(SECT_4K) }, + { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128) + FLAGS(SPI_NOR_HAS_LOCK) + NO_SFDP_FLAGS(SECT_4K) }, +}; + +const struct spi_nor_manufacturer spi_nor_esmt = { + .name = "esmt", + .parts = esmt_nor_parts, + .nparts = ARRAY_SIZE(esmt_nor_parts), +}; diff --git a/drivers/mtd/spi-nor/everspin.c b/drivers/mtd/spi-nor/everspin.c new file mode 100644 index 0000000000..84a07c2e05 --- /dev/null +++ b/drivers/mtd/spi-nor/everspin.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static const struct flash_info everspin_nor_parts[] = { + /* Everspin */ + { "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2) }, + { "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2) }, + { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3) }, + { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3) }, +}; + +const struct spi_nor_manufacturer spi_nor_everspin = { + .name = "everspin", + .parts = everspin_nor_parts, + .nparts = ARRAY_SIZE(everspin_nor_parts), +}; diff --git a/drivers/mtd/spi-nor/fujitsu.c b/drivers/mtd/spi-nor/fujitsu.c new file mode 100644 index 0000000000..69cffc5c73 --- /dev/null +++ b/drivers/mtd/spi-nor/fujitsu.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. 
+ */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static const struct flash_info fujitsu_nor_parts[] = { + /* Fujitsu */ + { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1) + FLAGS(SPI_NOR_NO_ERASE) }, +}; + +const struct spi_nor_manufacturer spi_nor_fujitsu = { + .name = "fujitsu", + .parts = fujitsu_nor_parts, + .nparts = ARRAY_SIZE(fujitsu_nor_parts), +}; diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c new file mode 100644 index 0000000000..d57ddaf152 --- /dev/null +++ b/drivers/mtd/spi-nor/gigadevice.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static int +gd25q256_post_bfpt(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt) +{ + /* + * GD25Q256C supports the first version of JESD216 which does not define + * the Quad Enable methods. Overwrite the default Quad Enable method. + * + * GD25Q256 GENERATION | SFDP MAJOR VERSION | SFDP MINOR VERSION + * GD25Q256C | SFDP_JESD216_MAJOR | SFDP_JESD216_MINOR + * GD25Q256D | SFDP_JESD216_MAJOR | SFDP_JESD216B_MINOR + * GD25Q256E | SFDP_JESD216_MAJOR | SFDP_JESD216B_MINOR + */ + if (bfpt_header->major == SFDP_JESD216_MAJOR && + bfpt_header->minor == SFDP_JESD216_MINOR) + nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable; + + return 0; +} + +static const struct spi_nor_fixups gd25q256_fixups = { + .post_bfpt = gd25q256_post_bfpt, +}; + +static const struct flash_info gigadevice_nor_parts[] = { + { "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512) + PARSE_SFDP + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6) + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) + .fixups = &gd25q256_fixups }, +}; + +const struct spi_nor_manufacturer spi_nor_gigadevice = { + .name = "gigadevice", + .parts = gigadevice_nor_parts, + .nparts = ARRAY_SIZE(gigadevice_nor_parts), +}; diff --git a/drivers/mtd/spi-nor/intel.c b/drivers/mtd/spi-nor/intel.c new file mode 100644 index 0000000000..9179f2d09c --- /dev/null +++ b/drivers/mtd/spi-nor/intel.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. 
+ */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static const struct flash_info intel_nor_parts[] = { + /* Intel/Numonyx -- xxxs33b */ + { "160s33b", INFO(0x898911, 0, 64 * 1024, 32) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, + { "320s33b", INFO(0x898912, 0, 64 * 1024, 64) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, + { "640s33b", INFO(0x898913, 0, 64 * 1024, 128) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, +}; + +const struct spi_nor_manufacturer spi_nor_intel = { + .name = "intel", + .parts = intel_nor_parts, + .nparts = ARRAY_SIZE(intel_nor_parts), +}; diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c new file mode 100644 index 0000000000..accdf7aa2b --- /dev/null +++ b/drivers/mtd/spi-nor/issi.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static int +is25lp256_post_bfpt_fixups(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt) +{ + /* + * IS25LP256 supports 4B opcodes, but the BFPT advertises + * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY. + * Overwrite the number of address bytes advertised by the BFPT. + */ + if ((bfpt->dwords[SFDP_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) == + BFPT_DWORD1_ADDRESS_BYTES_3_ONLY) + nor->params->addr_nbytes = 4; + + return 0; +} + +static const struct spi_nor_fixups is25lp256_fixups = { + .post_bfpt = is25lp256_post_bfpt_fixups, +}; + +static int pm25lv_nor_late_init(struct spi_nor *nor) +{ + struct spi_nor_erase_map *map = &nor->params->erase_map; + int i; + + /* The PM25LV series has a different 4k sector erase opcode */ + for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) + if (map->erase_type[i].size == 4096) + map->erase_type[i].opcode = SPINOR_OP_BE_4K_PMC; + + return 0; +} + +static const struct spi_nor_fixups pm25lv_nor_fixups = { + .late_init = pm25lv_nor_late_init, +}; + +static const struct flash_info issi_nor_parts[] = { + /* ISSI */ + { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2) + NO_SFDP_FLAGS(SECT_4K) }, + { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) }, + { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) }, + { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) }, + { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512) + PARSE_SFDP + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) + .fixups = &is25lp256_fixups }, + { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25wp256", INFO(0x9d7019, 0, 0, 0) + PARSE_SFDP + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) + FLAGS(SPI_NOR_QUAD_PP) + .fixups = &is25lp256_fixups }, + + /* PMC */ + { "pm25lv512", INFO(0, 0, 32 * 1024, 2) + NO_SFDP_FLAGS(SECT_4K) + .fixups = 
&pm25lv_nor_fixups + }, + { "pm25lv010", INFO(0, 0, 32 * 1024, 4) + NO_SFDP_FLAGS(SECT_4K) + .fixups = &pm25lv_nor_fixups + }, + { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SECT_4K) }, +}; + +static void issi_nor_default_init(struct spi_nor *nor) +{ + nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable; +} + +static const struct spi_nor_fixups issi_fixups = { + .default_init = issi_nor_default_init, +}; + +const struct spi_nor_manufacturer spi_nor_issi = { + .name = "issi", + .parts = issi_nor_parts, + .nparts = ARRAY_SIZE(issi_nor_parts), + .fixups = &issi_fixups, +}; diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c new file mode 100644 index 0000000000..eb149e517c --- /dev/null +++ b/drivers/mtd/spi-nor/macronix.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static int +mx25l25635_post_bfpt_fixups(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt) +{ + /* + * MX25L25635F supports 4B opcodes but MX25L25635E does not. + * Unfortunately, Macronix has re-used the same JEDEC ID for both + * variants which prevents us from defining a new entry in the parts + * table. + * We need a way to differentiate MX25L25635E and MX25L25635F, and it + * seems that the F version advertises support for Fast Read 4-4-4 in + * its BFPT table. + */ + if (bfpt->dwords[SFDP_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4) + nor->flags |= SNOR_F_4B_OPCODES; + + return 0; +} + +static const struct spi_nor_fixups mx25l25635_fixups = { + .post_bfpt = mx25l25635_post_bfpt_fixups, +}; + +static const struct flash_info macronix_nor_parts[] = { + /* Macronix */ + { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1) + NO_SFDP_FLAGS(SECT_4K) }, + { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4) + NO_SFDP_FLAGS(SECT_4K) }, + { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8) + NO_SFDP_FLAGS(SECT_4K) }, + { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16) }, + { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32) + NO_SFDP_FLAGS(SECT_4K) }, + { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SECT_4K) }, + { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SECT_4K) }, + { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K) }, + { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4) + NO_SFDP_FLAGS(SECT_4K) }, + { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8) + NO_SFDP_FLAGS(SECT_4K) }, + { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16) + NO_SFDP_FLAGS(SECT_4K) }, + { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K) }, + { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP) + NO_SFDP_FLAGS(SECT_4K) }, + { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256) }, + { "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512) + NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + .fixups = &mx25l25635_fixups }, 
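	/*
	 * For readers new to these tables: INFO() takes the JEDEC ID, an
	 * extended ID, the sector size and the sector count, so the
	 * mx25l25635e entry above describes ID 0xc22019 with 512 sectors of
	 * 64 KiB (32 MiB total). A purely hypothetical new part would be
	 * wired up the same way, for instance:
	 *
	 *	{ "mx25xxx32", INFO(0xc2ffff, 0, 64 * 1024, 64)
	 *		NO_SFDP_FLAGS(SECT_4K) },
	 *
	 * with the ID and flags taken from the datasheet; the name and ID in
	 * this sketch are made up for illustration only.
	 */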
+ { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512) + NO_SFDP_FLAGS(SECT_4K) + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) }, + { "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) }, + { "mx25uw51245g", INFOB(0xc2813a, 0, 0, 0, 4) + PARSE_SFDP + FLAGS(SPI_NOR_RWW) }, + { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512) }, + { "mx66l51235f", INFO(0xc2201a, 0, 64 * 1024, 1024) + NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) }, + { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) }, + { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048) + NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) }, + { "mx66u2g45g", INFO(0xc2253c, 0, 64 * 1024, 4096) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) }, +}; + +static void macronix_nor_default_init(struct spi_nor *nor) +{ + nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable; +} + +static int macronix_nor_late_init(struct spi_nor *nor) +{ + if (!nor->params->set_4byte_addr_mode) + nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_en4b_ex4b; + + return 0; +} + +static const struct spi_nor_fixups macronix_nor_fixups = { + .default_init = macronix_nor_default_init, + .late_init = macronix_nor_late_init, +}; + +const struct spi_nor_manufacturer spi_nor_macronix = { + .name = "macronix", + .parts = macronix_nor_parts, + .nparts = ARRAY_SIZE(macronix_nor_parts), + .fixups = ¯onix_nor_fixups, +}; diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c new file mode 100644 index 0000000000..6ad080c52a --- /dev/null +++ b/drivers/mtd/spi-nor/micron-st.c @@ -0,0 +1,462 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +/* flash_info mfr_flag. Used to read proprietary FSR register. */ +#define USE_FSR BIT(0) + +#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */ +#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */ +#define SPINOR_OP_MT_DTR_RD 0xfd /* Fast Read opcode in DTR mode */ +#define SPINOR_OP_MT_RD_ANY_REG 0x85 /* Read volatile register */ +#define SPINOR_OP_MT_WR_ANY_REG 0x81 /* Write volatile register */ +#define SPINOR_REG_MT_CFR0V 0x00 /* For setting octal DTR mode */ +#define SPINOR_REG_MT_CFR1V 0x01 /* For setting dummy cycles */ +#define SPINOR_REG_MT_CFR1V_DEF 0x1f /* Default dummy cycles */ +#define SPINOR_MT_OCT_DTR 0xe7 /* Enable Octal DTR. */ +#define SPINOR_MT_EXSPI 0xff /* Enable Extended SPI (default) */ + +/* Flag Status Register bits */ +#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */ +#define FSR_E_ERR BIT(5) /* Erase operation status */ +#define FSR_P_ERR BIT(4) /* Program operation status */ +#define FSR_PT_ERR BIT(1) /* Protection error bit */ + +/* Micron ST SPI NOR flash operations. 
*/ +#define MICRON_ST_NOR_WR_ANY_REG_OP(naddr, addr, ndata, buf) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 0), \ + SPI_MEM_OP_ADDR(naddr, addr, 0), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_OUT(ndata, buf, 0)) + +#define MICRON_ST_RDFSR_OP(buf) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_IN(1, buf, 0)) + +#define MICRON_ST_CLFSR_OP \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +static int micron_st_nor_octal_dtr_en(struct spi_nor *nor) +{ + struct spi_mem_op op; + u8 *buf = nor->bouncebuf; + int ret; + u8 addr_mode_nbytes = nor->params->addr_mode_nbytes; + + /* Use 20 dummy cycles for memory array reads. */ + *buf = 20; + op = (struct spi_mem_op) + MICRON_ST_NOR_WR_ANY_REG_OP(addr_mode_nbytes, + SPINOR_REG_MT_CFR1V, 1, buf); + ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto); + if (ret) + return ret; + + buf[0] = SPINOR_MT_OCT_DTR; + op = (struct spi_mem_op) + MICRON_ST_NOR_WR_ANY_REG_OP(addr_mode_nbytes, + SPINOR_REG_MT_CFR0V, 1, buf); + ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto); + if (ret) + return ret; + + /* Read flash ID to make sure the switch was successful. */ + ret = spi_nor_read_id(nor, 0, 8, buf, SNOR_PROTO_8_8_8_DTR); + if (ret) { + dev_dbg(nor->dev, "error %d reading JEDEC ID after enabling 8D-8D-8D mode\n", ret); + return ret; + } + + if (memcmp(buf, nor->info->id, nor->info->id_len)) + return -EINVAL; + + return 0; +} + +static int micron_st_nor_octal_dtr_dis(struct spi_nor *nor) +{ + struct spi_mem_op op; + u8 *buf = nor->bouncebuf; + int ret; + + /* + * The register is 1-byte wide, but 1-byte transactions are not allowed + * in 8D-8D-8D mode. The next register is the dummy cycle configuration + * register. Since the transaction needs to be at least 2 bytes wide, + * set the next register to its default value. This also makes sense + * because the value was changed when enabling 8D-8D-8D mode, it should + * be reset when disabling. + */ + buf[0] = SPINOR_MT_EXSPI; + buf[1] = SPINOR_REG_MT_CFR1V_DEF; + op = (struct spi_mem_op) + MICRON_ST_NOR_WR_ANY_REG_OP(nor->addr_nbytes, + SPINOR_REG_MT_CFR0V, 2, buf); + ret = spi_nor_write_any_volatile_reg(nor, &op, SNOR_PROTO_8_8_8_DTR); + if (ret) + return ret; + + /* Read flash ID to make sure the switch was successful. */ + ret = spi_nor_read_id(nor, 0, 0, buf, SNOR_PROTO_1_1_1); + if (ret) { + dev_dbg(nor->dev, "error %d reading JEDEC ID after disabling 8D-8D-8D mode\n", ret); + return ret; + } + + if (memcmp(buf, nor->info->id, nor->info->id_len)) + return -EINVAL; + + return 0; +} + +static int micron_st_nor_set_octal_dtr(struct spi_nor *nor, bool enable) +{ + return enable ? micron_st_nor_octal_dtr_en(nor) : + micron_st_nor_octal_dtr_dis(nor); +} + +static void mt35xu512aba_default_init(struct spi_nor *nor) +{ + nor->params->set_octal_dtr = micron_st_nor_set_octal_dtr; +} + +static int mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor) +{ + /* Set the Fast Read settings. */ + nor->params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR; + spi_nor_set_read_settings(&nor->params->reads[SNOR_CMD_READ_8_8_8_DTR], + 0, 20, SPINOR_OP_MT_DTR_RD, + SNOR_PROTO_8_8_8_DTR); + + nor->cmd_ext_type = SPI_NOR_EXT_REPEAT; + nor->params->rdsr_dummy = 8; + nor->params->rdsr_addr_nbytes = 0; + + /* + * The BFPT quad enable field is set to a reserved value so the quad + * enable function is ignored by spi_nor_parse_bfpt(). Make sure we + * disable it. 
+ */ + nor->params->quad_enable = NULL; + + return 0; +} + +static const struct spi_nor_fixups mt35xu512aba_fixups = { + .default_init = mt35xu512aba_default_init, + .post_sfdp = mt35xu512aba_post_sfdp_fixup, +}; + +static const struct flash_info micron_nor_parts[] = { + { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ | + SPI_NOR_OCTAL_DTR_READ | SPI_NOR_OCTAL_DTR_PP) + FIXUP_FLAGS(SPI_NOR_4B_OPCODES | SPI_NOR_IO_MODE_EN_VOLATILE) + MFR_FLAGS(USE_FSR) + .fixups = &mt35xu512aba_fixups + }, + { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ) + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) + MFR_FLAGS(USE_FSR) + }, +}; + +static const struct flash_info st_nor_parts[] = { + { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) }, + { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) }, + { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP | + SPI_NOR_BP3_SR_BIT6) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_FSR) + }, + { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP | + SPI_NOR_BP3_SR_BIT6) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_FSR) + }, + { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) + MFR_FLAGS(USE_FSR) + }, + { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_FSR) + }, + { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP | + SPI_NOR_BP3_SR_BIT6) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) + MFR_FLAGS(USE_FSR) + }, + { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_FSR) + }, + { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) + MFR_FLAGS(USE_FSR) + }, + { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP | + SPI_NOR_BP3_SR_BIT6) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_FSR) + }, + { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) + MFR_FLAGS(USE_FSR) + }, + { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP | + SPI_NOR_BP3_SR_BIT6) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_FSR) + }, + { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP | + SPI_NOR_BP3_SR_BIT6 | NO_CHIP_ERASE) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_FSR) + }, + { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048) + FLAGS(NO_CHIP_ERASE) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_FSR) + }, + { "mt25ql02g", INFO(0x20ba22, 0, 64 * 
1024, 4096) + FLAGS(NO_CHIP_ERASE) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_FSR) + }, + { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096) + FLAGS(NO_CHIP_ERASE) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_FSR) + }, + + { "m25p05", INFO(0x202010, 0, 32 * 1024, 2) }, + { "m25p10", INFO(0x202011, 0, 32 * 1024, 4) }, + { "m25p20", INFO(0x202012, 0, 64 * 1024, 4) }, + { "m25p40", INFO(0x202013, 0, 64 * 1024, 8) }, + { "m25p80", INFO(0x202014, 0, 64 * 1024, 16) }, + { "m25p16", INFO(0x202015, 0, 64 * 1024, 32) }, + { "m25p32", INFO(0x202016, 0, 64 * 1024, 64) }, + { "m25p64", INFO(0x202017, 0, 64 * 1024, 128) }, + { "m25p128", INFO(0x202018, 0, 256 * 1024, 64) }, + + { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2) }, + { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4) }, + { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4) }, + { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8) }, + { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16) }, + { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32) }, + { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64) }, + { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128) }, + { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64) }, + + { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2) }, + { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16) }, + { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32) }, + + { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4) }, + { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16) }, + { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32) + NO_SFDP_FLAGS(SECT_4K) }, + + { "m25px16", INFO(0x207115, 0, 64 * 1024, 32) + NO_SFDP_FLAGS(SECT_4K) }, + { "m25px32", INFO(0x207116, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SECT_4K) }, + { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SECT_4K) }, + { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SECT_4K) }, + { "m25px64", INFO(0x207117, 0, 64 * 1024, 128) }, + { "m25px80", INFO(0x207114, 0, 64 * 1024, 16) }, +}; + +/** + * micron_st_nor_read_fsr() - Read the Flag Status Register. + * @nor: pointer to 'struct spi_nor' + * @fsr: pointer to a DMA-able buffer where the value of the + * Flag Status Register will be written. Should be at least 2 + * bytes. + * + * Return: 0 on success, -errno otherwise. + */ +static int micron_st_nor_read_fsr(struct spi_nor *nor, u8 *fsr) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = MICRON_ST_RDFSR_OP(fsr); + + if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) { + op.addr.nbytes = nor->params->rdsr_addr_nbytes; + op.dummy.nbytes = nor->params->rdsr_dummy; + /* + * We don't want to read only one byte in DTR mode. So, + * read 2 and then discard the second byte. + */ + op.data.nbytes = 2; + } + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr, + 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d reading FSR\n", ret); + + return ret; +} + +/** + * micron_st_nor_clear_fsr() - Clear the Flag Status Register. + * @nor: pointer to 'struct spi_nor'. 
+ */ +static void micron_st_nor_clear_fsr(struct spi_nor *nor) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = MICRON_ST_CLFSR_OP; + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR, + NULL, 0); + } + + if (ret) + dev_dbg(nor->dev, "error %d clearing FSR\n", ret); +} + +/** + * micron_st_nor_ready() - Query the Status Register as well as the Flag Status + * Register to see if the flash is ready for new commands. If there are any + * errors in the FSR clear them. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 1 if ready, 0 if not ready, -errno on errors. + */ +static int micron_st_nor_ready(struct spi_nor *nor) +{ + int sr_ready, ret; + + sr_ready = spi_nor_sr_ready(nor); + if (sr_ready < 0) + return sr_ready; + + ret = micron_st_nor_read_fsr(nor, nor->bouncebuf); + if (ret) { + /* + * Some controllers, such as Intel SPI, do not support low + * level operations such as reading the flag status + * register. They only expose small amount of high level + * operations to the software. If this is the case we use + * only the status register value. + */ + return ret == -EOPNOTSUPP ? sr_ready : ret; + } + + if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) { + if (nor->bouncebuf[0] & FSR_E_ERR) + dev_err(nor->dev, "Erase operation failed.\n"); + else + dev_err(nor->dev, "Program operation failed.\n"); + + if (nor->bouncebuf[0] & FSR_PT_ERR) + dev_err(nor->dev, + "Attempted to modify a protected sector.\n"); + + micron_st_nor_clear_fsr(nor); + + /* + * WEL bit remains set to one when an erase or page program + * error occurs. Issue a Write Disable command to protect + * against inadvertent writes that can possibly corrupt the + * contents of the memory. 
+ */ + ret = spi_nor_write_disable(nor); + if (ret) + return ret; + + return -EIO; + } + + return sr_ready && !!(nor->bouncebuf[0] & FSR_READY); +} + +static void micron_st_nor_default_init(struct spi_nor *nor) +{ + nor->flags |= SNOR_F_HAS_LOCK; + nor->flags &= ~SNOR_F_HAS_16BIT_SR; + nor->params->quad_enable = NULL; +} + +static int micron_st_nor_late_init(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter *params = nor->params; + + if (nor->info->mfr_flags & USE_FSR) + params->ready = micron_st_nor_ready; + + if (!params->set_4byte_addr_mode) + params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_wren_en4b_ex4b; + + return 0; +} + +static const struct spi_nor_fixups micron_st_nor_fixups = { + .default_init = micron_st_nor_default_init, + .late_init = micron_st_nor_late_init, +}; + +const struct spi_nor_manufacturer spi_nor_micron = { + .name = "micron", + .parts = micron_nor_parts, + .nparts = ARRAY_SIZE(micron_nor_parts), + .fixups = &micron_st_nor_fixups, +}; + +const struct spi_nor_manufacturer spi_nor_st = { + .name = "st", + .parts = st_nor_parts, + .nparts = ARRAY_SIZE(st_nor_parts), + .fixups = &micron_st_nor_fixups, +}; diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c new file mode 100644 index 0000000000..9a729aa345 --- /dev/null +++ b/drivers/mtd/spi-nor/otp.c @@ -0,0 +1,507 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * OTP support for SPI NOR flashes + * + * Copyright (C) 2021 Michael Walle <michael@walle.cc> + */ + +#include <linux/log2.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +#define spi_nor_otp_region_len(nor) ((nor)->params->otp.org->len) +#define spi_nor_otp_n_regions(nor) ((nor)->params->otp.org->n_regions) + +/** + * spi_nor_otp_read_secr() - read security register + * @nor: pointer to 'struct spi_nor' + * @addr: offset to read from + * @len: number of bytes to read + * @buf: pointer to dst buffer + * + * Read a security register by using the SPINOR_OP_RSECR commands. + * + * In Winbond/GigaDevice datasheets the term "security register" stands for + * a one-time-programmable memory area, consisting of multiple bytes (usually + * 256). Thus one "security register" maps to one OTP region. + * + * This method is used on GigaDevice and Winbond flashes. + * + * Please note, the read must not span multiple registers. + * + * Return: number of bytes read successfully, -errno otherwise + */ +int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf) +{ + u8 addr_nbytes, read_opcode, read_dummy; + struct spi_mem_dirmap_desc *rdesc; + enum spi_nor_protocol read_proto; + int ret; + + read_opcode = nor->read_opcode; + addr_nbytes = nor->addr_nbytes; + read_dummy = nor->read_dummy; + read_proto = nor->read_proto; + rdesc = nor->dirmap.rdesc; + + nor->read_opcode = SPINOR_OP_RSECR; + nor->read_dummy = 8; + nor->read_proto = SNOR_PROTO_1_1_1; + nor->dirmap.rdesc = NULL; + + ret = spi_nor_read_data(nor, addr, len, buf); + + nor->read_opcode = read_opcode; + nor->addr_nbytes = addr_nbytes; + nor->read_dummy = read_dummy; + nor->read_proto = read_proto; + nor->dirmap.rdesc = rdesc; + + return ret; +} + +/** + * spi_nor_otp_write_secr() - write security register + * @nor: pointer to 'struct spi_nor' + * @addr: offset to write to + * @len: number of bytes to write + * @buf: pointer to src buffer + * + * Write a security register by using the SPINOR_OP_PSECR commands. + * + * For more information on the term "security register", see the documentation + * of spi_nor_otp_read_secr().
+ * + * This method is used on GigaDevice and Winbond flashes. + * + * Please note, the write must not span multiple registers. + * + * Return: number of bytes written successfully, -errno otherwise + */ +int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len, + const u8 *buf) +{ + enum spi_nor_protocol write_proto; + struct spi_mem_dirmap_desc *wdesc; + u8 addr_nbytes, program_opcode; + int ret, written; + + program_opcode = nor->program_opcode; + addr_nbytes = nor->addr_nbytes; + write_proto = nor->write_proto; + wdesc = nor->dirmap.wdesc; + + nor->program_opcode = SPINOR_OP_PSECR; + nor->write_proto = SNOR_PROTO_1_1_1; + nor->dirmap.wdesc = NULL; + + /* + * We only support a write to one single page. For now all winbond + * flashes only have one page per security register. + */ + ret = spi_nor_write_enable(nor); + if (ret) + goto out; + + written = spi_nor_write_data(nor, addr, len, buf); + if (written < 0) + goto out; + + ret = spi_nor_wait_till_ready(nor); + +out: + nor->program_opcode = program_opcode; + nor->addr_nbytes = addr_nbytes; + nor->write_proto = write_proto; + nor->dirmap.wdesc = wdesc; + + return ret ?: written; +} + +/** + * spi_nor_otp_erase_secr() - erase a security register + * @nor: pointer to 'struct spi_nor' + * @addr: offset of the security register to be erased + * + * Erase a security register by using the SPINOR_OP_ESECR command. + * + * For more information on the term "security register", see the documentation + * of spi_nor_otp_read_secr(). + * + * This method is used on GigaDevice and Winbond flashes. + * + * Return: 0 on success, -errno otherwise + */ +int spi_nor_otp_erase_secr(struct spi_nor *nor, loff_t addr) +{ + u8 erase_opcode = nor->erase_opcode; + int ret; + + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + + nor->erase_opcode = SPINOR_OP_ESECR; + ret = spi_nor_erase_sector(nor, addr); + nor->erase_opcode = erase_opcode; + if (ret) + return ret; + + return spi_nor_wait_till_ready(nor); +} + +static int spi_nor_otp_lock_bit_cr(unsigned int region) +{ + static const int lock_bits[] = { SR2_LB1, SR2_LB2, SR2_LB3 }; + + if (region >= ARRAY_SIZE(lock_bits)) + return -EINVAL; + + return lock_bits[region]; +} + +/** + * spi_nor_otp_lock_sr2() - lock the OTP region + * @nor: pointer to 'struct spi_nor' + * @region: OTP region + * + * Lock the OTP region by writing the status register-2. This method is used on + * GigaDevice and Winbond flashes. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_otp_lock_sr2(struct spi_nor *nor, unsigned int region) +{ + u8 *cr = nor->bouncebuf; + int ret, lock_bit; + + lock_bit = spi_nor_otp_lock_bit_cr(region); + if (lock_bit < 0) + return lock_bit; + + ret = spi_nor_read_cr(nor, cr); + if (ret) + return ret; + + /* no need to write the register if region is already locked */ + if (cr[0] & lock_bit) + return 0; + + cr[0] |= lock_bit; + + return spi_nor_write_16bit_cr_and_check(nor, cr[0]); +} + +/** + * spi_nor_otp_is_locked_sr2() - get the OTP region lock status + * @nor: pointer to 'struct spi_nor' + * @region: OTP region + * + * Retrieve the OTP region lock bit by reading the status register-2. This + * method is used on GigaDevice and Winbond flashes. + * + * Return: 0 on success, -errno otherwise. 
+ */ +int spi_nor_otp_is_locked_sr2(struct spi_nor *nor, unsigned int region) +{ + u8 *cr = nor->bouncebuf; + int ret, lock_bit; + + lock_bit = spi_nor_otp_lock_bit_cr(region); + if (lock_bit < 0) + return lock_bit; + + ret = spi_nor_read_cr(nor, cr); + if (ret) + return ret; + + return cr[0] & lock_bit; +} + +static loff_t spi_nor_otp_region_start(const struct spi_nor *nor, unsigned int region) +{ + const struct spi_nor_otp_organization *org = nor->params->otp.org; + + return org->base + region * org->offset; +} + +static size_t spi_nor_otp_size(struct spi_nor *nor) +{ + return spi_nor_otp_n_regions(nor) * spi_nor_otp_region_len(nor); +} + +/* Translate the file offsets from and to OTP regions. */ +static loff_t spi_nor_otp_region_to_offset(struct spi_nor *nor, unsigned int region) +{ + return region * spi_nor_otp_region_len(nor); +} + +static unsigned int spi_nor_otp_offset_to_region(struct spi_nor *nor, loff_t ofs) +{ + return div64_u64(ofs, spi_nor_otp_region_len(nor)); +} + +static int spi_nor_mtd_otp_info(struct mtd_info *mtd, size_t len, + size_t *retlen, struct otp_info *buf) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + const struct spi_nor_otp_ops *ops = nor->params->otp.ops; + unsigned int n_regions = spi_nor_otp_n_regions(nor); + unsigned int i; + int ret, locked; + + if (len < n_regions * sizeof(*buf)) + return -ENOSPC; + + ret = spi_nor_prep_and_lock(nor); + if (ret) + return ret; + + for (i = 0; i < n_regions; i++) { + buf->start = spi_nor_otp_region_to_offset(nor, i); + buf->length = spi_nor_otp_region_len(nor); + + locked = ops->is_locked(nor, i); + if (locked < 0) { + ret = locked; + goto out; + } + + buf->locked = !!locked; + buf++; + } + + *retlen = n_regions * sizeof(*buf); + +out: + spi_nor_unlock_and_unprep(nor); + + return ret; +} + +static int spi_nor_mtd_otp_range_is_locked(struct spi_nor *nor, loff_t ofs, + size_t len) +{ + const struct spi_nor_otp_ops *ops = nor->params->otp.ops; + unsigned int region; + int locked; + + /* + * If any of the affected OTP regions are locked the entire range is + * considered locked. + */ + for (region = spi_nor_otp_offset_to_region(nor, ofs); + region <= spi_nor_otp_offset_to_region(nor, ofs + len - 1); + region++) { + locked = ops->is_locked(nor, region); + /* take the branch it is locked or in case of an error */ + if (locked) + return locked; + } + + return 0; +} + +static int spi_nor_mtd_otp_read_write(struct mtd_info *mtd, loff_t ofs, + size_t total_len, size_t *retlen, + const u8 *buf, bool is_write) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + const struct spi_nor_otp_ops *ops = nor->params->otp.ops; + const size_t rlen = spi_nor_otp_region_len(nor); + loff_t rstart, rofs; + unsigned int region; + size_t len; + int ret; + + if (ofs < 0 || ofs >= spi_nor_otp_size(nor)) + return 0; + + /* don't access beyond the end */ + total_len = min_t(size_t, total_len, spi_nor_otp_size(nor) - ofs); + + if (!total_len) + return 0; + + ret = spi_nor_prep_and_lock(nor); + if (ret) + return ret; + + if (is_write) { + ret = spi_nor_mtd_otp_range_is_locked(nor, ofs, total_len); + if (ret < 0) { + goto out; + } else if (ret) { + ret = -EROFS; + goto out; + } + } + + while (total_len) { + /* + * The OTP regions are mapped into a contiguous area starting + * at 0 as expected by the MTD layer. This will map the MTD + * file offsets to the address of an OTP region as used in the + * actual SPI commands. 
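 *
 * A worked example with made-up numbers: assume 256-byte regions whose
 * organization has base 0x1000 and a stride (offset) of 0x1000. An MTD
 * offset of 0x120 then selects region 1, rstart becomes
 * 0x1000 + 1 * 0x1000 = 0x2000, rofs is 0x120 & 0xff = 0x20, so the flash
 * is accessed at 0x2020 and the chunk is capped at rlen - rofs = 0xe0
 * bytes before the loop moves on to region 2.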
+ */ + region = spi_nor_otp_offset_to_region(nor, ofs); + rstart = spi_nor_otp_region_start(nor, region); + + /* + * The size of a OTP region is expected to be a power of two, + * thus we can just mask the lower bits and get the offset into + * a region. + */ + rofs = ofs & (rlen - 1); + + /* don't access beyond one OTP region */ + len = min_t(size_t, total_len, rlen - rofs); + + if (is_write) + ret = ops->write(nor, rstart + rofs, len, buf); + else + ret = ops->read(nor, rstart + rofs, len, (u8 *)buf); + if (ret == 0) + ret = -EIO; + if (ret < 0) + goto out; + + *retlen += ret; + ofs += ret; + buf += ret; + total_len -= ret; + } + ret = 0; + +out: + spi_nor_unlock_and_unprep(nor); + return ret; +} + +static int spi_nor_mtd_otp_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u8 *buf) +{ + return spi_nor_mtd_otp_read_write(mtd, from, len, retlen, buf, false); +} + +static int spi_nor_mtd_otp_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u8 *buf) +{ + return spi_nor_mtd_otp_read_write(mtd, to, len, retlen, buf, true); +} + +static int spi_nor_mtd_otp_erase(struct mtd_info *mtd, loff_t from, size_t len) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + const struct spi_nor_otp_ops *ops = nor->params->otp.ops; + const size_t rlen = spi_nor_otp_region_len(nor); + unsigned int region; + loff_t rstart; + int ret; + + /* OTP erase is optional */ + if (!ops->erase) + return -EOPNOTSUPP; + + if (!len) + return 0; + + if (from < 0 || (from + len) > spi_nor_otp_size(nor)) + return -EINVAL; + + /* the user has to explicitly ask for whole regions */ + if (!IS_ALIGNED(len, rlen) || !IS_ALIGNED(from, rlen)) + return -EINVAL; + + ret = spi_nor_prep_and_lock(nor); + if (ret) + return ret; + + ret = spi_nor_mtd_otp_range_is_locked(nor, from, len); + if (ret < 0) { + goto out; + } else if (ret) { + ret = -EROFS; + goto out; + } + + while (len) { + region = spi_nor_otp_offset_to_region(nor, from); + rstart = spi_nor_otp_region_start(nor, region); + + ret = ops->erase(nor, rstart); + if (ret) + goto out; + + len -= rlen; + from += rlen; + } + +out: + spi_nor_unlock_and_unprep(nor); + + return ret; +} + +static int spi_nor_mtd_otp_lock(struct mtd_info *mtd, loff_t from, size_t len) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + const struct spi_nor_otp_ops *ops = nor->params->otp.ops; + const size_t rlen = spi_nor_otp_region_len(nor); + unsigned int region; + int ret; + + if (from < 0 || (from + len) > spi_nor_otp_size(nor)) + return -EINVAL; + + /* the user has to explicitly ask for whole regions */ + if (!IS_ALIGNED(len, rlen) || !IS_ALIGNED(from, rlen)) + return -EINVAL; + + ret = spi_nor_prep_and_lock(nor); + if (ret) + return ret; + + while (len) { + region = spi_nor_otp_offset_to_region(nor, from); + ret = ops->lock(nor, region); + if (ret) + goto out; + + len -= rlen; + from += rlen; + } + +out: + spi_nor_unlock_and_unprep(nor); + + return ret; +} + +void spi_nor_set_mtd_otp_ops(struct spi_nor *nor) +{ + struct mtd_info *mtd = &nor->mtd; + + if (!nor->params->otp.ops) + return; + + if (WARN_ON(!is_power_of_2(spi_nor_otp_region_len(nor)))) + return; + + /* + * We only support user_prot callbacks (yet). + * + * Some SPI NOR flashes like Macronix ones can be ordered in two + * different variants. One with a factory locked OTP area and one where + * it is left to the user to write to it. The factory locked OTP is + * usually preprogrammed with an "electrical serial number". We don't + * support these for now. 
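 *
 * These callbacks are typically reached from the mtdchar ioctls: after
 * OTPSELECT(MTD_OTP_USER), plain read()/write() on /dev/mtdX end up in
 * _read/_write_user_prot_reg, OTPGETREGIONINFO uses _get_user_prot_info,
 * and OTPLOCK/OTPERASE map to _lock/_erase_user_prot_reg; this is roughly
 * how the flash_otp_* tools from mtd-utils drive the OTP area.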
+ */ + mtd->_get_user_prot_info = spi_nor_mtd_otp_info; + mtd->_read_user_prot_reg = spi_nor_mtd_otp_read; + mtd->_write_user_prot_reg = spi_nor_mtd_otp_write; + mtd->_lock_user_prot_reg = spi_nor_mtd_otp_lock; + mtd->_erase_user_prot_reg = spi_nor_mtd_otp_erase; +} diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c new file mode 100644 index 0000000000..b3b11dfed7 --- /dev/null +++ b/drivers/mtd/spi-nor/sfdp.c @@ -0,0 +1,1570 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/bitfield.h> +#include <linux/mtd/spi-nor.h> +#include <linux/slab.h> +#include <linux/sort.h> + +#include "core.h" + +#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb) +#define SFDP_PARAM_HEADER_PTP(p) \ + (((p)->parameter_table_pointer[2] << 16) | \ + ((p)->parameter_table_pointer[1] << 8) | \ + ((p)->parameter_table_pointer[0] << 0)) +#define SFDP_PARAM_HEADER_PARAM_LEN(p) ((p)->length * 4) + +#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */ +#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */ +#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */ +#define SFDP_PROFILE1_ID 0xff05 /* xSPI Profile 1.0 table. */ +#define SFDP_SCCR_MAP_ID 0xff87 /* + * Status, Control and Configuration + * Register Map. + */ +#define SFDP_SCCR_MAP_MC_ID 0xff88 /* + * Status, Control and Configuration + * Register Map Offsets for Multi-Chip + * SPI Memory Devices. + */ + +#define SFDP_SIGNATURE 0x50444653U + +struct sfdp_header { + u32 signature; /* Ox50444653U <=> "SFDP" */ + u8 minor; + u8 major; + u8 nph; /* 0-base number of parameter headers */ + u8 unused; + + /* Basic Flash Parameter Table. */ + struct sfdp_parameter_header bfpt_header; +}; + +/* Fast Read settings. */ +struct sfdp_bfpt_read { + /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */ + u32 hwcaps; + + /* + * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us + * whether the Fast Read x-y-z command is supported. + */ + u32 supported_dword; + u32 supported_bit; + + /* + * The half-word at offset <setting_shift> in <setting_dword> BFPT DWORD + * encodes the op code, the number of mode clocks and the number of wait + * states to be used by Fast Read x-y-z command. + */ + u32 settings_dword; + u32 settings_shift; + + /* The SPI protocol for this Fast Read x-y-z command. */ + enum spi_nor_protocol proto; +}; + +struct sfdp_bfpt_erase { + /* + * The half-word at offset <shift> in DWORD <dword> encodes the + * op code and erase sector size to be used by Sector Erase commands. 
+ */ + u32 dword; + u32 shift; +}; + +#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22) +#define SMPT_CMD_ADDRESS_LEN_0 (0x0UL << 22) +#define SMPT_CMD_ADDRESS_LEN_3 (0x1UL << 22) +#define SMPT_CMD_ADDRESS_LEN_4 (0x2UL << 22) +#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT (0x3UL << 22) + +#define SMPT_CMD_READ_DUMMY_MASK GENMASK(19, 16) +#define SMPT_CMD_READ_DUMMY_SHIFT 16 +#define SMPT_CMD_READ_DUMMY(_cmd) \ + (((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT) +#define SMPT_CMD_READ_DUMMY_IS_VARIABLE 0xfUL + +#define SMPT_CMD_READ_DATA_MASK GENMASK(31, 24) +#define SMPT_CMD_READ_DATA_SHIFT 24 +#define SMPT_CMD_READ_DATA(_cmd) \ + (((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT) + +#define SMPT_CMD_OPCODE_MASK GENMASK(15, 8) +#define SMPT_CMD_OPCODE_SHIFT 8 +#define SMPT_CMD_OPCODE(_cmd) \ + (((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT) + +#define SMPT_MAP_REGION_COUNT_MASK GENMASK(23, 16) +#define SMPT_MAP_REGION_COUNT_SHIFT 16 +#define SMPT_MAP_REGION_COUNT(_header) \ + ((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \ + SMPT_MAP_REGION_COUNT_SHIFT) + 1) + +#define SMPT_MAP_ID_MASK GENMASK(15, 8) +#define SMPT_MAP_ID_SHIFT 8 +#define SMPT_MAP_ID(_header) \ + (((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT) + +#define SMPT_MAP_REGION_SIZE_MASK GENMASK(31, 8) +#define SMPT_MAP_REGION_SIZE_SHIFT 8 +#define SMPT_MAP_REGION_SIZE(_region) \ + (((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \ + SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256) + +#define SMPT_MAP_REGION_ERASE_TYPE_MASK GENMASK(3, 0) +#define SMPT_MAP_REGION_ERASE_TYPE(_region) \ + ((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK) + +#define SMPT_DESC_TYPE_MAP BIT(1) +#define SMPT_DESC_END BIT(0) + +#define SFDP_4BAIT_DWORD_MAX 2 + +struct sfdp_4bait { + /* The hardware capability. */ + u32 hwcaps; + + /* + * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether + * the associated 4-byte address op code is supported. + */ + u32 supported_bit; +}; + +/** + * spi_nor_read_raw() - raw read of serial flash memory. read_opcode, + * addr_nbytes and read_dummy members of the struct spi_nor + * should be previously set. + * @nor: pointer to a 'struct spi_nor' + * @addr: offset in the serial flash memory + * @len: number of bytes to read + * @buf: buffer where the data is copied into (dma-safe memory) + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf) +{ + ssize_t ret; + + while (len) { + ret = spi_nor_read_data(nor, addr, len, buf); + if (ret < 0) + return ret; + if (!ret || ret > len) + return -EIO; + + buf += ret; + addr += ret; + len -= ret; + } + return 0; +} + +/** + * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters. + * @nor: pointer to a 'struct spi_nor' + * @addr: offset in the SFDP area to start reading data from + * @len: number of bytes to read + * @buf: buffer where the SFDP data are copied into (dma-safe memory) + * + * Whatever the actual numbers of bytes for address and dummy cycles are + * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always + * followed by a 3-byte address and 8 dummy clock cycles. + * + * Return: 0 on success, -errno otherwise. 
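 *
 * The SFDP signature and revision live at the very beginning of the SFDP
 * area, so a typical first step of the parser looks roughly like this
 * (using the dma-unsafe wrapper defined right below):
 *
 *	struct sfdp_header header;
 *	int err;
 *
 *	err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);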
+ */ +static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr, + size_t len, void *buf) +{ + u8 addr_nbytes, read_opcode, read_dummy; + int ret; + + read_opcode = nor->read_opcode; + addr_nbytes = nor->addr_nbytes; + read_dummy = nor->read_dummy; + + nor->read_opcode = SPINOR_OP_RDSFDP; + nor->addr_nbytes = 3; + nor->read_dummy = 8; + + ret = spi_nor_read_raw(nor, addr, len, buf); + + nor->read_opcode = read_opcode; + nor->addr_nbytes = addr_nbytes; + nor->read_dummy = read_dummy; + + return ret; +} + +/** + * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters. + * @nor: pointer to a 'struct spi_nor' + * @addr: offset in the SFDP area to start reading data from + * @len: number of bytes to read + * @buf: buffer where the SFDP data are copied into + * + * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not + * guaranteed to be dma-safe. + * + * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp() + * otherwise. + */ +static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr, + size_t len, void *buf) +{ + void *dma_safe_buf; + int ret; + + dma_safe_buf = kmalloc(len, GFP_KERNEL); + if (!dma_safe_buf) + return -ENOMEM; + + ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf); + memcpy(buf, dma_safe_buf, len); + kfree(dma_safe_buf); + + return ret; +} + +static void +spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read, + u16 half, + enum spi_nor_protocol proto) +{ + read->num_mode_clocks = (half >> 5) & 0x07; + read->num_wait_states = (half >> 0) & 0x1f; + read->opcode = (half >> 8) & 0xff; + read->proto = proto; +} + +static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = { + /* Fast Read 1-1-2 */ + { + SNOR_HWCAPS_READ_1_1_2, + SFDP_DWORD(1), BIT(16), /* Supported bit */ + SFDP_DWORD(4), 0, /* Settings */ + SNOR_PROTO_1_1_2, + }, + + /* Fast Read 1-2-2 */ + { + SNOR_HWCAPS_READ_1_2_2, + SFDP_DWORD(1), BIT(20), /* Supported bit */ + SFDP_DWORD(4), 16, /* Settings */ + SNOR_PROTO_1_2_2, + }, + + /* Fast Read 2-2-2 */ + { + SNOR_HWCAPS_READ_2_2_2, + SFDP_DWORD(5), BIT(0), /* Supported bit */ + SFDP_DWORD(6), 16, /* Settings */ + SNOR_PROTO_2_2_2, + }, + + /* Fast Read 1-1-4 */ + { + SNOR_HWCAPS_READ_1_1_4, + SFDP_DWORD(1), BIT(22), /* Supported bit */ + SFDP_DWORD(3), 16, /* Settings */ + SNOR_PROTO_1_1_4, + }, + + /* Fast Read 1-4-4 */ + { + SNOR_HWCAPS_READ_1_4_4, + SFDP_DWORD(1), BIT(21), /* Supported bit */ + SFDP_DWORD(3), 0, /* Settings */ + SNOR_PROTO_1_4_4, + }, + + /* Fast Read 4-4-4 */ + { + SNOR_HWCAPS_READ_4_4_4, + SFDP_DWORD(5), BIT(4), /* Supported bit */ + SFDP_DWORD(7), 16, /* Settings */ + SNOR_PROTO_4_4_4, + }, +}; + +static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = { + /* Erase Type 1 in DWORD8 bits[15:0] */ + {SFDP_DWORD(8), 0}, + + /* Erase Type 2 in DWORD8 bits[31:16] */ + {SFDP_DWORD(8), 16}, + + /* Erase Type 3 in DWORD9 bits[15:0] */ + {SFDP_DWORD(9), 0}, + + /* Erase Type 4 in DWORD9 bits[31:16] */ + {SFDP_DWORD(9), 16}, +}; + +/** + * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT + * @erase: pointer to a structure that describes a SPI NOR erase type + * @size: the size of the sector/block erased by the erase type + * @opcode: the SPI command op code to erase the sector/block + * @i: erase type index as sorted in the Basic Flash Parameter Table + * + * The supported Erase Types will be sorted at init in ascending order, with + * the smallest Erase Type size being the first member in the erase_type array + * of the 
spi_nor_erase_map structure. Save the Erase Type index as sorted in + * the Basic Flash Parameter Table since it will be used later on to + * synchronize with the supported Erase Types defined in SFDP optional tables. + */ +static void +spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase, + u32 size, u8 opcode, u8 i) +{ + erase->idx = i; + spi_nor_set_erase_type(erase, size, opcode); +} + +/** + * spi_nor_map_cmp_erase_type() - compare the map's erase types by size + * @l: member in the left half of the map's erase_type array + * @r: member in the right half of the map's erase_type array + * + * Comparison function used in the sort() call to sort in ascending order the + * map's erase types, the smallest erase type size being the first member in the + * sorted erase_type array. + * + * Return: the result of @l->size - @r->size + */ +static int spi_nor_map_cmp_erase_type(const void *l, const void *r) +{ + const struct spi_nor_erase_type *left = l, *right = r; + + return left->size - right->size; +} + +/** + * spi_nor_sort_erase_mask() - sort erase mask + * @map: the erase map of the SPI NOR + * @erase_mask: the erase type mask to be sorted + * + * Replicate the sort done for the map's erase types in BFPT: sort the erase + * mask in ascending order with the smallest erase type size starting from + * BIT(0) in the sorted erase mask. + * + * Return: sorted erase mask. + */ +static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask) +{ + struct spi_nor_erase_type *erase_type = map->erase_type; + int i; + u8 sorted_erase_mask = 0; + + if (!erase_mask) + return 0; + + /* Replicate the sort done for the map's erase types. */ + for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) + if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx)) + sorted_erase_mask |= BIT(i); + + return sorted_erase_mask; +} + +/** + * spi_nor_regions_sort_erase_types() - sort erase types in each region + * @map: the erase map of the SPI NOR + * + * Function assumes that the erase types defined in the erase map are already + * sorted in ascending order, with the smallest erase type size being the first + * member in the erase_type array. It replicates the sort done for the map's + * erase types. Each region's erase bitmask will indicate which erase types are + * supported from the sorted erase types defined in the erase map. + * Sort the all region's erase type at init in order to speed up the process of + * finding the best erase command at runtime. + */ +static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map) +{ + struct spi_nor_erase_region *region = map->regions; + u8 region_erase_mask, sorted_erase_mask; + + while (region) { + region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK; + + sorted_erase_mask = spi_nor_sort_erase_mask(map, + region_erase_mask); + + /* Overwrite erase mask. */ + region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) | + sorted_erase_mask; + + region = spi_nor_region_next(region); + } +} + +/** + * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table. + * @nor: pointer to a 'struct spi_nor' + * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing + * the Basic Flash Parameter Table length and version + * + * The Basic Flash Parameter Table is the main and only mandatory table as + * defined by the SFDP (JESD216) specification. + * It provides us with the total size (memory density) of the data array and + * the number of address bytes for Fast Read, Page Program and Sector Erase + * commands. 
+ * For Fast READ commands, it also gives the number of mode clock cycles and + * wait states (regrouped in the number of dummy clock cycles) for each + * supported instruction op code. + * For Page Program, the page size is now available since JESD216 rev A, however + * the supported instruction op codes are still not provided. + * For Sector Erase commands, this table stores the supported instruction op + * codes and the associated sector sizes. + * Finally, the Quad Enable Requirements (QER) are also available since JESD216 + * rev A. The QER bits encode the manufacturer dependent procedure to be + * executed to set the Quad Enable (QE) bit in some internal register of the + * Quad SPI memory. Indeed the QE bit, when it exists, must be set before + * sending any Quad SPI command to the memory. Actually, setting the QE bit + * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2 + * and IO3 hence enabling 4 (Quad) I/O lines. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_parse_bfpt(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header) +{ + struct spi_nor_flash_parameter *params = nor->params; + struct spi_nor_erase_map *map = &params->erase_map; + struct spi_nor_erase_type *erase_type = map->erase_type; + struct sfdp_bfpt bfpt; + size_t len; + int i, cmd, err; + u32 addr, val; + u32 dword; + u16 half; + u8 erase_mask; + + /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */ + if (bfpt_header->length < BFPT_DWORD_MAX_JESD216) + return -EINVAL; + + /* Read the Basic Flash Parameter Table. */ + len = min_t(size_t, sizeof(bfpt), + bfpt_header->length * sizeof(u32)); + addr = SFDP_PARAM_HEADER_PTP(bfpt_header); + memset(&bfpt, 0, sizeof(bfpt)); + err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt); + if (err < 0) + return err; + + /* Fix endianness of the BFPT DWORDs. */ + le32_to_cpu_array(bfpt.dwords, BFPT_DWORD_MAX); + + /* Number of address bytes. */ + switch (bfpt.dwords[SFDP_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) { + case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY: + case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4: + params->addr_nbytes = 3; + params->addr_mode_nbytes = 3; + break; + + case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY: + params->addr_nbytes = 4; + params->addr_mode_nbytes = 4; + break; + + default: + break; + } + + /* Flash Memory Density (in bits). */ + val = bfpt.dwords[SFDP_DWORD(2)]; + if (val & BIT(31)) { + val &= ~BIT(31); + + /* + * Prevent overflows on params->size. Anyway, a NOR of 2^64 + * bits is unlikely to exist so this error probably means + * the BFPT we are reading is corrupted/wrong. + */ + if (val > 63) + return -EINVAL; + + params->size = 1ULL << val; + } else { + params->size = val + 1; + } + params->size >>= 3; /* Convert to bytes. */ + + /* Fast Read settings. */ + for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) { + const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i]; + struct spi_nor_read_command *read; + + if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) { + params->hwcaps.mask &= ~rd->hwcaps; + continue; + } + + params->hwcaps.mask |= rd->hwcaps; + cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps); + read = &params->reads[cmd]; + half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift; + spi_nor_set_read_settings_from_bfpt(read, half, rd->proto); + } + + /* + * Sector Erase settings. Reinitialize the uniform erase map using the + * Erase Types defined in the bfpt table.
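 * Each Erase Type is described by a 16-bit half-word: bits[7:0] give the
 * erase size as a power of two and bits[15:8] give the opcode. For example,
 * a half-word of 0x200c decodes to opcode 0x20 with size 1 << 0x0c = 4096
 * bytes, i.e. the usual 4 KiB sector erase.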
+ */ + erase_mask = 0; + memset(&params->erase_map, 0, sizeof(params->erase_map)); + for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) { + const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i]; + u32 erasesize; + u8 opcode; + + half = bfpt.dwords[er->dword] >> er->shift; + erasesize = half & 0xff; + + /* erasesize == 0 means this Erase Type is not supported. */ + if (!erasesize) + continue; + + erasesize = 1U << erasesize; + opcode = (half >> 8) & 0xff; + erase_mask |= BIT(i); + spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize, + opcode, i); + } + spi_nor_init_uniform_erase_map(map, erase_mask, params->size); + /* + * Sort all the map's Erase Types in ascending order with the smallest + * erase size being the first member in the erase_type array. + */ + sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]), + spi_nor_map_cmp_erase_type, NULL); + /* + * Sort the erase types in the uniform region in order to update the + * uniform_erase_type bitmask. The bitmask will be used later on when + * selecting the uniform erase. + */ + spi_nor_regions_sort_erase_types(map); + map->uniform_erase_type = map->uniform_region.offset & + SNOR_ERASE_TYPE_MASK; + + /* Stop here if not JESD216 rev A or later. */ + if (bfpt_header->length == BFPT_DWORD_MAX_JESD216) + return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt); + + /* Page size: this field specifies 'N' so the page size = 2^N bytes. */ + val = bfpt.dwords[SFDP_DWORD(11)]; + val &= BFPT_DWORD11_PAGE_SIZE_MASK; + val >>= BFPT_DWORD11_PAGE_SIZE_SHIFT; + params->page_size = 1U << val; + + /* Quad Enable Requirements. */ + switch (bfpt.dwords[SFDP_DWORD(15)] & BFPT_DWORD15_QER_MASK) { + case BFPT_DWORD15_QER_NONE: + params->quad_enable = NULL; + break; + + case BFPT_DWORD15_QER_SR2_BIT1_BUGGY: + /* + * Writing only one byte to the Status Register has the + * side-effect of clearing Status Register 2. + */ + case BFPT_DWORD15_QER_SR2_BIT1_NO_RD: + /* + * Read Configuration Register (35h) instruction is not + * supported. + */ + nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR; + params->quad_enable = spi_nor_sr2_bit1_quad_enable; + break; + + case BFPT_DWORD15_QER_SR1_BIT6: + nor->flags &= ~SNOR_F_HAS_16BIT_SR; + params->quad_enable = spi_nor_sr1_bit6_quad_enable; + break; + + case BFPT_DWORD15_QER_SR2_BIT7: + nor->flags &= ~SNOR_F_HAS_16BIT_SR; + params->quad_enable = spi_nor_sr2_bit7_quad_enable; + break; + + case BFPT_DWORD15_QER_SR2_BIT1: + /* + * JESD216 rev B or later does not specify if writing only one + * byte to the Status Register clears or not the Status + * Register 2, so let's be cautious and keep the default + * assumption of a 16-bit Write Status (01h) command. + */ + nor->flags |= SNOR_F_HAS_16BIT_SR; + + params->quad_enable = spi_nor_sr2_bit1_quad_enable; + break; + + default: + dev_dbg(nor->dev, "BFPT QER reserved value used\n"); + break; + } + + dword = bfpt.dwords[SFDP_DWORD(16)] & BFPT_DWORD16_4B_ADDR_MODE_MASK; + if (SFDP_MASK_CHECK(dword, BFPT_DWORD16_4B_ADDR_MODE_BRWR)) + params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr; + else if (SFDP_MASK_CHECK(dword, BFPT_DWORD16_4B_ADDR_MODE_WREN_EN4B_EX4B)) + params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_wren_en4b_ex4b; + else if (SFDP_MASK_CHECK(dword, BFPT_DWORD16_4B_ADDR_MODE_EN4B_EX4B)) + params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_en4b_ex4b; + else + dev_dbg(nor->dev, "BFPT: 4-Byte Address Mode method is not recognized or not implemented\n"); + + /* Soft Reset support.
*/ + if (bfpt.dwords[SFDP_DWORD(16)] & BFPT_DWORD16_SWRST_EN_RST) + nor->flags |= SNOR_F_SOFT_RESET; + + /* Stop here if not JESD216 rev C or later. */ + if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B) + return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt); + + /* 8D-8D-8D command extension. */ + switch (bfpt.dwords[SFDP_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) { + case BFPT_DWORD18_CMD_EXT_REP: + nor->cmd_ext_type = SPI_NOR_EXT_REPEAT; + break; + + case BFPT_DWORD18_CMD_EXT_INV: + nor->cmd_ext_type = SPI_NOR_EXT_INVERT; + break; + + case BFPT_DWORD18_CMD_EXT_RES: + dev_dbg(nor->dev, "Reserved command extension used\n"); + break; + + case BFPT_DWORD18_CMD_EXT_16B: + dev_dbg(nor->dev, "16-bit opcodes not supported\n"); + return -EOPNOTSUPP; + } + + return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt); +} + +/** + * spi_nor_smpt_addr_nbytes() - return the number of address bytes used in the + * configuration detection command. + * @nor: pointer to a 'struct spi_nor' + * @settings: configuration detection command descriptor, dword1 + */ +static u8 spi_nor_smpt_addr_nbytes(const struct spi_nor *nor, const u32 settings) +{ + switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) { + case SMPT_CMD_ADDRESS_LEN_0: + return 0; + case SMPT_CMD_ADDRESS_LEN_3: + return 3; + case SMPT_CMD_ADDRESS_LEN_4: + return 4; + case SMPT_CMD_ADDRESS_LEN_USE_CURRENT: + default: + return nor->params->addr_mode_nbytes; + } +} + +/** + * spi_nor_smpt_read_dummy() - return the configuration detection command read + * latency, in clock cycles. + * @nor: pointer to a 'struct spi_nor' + * @settings: configuration detection command descriptor, dword1 + * + * Return: the number of dummy cycles for an SMPT read + */ +static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings) +{ + u8 read_dummy = SMPT_CMD_READ_DUMMY(settings); + + if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE) + return nor->read_dummy; + return read_dummy; +} + +/** + * spi_nor_get_map_in_use() - get the configuration map in use + * @nor: pointer to a 'struct spi_nor' + * @smpt: pointer to the sector map parameter table + * @smpt_len: sector map parameter table length + * + * Return: pointer to the map in use, ERR_PTR(-errno) otherwise. + */ +static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt, + u8 smpt_len) +{ + const u32 *ret; + u8 *buf; + u32 addr; + int err; + u8 i; + u8 addr_nbytes, read_opcode, read_dummy; + u8 read_data_mask, map_id; + + /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */ + buf = kmalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + addr_nbytes = nor->addr_nbytes; + read_dummy = nor->read_dummy; + read_opcode = nor->read_opcode; + + map_id = 0; + /* Determine if there are any optional Detection Command Descriptors */ + for (i = 0; i < smpt_len; i += 2) { + if (smpt[i] & SMPT_DESC_TYPE_MAP) + break; + + read_data_mask = SMPT_CMD_READ_DATA(smpt[i]); + nor->addr_nbytes = spi_nor_smpt_addr_nbytes(nor, smpt[i]); + nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]); + nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]); + addr = smpt[i + 1]; + + err = spi_nor_read_raw(nor, addr, 1, buf); + if (err) { + ret = ERR_PTR(err); + goto out; + } + + /* + * Build an index value that is used to select the Sector Map + * Configuration that is currently in use. + */ + map_id = map_id << 1 | !!(*buf & read_data_mask); + } + + /* + * If command descriptors are provided, they always precede map + * descriptors in the table. 
There is no need to start the iteration + * over smpt array all over again. + * + * Find the matching configuration map. + */ + ret = ERR_PTR(-EINVAL); + while (i < smpt_len) { + if (SMPT_MAP_ID(smpt[i]) == map_id) { + ret = smpt + i; + break; + } + + /* + * If there are no more configuration map descriptors and no + * configuration ID matched the configuration identifier, the + * sector address map is unknown. + */ + if (smpt[i] & SMPT_DESC_END) + break; + + /* increment the table index to the next map */ + i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1; + } + + /* fall through */ +out: + kfree(buf); + nor->addr_nbytes = addr_nbytes; + nor->read_dummy = read_dummy; + nor->read_opcode = read_opcode; + return ret; +} + +static void spi_nor_region_mark_end(struct spi_nor_erase_region *region) +{ + region->offset |= SNOR_LAST_REGION; +} + +static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region) +{ + region->offset |= SNOR_OVERLAID_REGION; +} + +/** + * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid + * @region: pointer to a structure that describes a SPI NOR erase region + * @erase: pointer to a structure that describes a SPI NOR erase type + * @erase_type: erase type bitmask + */ +static void +spi_nor_region_check_overlay(struct spi_nor_erase_region *region, + const struct spi_nor_erase_type *erase, + const u8 erase_type) +{ + int i; + + for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) { + if (!(erase[i].size && erase_type & BIT(erase[i].idx))) + continue; + if (region->size & erase[i].size_mask) { + spi_nor_region_mark_overlay(region); + return; + } + } +} + +/** + * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map + * @nor: pointer to a 'struct spi_nor' + * @smpt: pointer to the sector map parameter table + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor, + const u32 *smpt) +{ + struct spi_nor_erase_map *map = &nor->params->erase_map; + struct spi_nor_erase_type *erase = map->erase_type; + struct spi_nor_erase_region *region; + u64 offset; + u32 region_count; + int i, j; + u8 uniform_erase_type, save_uniform_erase_type; + u8 erase_type, regions_erase_type; + + region_count = SMPT_MAP_REGION_COUNT(*smpt); + /* + * The regions will be freed when the driver detaches from the + * device. + */ + region = devm_kcalloc(nor->dev, region_count, sizeof(*region), + GFP_KERNEL); + if (!region) + return -ENOMEM; + map->regions = region; + + uniform_erase_type = 0xff; + regions_erase_type = 0; + offset = 0; + /* Populate regions. */ + for (i = 0; i < region_count; i++) { + j = i + 1; /* index for the region dword */ + region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]); + erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]); + region[i].offset = offset | erase_type; + + spi_nor_region_check_overlay(®ion[i], erase, erase_type); + + /* + * Save the erase types that are supported in all regions and + * can erase the entire flash memory. + */ + uniform_erase_type &= erase_type; + + /* + * regions_erase_type mask will indicate all the erase types + * supported in this configuration map. 
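+		 * For instance, with one region supporting only Erase Type 3
+		 * and another supporting Erase Types 1 and 3,
+		 * regions_erase_type ends up as BIT(0) | BIT(2) while
+		 * uniform_erase_type is reduced to BIT(2).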
+ */ + regions_erase_type |= erase_type; + + offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) + + region[i].size; + } + spi_nor_region_mark_end(®ion[i - 1]); + + save_uniform_erase_type = map->uniform_erase_type; + map->uniform_erase_type = spi_nor_sort_erase_mask(map, + uniform_erase_type); + + if (!regions_erase_type) { + /* + * Roll back to the previous uniform_erase_type mask, SMPT is + * broken. + */ + map->uniform_erase_type = save_uniform_erase_type; + return -EINVAL; + } + + /* + * BFPT advertises all the erase types supported by all the possible + * map configurations. Mask out the erase types that are not supported + * by the current map configuration. + */ + for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) + if (!(regions_erase_type & BIT(erase[i].idx))) + spi_nor_mask_erase_type(&erase[i]); + + return 0; +} + +/** + * spi_nor_parse_smpt() - parse Sector Map Parameter Table + * @nor: pointer to a 'struct spi_nor' + * @smpt_header: sector map parameter table header + * + * This table is optional, but when available, we parse it to identify the + * location and size of sectors within the main data array of the flash memory + * device and to identify which Erase Types are supported by each sector. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_parse_smpt(struct spi_nor *nor, + const struct sfdp_parameter_header *smpt_header) +{ + const u32 *sector_map; + u32 *smpt; + size_t len; + u32 addr; + int ret; + + /* Read the Sector Map Parameter Table. */ + len = smpt_header->length * sizeof(*smpt); + smpt = kmalloc(len, GFP_KERNEL); + if (!smpt) + return -ENOMEM; + + addr = SFDP_PARAM_HEADER_PTP(smpt_header); + ret = spi_nor_read_sfdp(nor, addr, len, smpt); + if (ret) + goto out; + + /* Fix endianness of the SMPT DWORDs. */ + le32_to_cpu_array(smpt, smpt_header->length); + + sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length); + if (IS_ERR(sector_map)) { + ret = PTR_ERR(sector_map); + goto out; + } + + ret = spi_nor_init_non_uniform_erase_map(nor, sector_map); + if (ret) + goto out; + + spi_nor_regions_sort_erase_types(&nor->params->erase_map); + /* fall through */ +out: + kfree(smpt); + return ret; +} + +/** + * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table + * @nor: pointer to a 'struct spi_nor'. + * @param_header: pointer to the 'struct sfdp_parameter_header' describing + * the 4-Byte Address Instruction Table length and version. + * + * Return: 0 on success, -errno otherwise. 
+ */ +static int spi_nor_parse_4bait(struct spi_nor *nor, + const struct sfdp_parameter_header *param_header) +{ + static const struct sfdp_4bait reads[] = { + { SNOR_HWCAPS_READ, BIT(0) }, + { SNOR_HWCAPS_READ_FAST, BIT(1) }, + { SNOR_HWCAPS_READ_1_1_2, BIT(2) }, + { SNOR_HWCAPS_READ_1_2_2, BIT(3) }, + { SNOR_HWCAPS_READ_1_1_4, BIT(4) }, + { SNOR_HWCAPS_READ_1_4_4, BIT(5) }, + { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) }, + { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) }, + { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) }, + }; + static const struct sfdp_4bait programs[] = { + { SNOR_HWCAPS_PP, BIT(6) }, + { SNOR_HWCAPS_PP_1_1_4, BIT(7) }, + { SNOR_HWCAPS_PP_1_4_4, BIT(8) }, + }; + static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = { + { 0u /* not used */, BIT(9) }, + { 0u /* not used */, BIT(10) }, + { 0u /* not used */, BIT(11) }, + { 0u /* not used */, BIT(12) }, + }; + struct spi_nor_flash_parameter *params = nor->params; + struct spi_nor_pp_command *params_pp = params->page_programs; + struct spi_nor_erase_map *map = ¶ms->erase_map; + struct spi_nor_erase_type *erase_type = map->erase_type; + u32 *dwords; + size_t len; + u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask; + int i, ret; + + if (param_header->major != SFDP_JESD216_MAJOR || + param_header->length < SFDP_4BAIT_DWORD_MAX) + return -EINVAL; + + /* Read the 4-byte Address Instruction Table. */ + len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX; + + /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */ + dwords = kmalloc(len, GFP_KERNEL); + if (!dwords) + return -ENOMEM; + + addr = SFDP_PARAM_HEADER_PTP(param_header); + ret = spi_nor_read_sfdp(nor, addr, len, dwords); + if (ret) + goto out; + + /* Fix endianness of the 4BAIT DWORDs. */ + le32_to_cpu_array(dwords, SFDP_4BAIT_DWORD_MAX); + + /* + * Compute the subset of (Fast) Read commands for which the 4-byte + * version is supported. + */ + discard_hwcaps = 0; + read_hwcaps = 0; + for (i = 0; i < ARRAY_SIZE(reads); i++) { + const struct sfdp_4bait *read = &reads[i]; + + discard_hwcaps |= read->hwcaps; + if ((params->hwcaps.mask & read->hwcaps) && + (dwords[SFDP_DWORD(1)] & read->supported_bit)) + read_hwcaps |= read->hwcaps; + } + + /* + * Compute the subset of Page Program commands for which the 4-byte + * version is supported. + */ + pp_hwcaps = 0; + for (i = 0; i < ARRAY_SIZE(programs); i++) { + const struct sfdp_4bait *program = &programs[i]; + + /* + * The 4 Byte Address Instruction (Optional) Table is the only + * SFDP table that indicates support for Page Program Commands. + * Bypass the params->hwcaps.mask and consider 4BAIT the biggest + * authority for specifying Page Program support. + */ + discard_hwcaps |= program->hwcaps; + if (dwords[SFDP_DWORD(1)] & program->supported_bit) + pp_hwcaps |= program->hwcaps; + } + + /* + * Compute the subset of Sector Erase commands for which the 4-byte + * version is supported. + */ + erase_mask = 0; + for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) { + const struct sfdp_4bait *erase = &erases[i]; + + if (dwords[SFDP_DWORD(1)] & erase->supported_bit) + erase_mask |= BIT(i); + } + + /* Replicate the sort done for the map's erase types in BFPT. */ + erase_mask = spi_nor_sort_erase_mask(map, erase_mask); + + /* + * We need at least one 4-byte op code per read, program and erase + * operation; the .read(), .write() and .erase() hooks share the + * nor->addr_nbytes value. 
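+	 * For instance, if the table advertises 4-byte Fast Read and Sector
+	 * Erase op codes but no 4-byte Page Program op code, the whole 4-byte
+	 * instruction set is left unused and the flash keeps being driven
+	 * with 3-byte op codes.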
+ */ + if (!read_hwcaps || !pp_hwcaps || !erase_mask) + goto out; + + /* + * Discard all operations from the 4-byte instruction set which are + * not supported by this memory. + */ + params->hwcaps.mask &= ~discard_hwcaps; + params->hwcaps.mask |= (read_hwcaps | pp_hwcaps); + + /* Use the 4-byte address instruction set. */ + for (i = 0; i < SNOR_CMD_READ_MAX; i++) { + struct spi_nor_read_command *read_cmd = ¶ms->reads[i]; + + read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode); + } + + /* 4BAIT is the only SFDP table that indicates page program support. */ + if (pp_hwcaps & SNOR_HWCAPS_PP) { + spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP], + SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1); + /* + * Since xSPI Page Program opcode is backward compatible with + * Legacy SPI, use Legacy SPI opcode there as well. + */ + spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP_8_8_8_DTR], + SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR); + } + if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4) + spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP_1_1_4], + SPINOR_OP_PP_1_1_4_4B, + SNOR_PROTO_1_1_4); + if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4) + spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP_1_4_4], + SPINOR_OP_PP_1_4_4_4B, + SNOR_PROTO_1_4_4); + + for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) { + if (erase_mask & BIT(i)) + erase_type[i].opcode = (dwords[SFDP_DWORD(2)] >> + erase_type[i].idx * 8) & 0xFF; + else + spi_nor_mask_erase_type(&erase_type[i]); + } + + /* + * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes() + * later because we already did the conversion to 4byte opcodes. Also, + * this latest function implements a legacy quirk for the erase size of + * Spansion memory. However this quirk is no longer needed with new + * SFDP compliant memories. + */ + params->addr_nbytes = 4; + nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT; + + /* fall through */ +out: + kfree(dwords); + return ret; +} + +#define PROFILE1_DWORD1_RDSR_ADDR_BYTES BIT(29) +#define PROFILE1_DWORD1_RDSR_DUMMY BIT(28) +#define PROFILE1_DWORD1_RD_FAST_CMD GENMASK(15, 8) +#define PROFILE1_DWORD4_DUMMY_200MHZ GENMASK(11, 7) +#define PROFILE1_DWORD5_DUMMY_166MHZ GENMASK(31, 27) +#define PROFILE1_DWORD5_DUMMY_133MHZ GENMASK(21, 17) +#define PROFILE1_DWORD5_DUMMY_100MHZ GENMASK(11, 7) + +/** + * spi_nor_parse_profile1() - parse the xSPI Profile 1.0 table + * @nor: pointer to a 'struct spi_nor' + * @profile1_header: pointer to the 'struct sfdp_parameter_header' describing + * the Profile 1.0 Table length and version. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_parse_profile1(struct spi_nor *nor, + const struct sfdp_parameter_header *profile1_header) +{ + u32 *dwords, addr; + size_t len; + int ret; + u8 dummy, opcode; + + len = profile1_header->length * sizeof(*dwords); + dwords = kmalloc(len, GFP_KERNEL); + if (!dwords) + return -ENOMEM; + + addr = SFDP_PARAM_HEADER_PTP(profile1_header); + ret = spi_nor_read_sfdp(nor, addr, len, dwords); + if (ret) + goto out; + + le32_to_cpu_array(dwords, profile1_header->length); + + /* Get 8D-8D-8D fast read opcode and dummy cycles. */ + opcode = FIELD_GET(PROFILE1_DWORD1_RD_FAST_CMD, dwords[SFDP_DWORD(1)]); + + /* Set the Read Status Register dummy cycles and dummy address bytes. 
*/ + if (dwords[SFDP_DWORD(1)] & PROFILE1_DWORD1_RDSR_DUMMY) + nor->params->rdsr_dummy = 8; + else + nor->params->rdsr_dummy = 4; + + if (dwords[SFDP_DWORD(1)] & PROFILE1_DWORD1_RDSR_ADDR_BYTES) + nor->params->rdsr_addr_nbytes = 4; + else + nor->params->rdsr_addr_nbytes = 0; + + /* + * We don't know what speed the controller is running at. Find the + * dummy cycles for the fastest frequency the flash can run at to be + * sure we are never short of dummy cycles. A value of 0 means the + * frequency is not supported. + * + * Default to PROFILE1_DUMMY_DEFAULT if we don't find anything, and let + * flashes set the correct value if needed in their fixup hooks. + */ + dummy = FIELD_GET(PROFILE1_DWORD4_DUMMY_200MHZ, dwords[SFDP_DWORD(4)]); + if (!dummy) + dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_166MHZ, + dwords[SFDP_DWORD(5)]); + if (!dummy) + dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_133MHZ, + dwords[SFDP_DWORD(5)]); + if (!dummy) + dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_100MHZ, + dwords[SFDP_DWORD(5)]); + if (!dummy) + dev_dbg(nor->dev, + "Can't find dummy cycles from Profile 1.0 table\n"); + + /* Round up to an even value to avoid tripping controllers up. */ + dummy = round_up(dummy, 2); + + /* Update the fast read settings. */ + nor->params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR; + spi_nor_set_read_settings(&nor->params->reads[SNOR_CMD_READ_8_8_8_DTR], + 0, dummy, opcode, + SNOR_PROTO_8_8_8_DTR); + + /* + * Page Program is "Required Command" in the xSPI Profile 1.0. Update + * the params->hwcaps.mask here. + */ + nor->params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR; + +out: + kfree(dwords); + return ret; +} + +#define SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE BIT(31) + +/** + * spi_nor_parse_sccr() - Parse the Status, Control and Configuration Register + * Map. + * @nor: pointer to a 'struct spi_nor' + * @sccr_header: pointer to the 'struct sfdp_parameter_header' describing + * the SCCR Map table length and version. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_parse_sccr(struct spi_nor *nor, + const struct sfdp_parameter_header *sccr_header) +{ + struct spi_nor_flash_parameter *params = nor->params; + u32 *dwords, addr; + size_t len; + int ret; + + len = sccr_header->length * sizeof(*dwords); + dwords = kmalloc(len, GFP_KERNEL); + if (!dwords) + return -ENOMEM; + + addr = SFDP_PARAM_HEADER_PTP(sccr_header); + ret = spi_nor_read_sfdp(nor, addr, len, dwords); + if (ret) + goto out; + + le32_to_cpu_array(dwords, sccr_header->length); + + /* Address offset for volatile registers (die 0) */ + if (!params->vreg_offset) { + params->vreg_offset = devm_kmalloc(nor->dev, sizeof(*dwords), + GFP_KERNEL); + if (!params->vreg_offset) { + ret = -ENOMEM; + goto out; + } + } + params->vreg_offset[0] = dwords[SFDP_DWORD(1)]; + params->n_dice = 1; + + if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE, + dwords[SFDP_DWORD(22)])) + nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE; + +out: + kfree(dwords); + return ret; +} + +/** + * spi_nor_parse_sccr_mc() - Parse the Status, Control and Configuration + * Register Map Offsets for Multi-Chip SPI Memory + * Devices. + * @nor: pointer to a 'struct spi_nor' + * @sccr_mc_header: pointer to the 'struct sfdp_parameter_header' describing + * the SCCR Map offsets table length and version. + * + * Return: 0 on success, -errno otherwise. 
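+ *
+ * For example, a dual-die package reports a table length of two DWORDs (one
+ * volatile and one non-volatile register offset for the second die), from
+ * which n_dice = 2 is derived.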
+ */
+static int spi_nor_parse_sccr_mc(struct spi_nor *nor,
+				 const struct sfdp_parameter_header *sccr_mc_header)
+{
+	struct spi_nor_flash_parameter *params = nor->params;
+	u32 *dwords, addr;
+	u8 i, n_dice;
+	size_t len;
+	int ret;
+
+	len = sccr_mc_header->length * sizeof(*dwords);
+	dwords = kmalloc(len, GFP_KERNEL);
+	if (!dwords)
+		return -ENOMEM;
+
+	addr = SFDP_PARAM_HEADER_PTP(sccr_mc_header);
+	ret = spi_nor_read_sfdp(nor, addr, len, dwords);
+	if (ret)
+		goto out;
+
+	le32_to_cpu_array(dwords, sccr_mc_header->length);
+
+	/*
+	 * Pair of DWORDs (volatile and non-volatile register offsets) per
+	 * additional die. Hence, length = 2 * (number of additional dice).
+	 */
+	n_dice = 1 + sccr_mc_header->length / 2;
+
+	/* Address offset for volatile registers of additional dice */
+	params->vreg_offset =
+		devm_krealloc(nor->dev, params->vreg_offset,
+			      n_dice * sizeof(*dwords),
+			      GFP_KERNEL);
+	if (!params->vreg_offset) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 1; i < n_dice; i++)
+		params->vreg_offset[i] = dwords[SFDP_DWORD(i) * 2];
+
+	params->n_dice = n_dice;
+
+out:
+	kfree(dwords);
+	return ret;
+}
+
+/**
+ * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
+ * after SFDP has been parsed. Called only for flashes that define JESD216 SFDP
+ * tables.
+ * @nor:	pointer to a 'struct spi_nor'
+ *
+ * Used to tweak various flash parameters when the information provided by the
+ * SFDP tables is wrong.
+ */
+static int spi_nor_post_sfdp_fixups(struct spi_nor *nor)
+{
+	int ret;
+
+	if (nor->manufacturer && nor->manufacturer->fixups &&
+	    nor->manufacturer->fixups->post_sfdp) {
+		ret = nor->manufacturer->fixups->post_sfdp(nor);
+		if (ret)
+			return ret;
+	}
+
+	if (nor->info->fixups && nor->info->fixups->post_sfdp)
+		return nor->info->fixups->post_sfdp(nor);
+
+	return 0;
+}
+
+/**
+ * spi_nor_check_sfdp_signature() - check for a valid SFDP signature
+ * @nor:	pointer to a 'struct spi_nor'
+ *
+ * Used to detect if the flash supports the RDSFDP command as well as the
+ * presence of a valid SFDP table.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_check_sfdp_signature(struct spi_nor *nor)
+{
+	u32 signature;
+	int err;
+
+	/* Get the SFDP header. */
+	err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(signature),
+					   &signature);
+	if (err < 0)
+		return err;
+
+	/* Check the SFDP signature. */
+	if (le32_to_cpu(signature) != SFDP_SIGNATURE)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
+ * @nor:	pointer to a 'struct spi_nor'
+ *
+ * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
+ * specification. This is a standard which tends to be supported by almost all
+ * (Q)SPI memory manufacturers. These tables, stored in the flash itself, allow
+ * us to learn at runtime the main parameters needed to perform basic SPI flash
+ * operations such as Fast Read, Page Program or Sector Erase commands.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_parse_sfdp(struct spi_nor *nor)
+{
+	const struct sfdp_parameter_header *param_header, *bfpt_header;
+	struct sfdp_parameter_header *param_headers = NULL;
+	struct sfdp_header header;
+	struct device *dev = nor->dev;
+	struct sfdp *sfdp;
+	size_t sfdp_size;
+	size_t psize;
+	int i, err;
+
+	/* Get the SFDP header. */
+	err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
+	if (err < 0)
+		return err;
+
+	/* Check the SFDP header version.
*/ + if (le32_to_cpu(header.signature) != SFDP_SIGNATURE || + header.major != SFDP_JESD216_MAJOR) + return -EINVAL; + + /* + * Verify that the first and only mandatory parameter header is a + * Basic Flash Parameter Table header as specified in JESD216. + */ + bfpt_header = &header.bfpt_header; + if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID || + bfpt_header->major != SFDP_JESD216_MAJOR) + return -EINVAL; + + sfdp_size = SFDP_PARAM_HEADER_PTP(bfpt_header) + + SFDP_PARAM_HEADER_PARAM_LEN(bfpt_header); + + /* + * Allocate memory then read all parameter headers with a single + * Read SFDP command. These parameter headers will actually be parsed + * twice: a first time to get the latest revision of the basic flash + * parameter table, then a second time to handle the supported optional + * tables. + * Hence we read the parameter headers once for all to reduce the + * processing time. Also we use kmalloc() instead of devm_kmalloc() + * because we don't need to keep these parameter headers: the allocated + * memory is always released with kfree() before exiting this function. + */ + if (header.nph) { + psize = header.nph * sizeof(*param_headers); + + param_headers = kmalloc(psize, GFP_KERNEL); + if (!param_headers) + return -ENOMEM; + + err = spi_nor_read_sfdp(nor, sizeof(header), + psize, param_headers); + if (err < 0) { + dev_dbg(dev, "failed to read SFDP parameter headers\n"); + goto exit; + } + } + + /* + * Cache the complete SFDP data. It is not (easily) possible to fetch + * SFDP after probe time and we need it for the sysfs access. + */ + for (i = 0; i < header.nph; i++) { + param_header = ¶m_headers[i]; + sfdp_size = max_t(size_t, sfdp_size, + SFDP_PARAM_HEADER_PTP(param_header) + + SFDP_PARAM_HEADER_PARAM_LEN(param_header)); + } + + /* + * Limit the total size to a reasonable value to avoid allocating too + * much memory just of because the flash returned some insane values. + */ + if (sfdp_size > PAGE_SIZE) { + dev_dbg(dev, "SFDP data (%zu) too big, truncating\n", + sfdp_size); + sfdp_size = PAGE_SIZE; + } + + sfdp = devm_kzalloc(dev, sizeof(*sfdp), GFP_KERNEL); + if (!sfdp) { + err = -ENOMEM; + goto exit; + } + + /* + * The SFDP is organized in chunks of DWORDs. Thus, in theory, the + * sfdp_size should be a multiple of DWORDs. But in case a flash + * is not spec compliant, make sure that we have enough space to store + * the complete SFDP data. + */ + sfdp->num_dwords = DIV_ROUND_UP(sfdp_size, sizeof(*sfdp->dwords)); + sfdp->dwords = devm_kcalloc(dev, sfdp->num_dwords, + sizeof(*sfdp->dwords), GFP_KERNEL); + if (!sfdp->dwords) { + err = -ENOMEM; + devm_kfree(dev, sfdp); + goto exit; + } + + err = spi_nor_read_sfdp(nor, 0, sfdp_size, sfdp->dwords); + if (err < 0) { + dev_dbg(dev, "failed to read SFDP data\n"); + devm_kfree(dev, sfdp->dwords); + devm_kfree(dev, sfdp); + goto exit; + } + + nor->sfdp = sfdp; + + /* + * Check other parameter headers to get the latest revision of + * the basic flash parameter table. + */ + for (i = 0; i < header.nph; i++) { + param_header = ¶m_headers[i]; + + if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID && + param_header->major == SFDP_JESD216_MAJOR && + (param_header->minor > bfpt_header->minor || + (param_header->minor == bfpt_header->minor && + param_header->length > bfpt_header->length))) + bfpt_header = param_header; + } + + err = spi_nor_parse_bfpt(nor, bfpt_header); + if (err) + goto exit; + + /* Parse optional parameter tables. 
*/ + for (i = 0; i < header.nph; i++) { + param_header = ¶m_headers[i]; + + switch (SFDP_PARAM_HEADER_ID(param_header)) { + case SFDP_SECTOR_MAP_ID: + err = spi_nor_parse_smpt(nor, param_header); + break; + + case SFDP_4BAIT_ID: + err = spi_nor_parse_4bait(nor, param_header); + break; + + case SFDP_PROFILE1_ID: + err = spi_nor_parse_profile1(nor, param_header); + break; + + case SFDP_SCCR_MAP_ID: + err = spi_nor_parse_sccr(nor, param_header); + break; + + case SFDP_SCCR_MAP_MC_ID: + err = spi_nor_parse_sccr_mc(nor, param_header); + break; + + default: + break; + } + + if (err) { + dev_warn(dev, "Failed to parse optional parameter table: %04x\n", + SFDP_PARAM_HEADER_ID(param_header)); + /* + * Let's not drop all information we extracted so far + * if optional table parsers fail. In case of failing, + * each optional parser is responsible to roll back to + * the previously known spi_nor data. + */ + err = 0; + } + } + + err = spi_nor_post_sfdp_fixups(nor); +exit: + kfree(param_headers); + return err; +} diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h new file mode 100644 index 0000000000..6eb99e1cdd --- /dev/null +++ b/drivers/mtd/spi-nor/sfdp.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#ifndef __LINUX_MTD_SFDP_H +#define __LINUX_MTD_SFDP_H + +/* SFDP revisions */ +#define SFDP_JESD216_MAJOR 1 +#define SFDP_JESD216_MINOR 0 +#define SFDP_JESD216A_MINOR 5 +#define SFDP_JESD216B_MINOR 6 + +/* SFDP DWORDS are indexed from 1 but C arrays are indexed from 0. */ +#define SFDP_DWORD(i) ((i) - 1) +#define SFDP_MASK_CHECK(dword, mask) (((dword) & (mask)) == (mask)) + +/* Basic Flash Parameter Table */ + +/* JESD216 rev D defines a Basic Flash Parameter Table of 20 DWORDs. */ +#define BFPT_DWORD_MAX 20 + +struct sfdp_bfpt { + u32 dwords[BFPT_DWORD_MAX]; +}; + +/* The first version of JESD216 defined only 9 DWORDs. */ +#define BFPT_DWORD_MAX_JESD216 9 +#define BFPT_DWORD_MAX_JESD216B 16 + +/* 1st DWORD. */ +#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16) +#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17) +#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17) +#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17) +#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17) +#define BFPT_DWORD1_DTR BIT(19) +#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20) +#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21) +#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22) + +/* 5th DWORD. */ +#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0) +#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4) + +/* 11th DWORD. */ +#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4 +#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4) + +/* 15th DWORD. */ + +/* + * (from JESD216 rev B) + * Quad Enable Requirements (QER): + * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4 + * reads based on instruction. DQ3/HOLD# functions are hold during + * instruction phase. + * - 001b: QE is bit 1 of status register 2. It is set via Write Status with + * two data bytes where bit 1 of the second byte is one. + * [...] + * Writing only one byte to the status register has the side-effect of + * clearing status register 2, including the QE bit. The 100b code is + * used if writing one byte to the status register does not modify + * status register 2. + * - 010b: QE is bit 6 of status register 1. It is set via Write Status with + * one data byte where bit 6 is one. + * [...] + * - 011b: QE is bit 7 of status register 2. 
It is set via Write status
+ *         register 2 instruction 3Eh with one data byte where bit 7 is one.
+ *         [...]
+ *         The status register 2 is read using instruction 3Fh.
+ * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
+ *         two data bytes where bit 1 of the second byte is one.
+ *         [...]
+ *         In contrast to the 001b code, writing one byte to the status
+ *         register does not modify status register 2.
+ * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
+ *         Read Status instruction 05h. Status register 2 is read using
+ *         instruction 35h. QE is set via Write Status instruction 01h with
+ *         two data bytes where bit 1 of the second byte is one.
+ *         [...]
+ */
+#define BFPT_DWORD15_QER_MASK			GENMASK(22, 20)
+#define BFPT_DWORD15_QER_NONE			(0x0UL << 20) /* Micron */
+#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY		(0x1UL << 20)
+#define BFPT_DWORD15_QER_SR1_BIT6		(0x2UL << 20) /* Macronix */
+#define BFPT_DWORD15_QER_SR2_BIT7		(0x3UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD		(0x4UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1		(0x5UL << 20) /* Spansion */
+
+#define BFPT_DWORD16_EN4B_MASK			GENMASK(31, 24)
+#define BFPT_DWORD16_EN4B_ALWAYS_4B		BIT(30)
+#define BFPT_DWORD16_EN4B_4B_OPCODES		BIT(29)
+#define BFPT_DWORD16_EN4B_16BIT_NV_CR		BIT(28)
+#define BFPT_DWORD16_EN4B_BRWR			BIT(27)
+#define BFPT_DWORD16_EN4B_WREAR			BIT(26)
+#define BFPT_DWORD16_EN4B_WREN_EN4B		BIT(25)
+#define BFPT_DWORD16_EN4B_EN4B			BIT(24)
+#define BFPT_DWORD16_EX4B_MASK			GENMASK(18, 14)
+#define BFPT_DWORD16_EX4B_16BIT_NV_CR		BIT(18)
+#define BFPT_DWORD16_EX4B_BRWR			BIT(17)
+#define BFPT_DWORD16_EX4B_WREAR			BIT(16)
+#define BFPT_DWORD16_EX4B_WREN_EX4B		BIT(15)
+#define BFPT_DWORD16_EX4B_EX4B			BIT(14)
+#define BFPT_DWORD16_4B_ADDR_MODE_MASK			\
+	(BFPT_DWORD16_EN4B_MASK | BFPT_DWORD16_EX4B_MASK)
+#define BFPT_DWORD16_4B_ADDR_MODE_16BIT_NV_CR		\
+	(BFPT_DWORD16_EN4B_16BIT_NV_CR | BFPT_DWORD16_EX4B_16BIT_NV_CR)
+#define BFPT_DWORD16_4B_ADDR_MODE_BRWR			\
+	(BFPT_DWORD16_EN4B_BRWR | BFPT_DWORD16_EX4B_BRWR)
+#define BFPT_DWORD16_4B_ADDR_MODE_WREAR			\
+	(BFPT_DWORD16_EN4B_WREAR | BFPT_DWORD16_EX4B_WREAR)
+#define BFPT_DWORD16_4B_ADDR_MODE_WREN_EN4B_EX4B	\
+	(BFPT_DWORD16_EN4B_WREN_EN4B | BFPT_DWORD16_EX4B_WREN_EX4B)
+#define BFPT_DWORD16_4B_ADDR_MODE_EN4B_EX4B		\
+	(BFPT_DWORD16_EN4B_EN4B | BFPT_DWORD16_EX4B_EX4B)
+#define BFPT_DWORD16_SWRST_EN_RST		BIT(12)
+
+#define BFPT_DWORD18_CMD_EXT_MASK		GENMASK(30, 29)
+#define BFPT_DWORD18_CMD_EXT_REP		(0x0UL << 29) /* Repeat */
+#define BFPT_DWORD18_CMD_EXT_INV		(0x1UL << 29) /* Invert */
+#define BFPT_DWORD18_CMD_EXT_RES		(0x2UL << 29) /* Reserved */
+#define BFPT_DWORD18_CMD_EXT_16B		(0x3UL << 29) /* 16-bit opcode */
+
+struct sfdp_parameter_header {
+	u8		id_lsb;
+	u8		minor;
+	u8		major;
+	u8		length; /* in double words */
+	u8		parameter_table_pointer[3]; /* byte address */
+	u8		id_msb;
+};
+
+#endif /* __LINUX_MTD_SFDP_H */
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
new file mode 100644
index 0000000000..709822fced
--- /dev/null
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -0,0 +1,989 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+/* flash_info mfr_flag. Used to clear sticky proprietary SR bits.
*/ +#define USE_CLSR BIT(0) +#define USE_CLPEF BIT(1) + +#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */ +#define SPINOR_OP_CLPEF 0x82 /* Clear program/erase failure flags */ +#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */ +#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */ +#define SPINOR_REG_CYPRESS_VREG 0x00800000 +#define SPINOR_REG_CYPRESS_STR1 0x0 +#define SPINOR_REG_CYPRESS_STR1V \ + (SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_STR1) +#define SPINOR_REG_CYPRESS_CFR1 0x2 +#define SPINOR_REG_CYPRESS_CFR1_QUAD_EN BIT(1) /* Quad Enable */ +#define SPINOR_REG_CYPRESS_CFR2 0x3 +#define SPINOR_REG_CYPRESS_CFR2V \ + (SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR2) +#define SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK GENMASK(3, 0) +#define SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24 0xb +#define SPINOR_REG_CYPRESS_CFR2_ADRBYT BIT(7) +#define SPINOR_REG_CYPRESS_CFR3 0x4 +#define SPINOR_REG_CYPRESS_CFR3_PGSZ BIT(4) /* Page size. */ +#define SPINOR_REG_CYPRESS_CFR5 0x6 +#define SPINOR_REG_CYPRESS_CFR5_BIT6 BIT(6) +#define SPINOR_REG_CYPRESS_CFR5_DDR BIT(1) +#define SPINOR_REG_CYPRESS_CFR5_OPI BIT(0) +#define SPINOR_REG_CYPRESS_CFR5_OCT_DTR_EN \ + (SPINOR_REG_CYPRESS_CFR5_BIT6 | SPINOR_REG_CYPRESS_CFR5_DDR | \ + SPINOR_REG_CYPRESS_CFR5_OPI) +#define SPINOR_REG_CYPRESS_CFR5_OCT_DTR_DS SPINOR_REG_CYPRESS_CFR5_BIT6 +#define SPINOR_OP_CYPRESS_RD_FAST 0xee +#define SPINOR_REG_CYPRESS_ARCFN 0x00000006 + +/* Cypress SPI NOR flash operations. */ +#define CYPRESS_NOR_WR_ANY_REG_OP(naddr, addr, ndata, buf) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 0), \ + SPI_MEM_OP_ADDR(naddr, addr, 0), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_OUT(ndata, buf, 0)) + +#define CYPRESS_NOR_RD_ANY_REG_OP(naddr, addr, ndummy, buf) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RD_ANY_REG, 0), \ + SPI_MEM_OP_ADDR(naddr, addr, 0), \ + SPI_MEM_OP_DUMMY(ndummy, 0), \ + SPI_MEM_OP_DATA_IN(1, buf, 0)) + +#define SPANSION_OP(opcode) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +/** + * struct spansion_nor_params - Spansion private parameters. + * @clsr: Clear Status Register or Clear Program and Erase Failure Flag + * opcode. + */ +struct spansion_nor_params { + u8 clsr; +}; + +/** + * spansion_nor_clear_sr() - Clear the Status Register. + * @nor: pointer to 'struct spi_nor'. 
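+ *
+ * The clear is best-effort: if the Clear Status Register / Clear Program and
+ * Erase Failure Flags command fails, the error is only reported via dev_dbg().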
+ */ +static void spansion_nor_clear_sr(struct spi_nor *nor) +{ + const struct spansion_nor_params *priv_params = nor->params->priv; + int ret; + + if (nor->spimem) { + struct spi_mem_op op = SPANSION_OP(priv_params->clsr); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR, + NULL, 0); + } + + if (ret) + dev_dbg(nor->dev, "error %d clearing SR\n", ret); +} + +static int cypress_nor_sr_ready_and_clear_reg(struct spi_nor *nor, u64 addr) +{ + struct spi_nor_flash_parameter *params = nor->params; + struct spi_mem_op op = + CYPRESS_NOR_RD_ANY_REG_OP(params->addr_mode_nbytes, addr, + 0, nor->bouncebuf); + int ret; + + if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) { + op.dummy.nbytes = params->rdsr_dummy; + op.data.nbytes = 2; + } + + ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto); + if (ret) + return ret; + + if (nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) { + if (nor->bouncebuf[0] & SR_E_ERR) + dev_err(nor->dev, "Erase Error occurred\n"); + else + dev_err(nor->dev, "Programming Error occurred\n"); + + spansion_nor_clear_sr(nor); + + ret = spi_nor_write_disable(nor); + if (ret) + return ret; + + return -EIO; + } + + return !(nor->bouncebuf[0] & SR_WIP); +} +/** + * cypress_nor_sr_ready_and_clear() - Query the Status Register of each die by + * using Read Any Register command to see if the whole flash is ready for new + * commands and clear it if there are any errors. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 1 if ready, 0 if not ready, -errno on errors. + */ +static int cypress_nor_sr_ready_and_clear(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter *params = nor->params; + u64 addr; + int ret; + u8 i; + + for (i = 0; i < params->n_dice; i++) { + addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_STR1; + ret = cypress_nor_sr_ready_and_clear_reg(nor, addr); + if (ret < 0) + return ret; + else if (ret == 0) + return 0; + } + + return 1; +} + +static int cypress_nor_set_memlat(struct spi_nor *nor, u64 addr) +{ + struct spi_mem_op op; + u8 *buf = nor->bouncebuf; + int ret; + u8 addr_mode_nbytes = nor->params->addr_mode_nbytes; + + op = (struct spi_mem_op) + CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes, addr, 0, buf); + + ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto); + if (ret) + return ret; + + /* Use 24 dummy cycles for memory array reads. */ + *buf &= ~SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK; + *buf |= FIELD_PREP(SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK, + SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24); + op = (struct spi_mem_op) + CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes, addr, 1, buf); + + ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto); + if (ret) + return ret; + + nor->read_dummy = 24; + + return 0; +} + +static int cypress_nor_set_octal_dtr_bits(struct spi_nor *nor, u64 addr) +{ + struct spi_mem_op op; + u8 *buf = nor->bouncebuf; + + /* Set the octal and DTR enable bits. 
*/ + buf[0] = SPINOR_REG_CYPRESS_CFR5_OCT_DTR_EN; + op = (struct spi_mem_op) + CYPRESS_NOR_WR_ANY_REG_OP(nor->params->addr_mode_nbytes, + addr, 1, buf); + + return spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto); +} + +static int cypress_nor_octal_dtr_en(struct spi_nor *nor) +{ + const struct spi_nor_flash_parameter *params = nor->params; + u8 *buf = nor->bouncebuf; + u64 addr; + int i, ret; + + for (i = 0; i < params->n_dice; i++) { + addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR2; + ret = cypress_nor_set_memlat(nor, addr); + if (ret) + return ret; + + addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR5; + ret = cypress_nor_set_octal_dtr_bits(nor, addr); + if (ret) + return ret; + } + + /* Read flash ID to make sure the switch was successful. */ + ret = spi_nor_read_id(nor, nor->addr_nbytes, 3, buf, + SNOR_PROTO_8_8_8_DTR); + if (ret) { + dev_dbg(nor->dev, "error %d reading JEDEC ID after enabling 8D-8D-8D mode\n", ret); + return ret; + } + + if (memcmp(buf, nor->info->id, nor->info->id_len)) + return -EINVAL; + + return 0; +} + +static int cypress_nor_set_single_spi_bits(struct spi_nor *nor, u64 addr) +{ + struct spi_mem_op op; + u8 *buf = nor->bouncebuf; + + /* + * The register is 1-byte wide, but 1-byte transactions are not allowed + * in 8D-8D-8D mode. Since there is no register at the next location, + * just initialize the value to 0 and let the transaction go on. + */ + buf[0] = SPINOR_REG_CYPRESS_CFR5_OCT_DTR_DS; + buf[1] = 0; + op = (struct spi_mem_op) + CYPRESS_NOR_WR_ANY_REG_OP(nor->addr_nbytes, addr, 2, buf); + return spi_nor_write_any_volatile_reg(nor, &op, SNOR_PROTO_8_8_8_DTR); +} + +static int cypress_nor_octal_dtr_dis(struct spi_nor *nor) +{ + const struct spi_nor_flash_parameter *params = nor->params; + u8 *buf = nor->bouncebuf; + u64 addr; + int i, ret; + + for (i = 0; i < params->n_dice; i++) { + addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR5; + ret = cypress_nor_set_single_spi_bits(nor, addr); + if (ret) + return ret; + } + + /* Read flash ID to make sure the switch was successful. */ + ret = spi_nor_read_id(nor, 0, 0, buf, SNOR_PROTO_1_1_1); + if (ret) { + dev_dbg(nor->dev, "error %d reading JEDEC ID after disabling 8D-8D-8D mode\n", ret); + return ret; + } + + if (memcmp(buf, nor->info->id, nor->info->id_len)) + return -EINVAL; + + return 0; +} + +static int cypress_nor_quad_enable_volatile_reg(struct spi_nor *nor, u64 addr) +{ + struct spi_mem_op op; + u8 addr_mode_nbytes = nor->params->addr_mode_nbytes; + u8 cfr1v_written; + int ret; + + op = (struct spi_mem_op) + CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes, addr, 0, + nor->bouncebuf); + + ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto); + if (ret) + return ret; + + if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR1_QUAD_EN) + return 0; + + /* Update the Quad Enable bit. */ + nor->bouncebuf[0] |= SPINOR_REG_CYPRESS_CFR1_QUAD_EN; + op = (struct spi_mem_op) + CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes, addr, 1, + nor->bouncebuf); + ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto); + if (ret) + return ret; + + cfr1v_written = nor->bouncebuf[0]; + + /* Read back and check it. 
 */
+	op = (struct spi_mem_op)
+		CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes, addr, 0,
+					  nor->bouncebuf);
+	ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+	if (ret)
+		return ret;
+
+	if (nor->bouncebuf[0] != cfr1v_written) {
+		dev_err(nor->dev, "CFR1: Read back test failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * cypress_nor_quad_enable_volatile() - enable Quad I/O mode in volatile
+ *                                      register.
+ * @nor:	pointer to a 'struct spi_nor'
+ *
+ * It is recommended to update volatile registers in the field application due
+ * to the risk of corrupting the non-volatile registers on a power interrupt.
+ * This function sets the Quad Enable bit in the CFR1 volatile register. If
+ * users set the Quad Enable bit in the CFR1 non-volatile register in advance
+ * (typically by a Flash programmer before mounting the Flash on the PCB), the
+ * Quad Enable bit in the CFR1 volatile register is also set during Flash
+ * power-up.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
+{
+	struct spi_nor_flash_parameter *params = nor->params;
+	u64 addr;
+	u8 i;
+	int ret;
+
+	for (i = 0; i < params->n_dice; i++) {
+		addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR1;
+		ret = cypress_nor_quad_enable_volatile_reg(nor, addr);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * cypress_nor_determine_addr_mode_by_sr1() - Determine current address mode
+ *                                            (3 or 4-byte) by querying status
+ *                                            register 1 (SR1).
+ * @nor:		pointer to a 'struct spi_nor'
+ * @addr_mode:		pointer to a buffer where we return the determined
+ *			address mode.
+ *
+ * This function tries to determine the current address mode by comparing the
+ * SR1 value from RDSR1 (no address), RDAR (3-byte address), and RDAR (4-byte
+ * address).
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_determine_addr_mode_by_sr1(struct spi_nor *nor,
+						  u8 *addr_mode)
+{
+	struct spi_mem_op op =
+		CYPRESS_NOR_RD_ANY_REG_OP(3, SPINOR_REG_CYPRESS_STR1V, 0,
+					  nor->bouncebuf);
+	bool is3byte, is4byte;
+	int ret;
+
+	ret = spi_nor_read_sr(nor, &nor->bouncebuf[1]);
+	if (ret)
+		return ret;
+
+	ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+	if (ret)
+		return ret;
+
+	is3byte = (nor->bouncebuf[0] == nor->bouncebuf[1]);
+
+	op = (struct spi_mem_op)
+		CYPRESS_NOR_RD_ANY_REG_OP(4, SPINOR_REG_CYPRESS_STR1V, 0,
+					  nor->bouncebuf);
+	ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+	if (ret)
+		return ret;
+
+	is4byte = (nor->bouncebuf[0] == nor->bouncebuf[1]);
+
+	if (is3byte == is4byte)
+		return -EIO;
+	if (is3byte)
+		*addr_mode = 3;
+	else
+		*addr_mode = 4;
+
+	return 0;
+}
+
+/**
+ * cypress_nor_set_addr_mode_nbytes() - Set the number of address bytes to
+ *                                      match the current address mode.
+ * @nor:	pointer to a 'struct spi_nor'
+ *
+ * Determine the current address mode by reading SR1 with different methods,
+ * then query CFR2V[7] to confirm. If the determination fails, force the flash
+ * into 4-byte address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_set_addr_mode_nbytes(struct spi_nor *nor)
+{
+	struct spi_mem_op op;
+	u8 addr_mode;
+	int ret;
+
+	/*
+	 * Read SR1 by RDSR1 and by RDAR with a 3- and a 4-byte address. Issue
+	 * Write Enable first so that the WEL bit (bit 1) is set in SR1 and the
+	 * reads have a known non-zero value to compare against.
+ */ + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + ret = cypress_nor_determine_addr_mode_by_sr1(nor, &addr_mode); + if (ret) { + ret = spi_nor_set_4byte_addr_mode(nor, true); + if (ret) + return ret; + return spi_nor_write_disable(nor); + } + ret = spi_nor_write_disable(nor); + if (ret) + return ret; + + /* + * Query CFR2V and make sure no contradiction between determined address + * mode and CFR2V[7]. + */ + op = (struct spi_mem_op) + CYPRESS_NOR_RD_ANY_REG_OP(addr_mode, SPINOR_REG_CYPRESS_CFR2V, + 0, nor->bouncebuf); + ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto); + if (ret) + return ret; + + if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR2_ADRBYT) { + if (addr_mode != 4) + return spi_nor_set_4byte_addr_mode(nor, true); + } else { + if (addr_mode != 3) + return spi_nor_set_4byte_addr_mode(nor, true); + } + + nor->params->addr_nbytes = addr_mode; + nor->params->addr_mode_nbytes = addr_mode; + + return 0; +} + +/** + * cypress_nor_get_page_size() - Get flash page size configuration. + * @nor: pointer to a 'struct spi_nor' + * + * The BFPT table advertises a 512B or 256B page size depending on part but the + * page size is actually configurable (with the default being 256B). Read from + * CFR3V[4] and set the correct size. + * + * Return: 0 on success, -errno otherwise. + */ +static int cypress_nor_get_page_size(struct spi_nor *nor) +{ + struct spi_mem_op op = + CYPRESS_NOR_RD_ANY_REG_OP(nor->params->addr_mode_nbytes, + 0, 0, nor->bouncebuf); + struct spi_nor_flash_parameter *params = nor->params; + int ret; + u8 i; + + /* + * Use the minimum common page size configuration. Programming 256-byte + * under 512-byte page size configuration is safe. + */ + params->page_size = 256; + for (i = 0; i < params->n_dice; i++) { + op.addr.val = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR3; + + ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto); + if (ret) + return ret; + + if (!(nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3_PGSZ)) + return 0; + } + + params->page_size = 512; + + return 0; +} + +static void cypress_nor_ecc_init(struct spi_nor *nor) +{ + /* + * Programming is supported only in 16-byte ECC data unit granularity. + * Byte-programming, bit-walking, or multiple program operations to the + * same ECC data unit without an erase are not allowed. + */ + nor->params->writesize = 16; + nor->flags |= SNOR_F_ECC; +} + +static int +s25fs256t_post_bfpt_fixup(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt) +{ + struct spi_mem_op op; + int ret; + + ret = cypress_nor_set_addr_mode_nbytes(nor); + if (ret) + return ret; + + /* Read Architecture Configuration Register (ARCFN) */ + op = (struct spi_mem_op) + CYPRESS_NOR_RD_ANY_REG_OP(nor->params->addr_mode_nbytes, + SPINOR_REG_CYPRESS_ARCFN, 1, + nor->bouncebuf); + ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto); + if (ret) + return ret; + + /* ARCFN value must be 0 if uniform sector is selected */ + if (nor->bouncebuf[0]) + return -ENODEV; + + return 0; +} + +static int s25fs256t_post_sfdp_fixup(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter *params = nor->params; + + /* + * S25FS256T does not define the SCCR map, but we would like to use the + * same code base for both single and multi chip package devices, thus + * set the vreg_offset and n_dice to be able to do so. 
+ */ + params->vreg_offset = devm_kmalloc(nor->dev, sizeof(u32), GFP_KERNEL); + if (!params->vreg_offset) + return -ENOMEM; + + params->vreg_offset[0] = SPINOR_REG_CYPRESS_VREG; + params->n_dice = 1; + + /* PP_1_1_4_4B is supported but missing in 4BAIT. */ + params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4; + spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP_1_1_4], + SPINOR_OP_PP_1_1_4_4B, + SNOR_PROTO_1_1_4); + + return cypress_nor_get_page_size(nor); +} + +static int s25fs256t_late_init(struct spi_nor *nor) +{ + cypress_nor_ecc_init(nor); + + return 0; +} + +static struct spi_nor_fixups s25fs256t_fixups = { + .post_bfpt = s25fs256t_post_bfpt_fixup, + .post_sfdp = s25fs256t_post_sfdp_fixup, + .late_init = s25fs256t_late_init, +}; + +static int +s25hx_t_post_bfpt_fixup(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt) +{ + int ret; + + ret = cypress_nor_set_addr_mode_nbytes(nor); + if (ret) + return ret; + + /* Replace Quad Enable with volatile version */ + nor->params->quad_enable = cypress_nor_quad_enable_volatile; + + return 0; +} + +static int s25hx_t_post_sfdp_fixup(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter *params = nor->params; + struct spi_nor_erase_type *erase_type = params->erase_map.erase_type; + unsigned int i; + + if (!params->n_dice || !params->vreg_offset) { + dev_err(nor->dev, "%s failed. The volatile register offset could not be retrieved from SFDP.\n", + __func__); + return -EOPNOTSUPP; + } + + /* The 2 Gb parts duplicate info and advertise 4 dice instead of 2. */ + if (params->size == SZ_256M) + params->n_dice = 2; + + /* + * In some parts, 3byte erase opcodes are advertised by 4BAIT. + * Convert them to 4byte erase opcodes. + */ + for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) { + switch (erase_type[i].opcode) { + case SPINOR_OP_SE: + erase_type[i].opcode = SPINOR_OP_SE_4B; + break; + case SPINOR_OP_BE_4K: + erase_type[i].opcode = SPINOR_OP_BE_4K_4B; + break; + default: + break; + } + } + + return cypress_nor_get_page_size(nor); +} + +static int s25hx_t_late_init(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter *params = nor->params; + + /* Fast Read 4B requires mode cycles */ + params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8; + params->ready = cypress_nor_sr_ready_and_clear; + cypress_nor_ecc_init(nor); + + return 0; +} + +static struct spi_nor_fixups s25hx_t_fixups = { + .post_bfpt = s25hx_t_post_bfpt_fixup, + .post_sfdp = s25hx_t_post_sfdp_fixup, + .late_init = s25hx_t_late_init, +}; + +/** + * cypress_nor_set_octal_dtr() - Enable or disable octal DTR on Cypress flashes. + * @nor: pointer to a 'struct spi_nor' + * @enable: whether to enable or disable Octal DTR + * + * This also sets the memory access latency cycles to 24 to allow the flash to + * run at up to 200MHz. + * + * Return: 0 on success, -errno otherwise. + */ +static int cypress_nor_set_octal_dtr(struct spi_nor *nor, bool enable) +{ + return enable ? cypress_nor_octal_dtr_en(nor) : + cypress_nor_octal_dtr_dis(nor); +} + +static int s28hx_t_post_sfdp_fixup(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter *params = nor->params; + + if (!params->n_dice || !params->vreg_offset) { + dev_err(nor->dev, "%s failed. The volatile register offset could not be retrieved from SFDP.\n", + __func__); + return -EOPNOTSUPP; + } + + /* The 2 Gb parts duplicate info and advertise 4 dice instead of 2. 
*/ + if (params->size == SZ_256M) + params->n_dice = 2; + + /* + * On older versions of the flash the xSPI Profile 1.0 table has the + * 8D-8D-8D Fast Read opcode as 0x00. But it actually should be 0xEE. + */ + if (params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode == 0) + params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode = + SPINOR_OP_CYPRESS_RD_FAST; + + /* This flash is also missing the 4-byte Page Program opcode bit. */ + spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP], + SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1); + /* + * Since xSPI Page Program opcode is backward compatible with + * Legacy SPI, use Legacy SPI opcode there as well. + */ + spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP_8_8_8_DTR], + SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR); + + /* + * The xSPI Profile 1.0 table advertises the number of additional + * address bytes needed for Read Status Register command as 0 but the + * actual value for that is 4. + */ + params->rdsr_addr_nbytes = 4; + + return cypress_nor_get_page_size(nor); +} + +static int s28hx_t_post_bfpt_fixup(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt) +{ + return cypress_nor_set_addr_mode_nbytes(nor); +} + +static int s28hx_t_late_init(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter *params = nor->params; + + params->set_octal_dtr = cypress_nor_set_octal_dtr; + params->ready = cypress_nor_sr_ready_and_clear; + cypress_nor_ecc_init(nor); + + return 0; +} + +static const struct spi_nor_fixups s28hx_t_fixups = { + .post_sfdp = s28hx_t_post_sfdp_fixup, + .post_bfpt = s28hx_t_post_bfpt_fixup, + .late_init = s28hx_t_late_init, +}; + +static int +s25fs_s_nor_post_bfpt_fixups(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt) +{ + /* + * The S25FS-S chip family reports 512-byte pages in BFPT but + * in reality the write buffer still wraps at the safe default + * of 256 bytes. Overwrite the page size advertised by BFPT + * to get the writes working. + */ + nor->params->page_size = 256; + + return 0; +} + +static const struct spi_nor_fixups s25fs_s_nor_fixups = { + .post_bfpt = s25fs_s_nor_post_bfpt_fixups, +}; + +static const struct flash_info spansion_nor_parts[] = { + /* Spansion/Cypress -- single (large) sector size only, at least + * for the chips listed here (without boot sectors). 
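+	 * Each entry below reads as INFO(jedec_id, ext_id, sector_size,
+	 * n_sectors); for example, s25sl032p is 64 sectors of 64 KiB, i.e. a
+	 * 4 MiB (32 Mbit) part.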
+ */ + { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64) + NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128) + NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64) + NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_CLSR) + }, + { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256) + NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_CLSR) + }, + { "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128) + NO_SFDP_FLAGS(SPI_NOR_SKIP_SFDP | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_CLSR) + }, + { "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512) + NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_CLSR) + }, + { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256) + FLAGS(SPI_NOR_HAS_LOCK) + NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_CLSR) + }, + { "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256) + NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_CLSR) + .fixups = &s25fs_s_nor_fixups, }, + { "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128) + NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_CLSR) + }, + { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512) + NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_CLSR) + }, + { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256) + NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_CLSR) + .fixups = &s25fs_s_nor_fixups, }, + { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64) }, + { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256) }, + { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64) + NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_CLSR) + }, + { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256) + NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + MFR_FLAGS(USE_CLSR) + }, + { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8) }, + { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16) }, + { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32) }, + { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64) }, + { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128) }, + { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SECT_4K) }, + { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K) }, + { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) }, + { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) }, + { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) }, + { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) }, + { 
"s25fl256l", INFO(0x016019, 0, 64 * 1024, 512) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + FIXUP_FLAGS(SPI_NOR_4B_OPCODES) }, + { "s25fs256t", INFO6(0x342b19, 0x0f0890, 0, 0) + PARSE_SFDP + MFR_FLAGS(USE_CLPEF) + .fixups = &s25fs256t_fixups }, + { "s25hl512t", INFO6(0x342a1a, 0x0f0390, 0, 0) + PARSE_SFDP + MFR_FLAGS(USE_CLPEF) + .fixups = &s25hx_t_fixups }, + { "s25hl01gt", INFO6(0x342a1b, 0x0f0390, 0, 0) + PARSE_SFDP + MFR_FLAGS(USE_CLPEF) + .fixups = &s25hx_t_fixups }, + { "s25hl02gt", INFO6(0x342a1c, 0x0f0090, 0, 0) + PARSE_SFDP + MFR_FLAGS(USE_CLPEF) + FLAGS(NO_CHIP_ERASE) + .fixups = &s25hx_t_fixups }, + { "s25hs512t", INFO6(0x342b1a, 0x0f0390, 0, 0) + PARSE_SFDP + MFR_FLAGS(USE_CLPEF) + .fixups = &s25hx_t_fixups }, + { "s25hs01gt", INFO6(0x342b1b, 0x0f0390, 0, 0) + PARSE_SFDP + MFR_FLAGS(USE_CLPEF) + .fixups = &s25hx_t_fixups }, + { "s25hs02gt", INFO6(0x342b1c, 0x0f0090, 0, 0) + PARSE_SFDP + MFR_FLAGS(USE_CLPEF) + FLAGS(NO_CHIP_ERASE) + .fixups = &s25hx_t_fixups }, + { "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1) + FLAGS(SPI_NOR_NO_ERASE) }, + { "s28hl512t", INFO(0x345a1a, 0, 0, 0) + PARSE_SFDP + MFR_FLAGS(USE_CLPEF) + .fixups = &s28hx_t_fixups, + }, + { "s28hl01gt", INFO(0x345a1b, 0, 0, 0) + PARSE_SFDP + MFR_FLAGS(USE_CLPEF) + .fixups = &s28hx_t_fixups, + }, + { "s28hs512t", INFO(0x345b1a, 0, 0, 0) + PARSE_SFDP + MFR_FLAGS(USE_CLPEF) + .fixups = &s28hx_t_fixups, + }, + { "s28hs01gt", INFO(0x345b1b, 0, 0, 0) + PARSE_SFDP + MFR_FLAGS(USE_CLPEF) + .fixups = &s28hx_t_fixups, + }, + { "s28hs02gt", INFO(0x345b1c, 0, 0, 0) + PARSE_SFDP + MFR_FLAGS(USE_CLPEF) + .fixups = &s28hx_t_fixups, + }, +}; + +/** + * spansion_nor_sr_ready_and_clear() - Query the Status Register to see if the + * flash is ready for new commands and clear it if there are any errors. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 1 if ready, 0 if not ready, -errno on errors. + */ +static int spansion_nor_sr_ready_and_clear(struct spi_nor *nor) +{ + int ret; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + if (nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) { + if (nor->bouncebuf[0] & SR_E_ERR) + dev_err(nor->dev, "Erase Error occurred\n"); + else + dev_err(nor->dev, "Programming Error occurred\n"); + + spansion_nor_clear_sr(nor); + + /* + * WEL bit remains set to one when an erase or page program + * error occurs. Issue a Write Disable command to protect + * against inadvertent writes that can possibly corrupt the + * contents of the memory. 
+ */ + ret = spi_nor_write_disable(nor); + if (ret) + return ret; + + return -EIO; + } + + return !(nor->bouncebuf[0] & SR_WIP); +} + +static int spansion_nor_late_init(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter *params = nor->params; + struct spansion_nor_params *priv_params; + u8 mfr_flags = nor->info->mfr_flags; + + if (params->size > SZ_16M) { + nor->flags |= SNOR_F_4B_OPCODES; + /* No small sector erase for 4-byte command set */ + nor->erase_opcode = SPINOR_OP_SE; + nor->mtd.erasesize = nor->info->sector_size; + } + + if (mfr_flags & (USE_CLSR | USE_CLPEF)) { + priv_params = devm_kmalloc(nor->dev, sizeof(*priv_params), + GFP_KERNEL); + if (!priv_params) + return -ENOMEM; + + if (mfr_flags & USE_CLSR) + priv_params->clsr = SPINOR_OP_CLSR; + else if (mfr_flags & USE_CLPEF) + priv_params->clsr = SPINOR_OP_CLPEF; + + params->priv = priv_params; + params->ready = spansion_nor_sr_ready_and_clear; + } + + return 0; +} + +static const struct spi_nor_fixups spansion_nor_fixups = { + .late_init = spansion_nor_late_init, +}; + +const struct spi_nor_manufacturer spi_nor_spansion = { + .name = "spansion", + .parts = spansion_nor_parts, + .nparts = ARRAY_SIZE(spansion_nor_parts), + .fixups = &spansion_nor_fixups, +}; diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c new file mode 100644 index 0000000000..197d2c1101 --- /dev/null +++ b/drivers/mtd/spi-nor/sst.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +/* SST flash_info mfr_flag. Used to specify SST byte programming. */ +#define SST_WRITE BIT(0) + +#define SST26VF_CR_BPNV BIT(3) + +static int sst26vf_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + return -EOPNOTSUPP; +} + +static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + int ret; + + /* We only support unlocking the entire flash array. 
*/ + if (ofs != 0 || len != nor->params->size) + return -EINVAL; + + ret = spi_nor_read_cr(nor, nor->bouncebuf); + if (ret) + return ret; + + if (!(nor->bouncebuf[0] & SST26VF_CR_BPNV)) { + dev_dbg(nor->dev, "Any block has been permanently locked\n"); + return -EINVAL; + } + + return spi_nor_global_block_unlock(nor); +} + +static int sst26vf_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + return -EOPNOTSUPP; +} + +static const struct spi_nor_locking_ops sst26vf_nor_locking_ops = { + .lock = sst26vf_nor_lock, + .unlock = sst26vf_nor_unlock, + .is_locked = sst26vf_nor_is_locked, +}; + +static int sst26vf_nor_late_init(struct spi_nor *nor) +{ + nor->params->locking_ops = &sst26vf_nor_locking_ops; + + return 0; +} + +static const struct spi_nor_fixups sst26vf_nor_fixups = { + .late_init = sst26vf_nor_late_init, +}; + +static const struct flash_info sst_nor_parts[] = { + /* SST -- large erase sizes are "overlays", "sectors" are 4K */ + { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + MFR_FLAGS(SST_WRITE) }, + { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + MFR_FLAGS(SST_WRITE) }, + { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + MFR_FLAGS(SST_WRITE) }, + { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + MFR_FLAGS(SST_WRITE) }, + { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP | + SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) }, + { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + MFR_FLAGS(SST_WRITE) }, + { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + MFR_FLAGS(SST_WRITE) }, + { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + MFR_FLAGS(SST_WRITE) }, + { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4) + FLAGS(SPI_NOR_HAS_LOCK) + NO_SFDP_FLAGS(SECT_4K) }, + { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8) + FLAGS(SPI_NOR_HAS_LOCK) + NO_SFDP_FLAGS(SECT_4K) }, + { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + MFR_FLAGS(SST_WRITE) }, + { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K) + MFR_FLAGS(SST_WRITE) }, + { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) }, + { "sst26vf032b", INFO(0xbf2642, 0, 0, 0) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + PARSE_SFDP + .fixups = &sst26vf_nor_fixups }, + { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + .fixups = &sst26vf_nor_fixups }, +}; + +static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + size_t actual = 0; + int ret; + + dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); + + ret = 
spi_nor_prep_and_lock(nor); + if (ret) + return ret; + + ret = spi_nor_write_enable(nor); + if (ret) + goto out; + + nor->sst_write_second = false; + + /* Start write from odd address. */ + if (to % 2) { + nor->program_opcode = SPINOR_OP_BP; + + /* write one byte. */ + ret = spi_nor_write_data(nor, to, 1, buf); + if (ret < 0) + goto out; + WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret); + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto out; + + to++; + actual++; + } + + /* Write out most of the data here. */ + for (; actual < len - 1; actual += 2) { + nor->program_opcode = SPINOR_OP_AAI_WP; + + /* write two bytes. */ + ret = spi_nor_write_data(nor, to, 2, buf + actual); + if (ret < 0) + goto out; + WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret); + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto out; + to += 2; + nor->sst_write_second = true; + } + nor->sst_write_second = false; + + ret = spi_nor_write_disable(nor); + if (ret) + goto out; + + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto out; + + /* Write out trailing byte if it exists. */ + if (actual != len) { + ret = spi_nor_write_enable(nor); + if (ret) + goto out; + + nor->program_opcode = SPINOR_OP_BP; + ret = spi_nor_write_data(nor, to, 1, buf + actual); + if (ret < 0) + goto out; + WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret); + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto out; + + actual += 1; + + ret = spi_nor_write_disable(nor); + } +out: + *retlen += actual; + spi_nor_unlock_and_unprep(nor); + return ret; +} + +static int sst_nor_late_init(struct spi_nor *nor) +{ + if (nor->info->mfr_flags & SST_WRITE) + nor->mtd._write = sst_nor_write; + + return 0; +} + +static const struct spi_nor_fixups sst_nor_fixups = { + .late_init = sst_nor_late_init, +}; + +const struct spi_nor_manufacturer spi_nor_sst = { + .name = "sst", + .parts = sst_nor_parts, + .nparts = ARRAY_SIZE(sst_nor_parts), + .fixups = &sst_nor_fixups, +}; diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c new file mode 100644 index 0000000000..5ab9d53248 --- /dev/null +++ b/drivers/mtd/spi-nor/swp.c @@ -0,0 +1,432 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SPI NOR Software Write Protection logic. + * + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ +#include <linux/mtd/mtd.h> +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static u8 spi_nor_get_sr_bp_mask(struct spi_nor *nor) +{ + u8 mask = SR_BP2 | SR_BP1 | SR_BP0; + + if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6) + return mask | SR_BP3_BIT6; + + if (nor->flags & SNOR_F_HAS_4BIT_BP) + return mask | SR_BP3; + + return mask; +} + +static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor) +{ + if (nor->flags & SNOR_F_HAS_SR_TB_BIT6) + return SR_TB_BIT6; + else + return SR_TB_BIT5; +} + +static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor) +{ + unsigned int bp_slots, bp_slots_needed; + u8 mask = spi_nor_get_sr_bp_mask(nor); + + /* Reserved one for "protect none" and one for "protect all". 
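With three BP bits that leaves 2^3 - 2 = 6 codes for graduated protection sizes.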
*/ + bp_slots = (1 << hweight8(mask)) - 2; + bp_slots_needed = ilog2(nor->info->n_sectors); + + if (bp_slots_needed > bp_slots) + return nor->info->sector_size << + (bp_slots_needed - bp_slots); + else + return nor->info->sector_size; +} + +static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs, + uint64_t *len) +{ + struct mtd_info *mtd = &nor->mtd; + u64 min_prot_len; + u8 mask = spi_nor_get_sr_bp_mask(nor); + u8 tb_mask = spi_nor_get_sr_tb_mask(nor); + u8 bp, val = sr & mask; + + if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3_BIT6) + val = (val & ~SR_BP3_BIT6) | SR_BP3; + + bp = val >> SR_BP_SHIFT; + + if (!bp) { + /* No protection */ + *ofs = 0; + *len = 0; + return; + } + + min_prot_len = spi_nor_get_min_prot_length_sr(nor); + *len = min_prot_len << (bp - 1); + + if (*len > mtd->size) + *len = mtd->size; + + if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask) + *ofs = 0; + else + *ofs = mtd->size - *len; +} + +/* + * Return true if the entire region is locked (if @locked is true) or unlocked + * (if @locked is false); false otherwise. + */ +static bool spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, + uint64_t len, u8 sr, bool locked) +{ + loff_t lock_offs, lock_offs_max, offs_max; + uint64_t lock_len; + + if (!len) + return true; + + spi_nor_get_locked_range_sr(nor, sr, &lock_offs, &lock_len); + + lock_offs_max = lock_offs + lock_len; + offs_max = ofs + len; + + if (locked) + /* Requested range is a sub-range of locked range */ + return (offs_max <= lock_offs_max) && (ofs >= lock_offs); + else + /* Requested range does not overlap with locked range */ + return (ofs >= lock_offs_max) || (offs_max <= lock_offs); +} + +static bool spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, + u8 sr) +{ + return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true); +} + +static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, + uint64_t len, u8 sr) +{ + return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false); +} + +/* + * Lock a region of the flash. Compatible with ST Micro and similar flash. + * Supports the block protection bits BP{0,1,2}/BP{0,1,2,3} in the status + * register + * (SR). Does not support these features found in newer SR bitfields: + * - SEC: sector/block protect - only handle SEC=0 (block protect) + * - CMP: complement protect - only support CMP=0 (range is not complemented) + * + * Support for the following is provided conditionally for some flash: + * - TB: top/bottom protect + * + * Sample table portion for 8MB flash (Winbond w25q64fw): + * + * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion + * -------------------------------------------------------------------------- + * X | X | 0 | 0 | 0 | NONE | NONE + * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64 + * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32 + * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16 + * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8 + * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4 + * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2 + * X | X | 1 | 1 | 1 | 8 MB | ALL + * ------|-------|-------|-------|-------|---------------|------------------- + * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64 + * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32 + * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16 + * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8 + * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4 + * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2 + * + * Returns negative on errors, 0 on success. 
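+ * + * Example: to lock the upper 2 MB of the 8 MB flash above, lock_len is 2 MB + * and min_prot_len is 128 KB, so pow = ilog2(2 MB) - ilog2(128 KB) + 1 = 5 and + * BP2..BP0 = 101 gets written, i.e. the "Upper 1/4" row of the table.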
+ */ +static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + struct mtd_info *mtd = &nor->mtd; + u64 min_prot_len; + int ret, status_old, status_new; + u8 mask = spi_nor_get_sr_bp_mask(nor); + u8 tb_mask = spi_nor_get_sr_tb_mask(nor); + u8 pow, val; + loff_t lock_len; + bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; + bool use_top; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + status_old = nor->bouncebuf[0]; + + /* If nothing in our range is unlocked, we don't need to do anything */ + if (spi_nor_is_locked_sr(nor, ofs, len, status_old)) + return 0; + + /* If anything below us is unlocked, we can't use 'bottom' protection */ + if (!spi_nor_is_locked_sr(nor, 0, ofs, status_old)) + can_be_bottom = false; + + /* If anything above us is unlocked, we can't use 'top' protection */ + if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len), + status_old)) + can_be_top = false; + + if (!can_be_bottom && !can_be_top) + return -EINVAL; + + /* Prefer top, if both are valid */ + use_top = can_be_top; + + /* lock_len: length of region that should end up locked */ + if (use_top) + lock_len = mtd->size - ofs; + else + lock_len = ofs + len; + + if (lock_len == mtd->size) { + val = mask; + } else { + min_prot_len = spi_nor_get_min_prot_length_sr(nor); + pow = ilog2(lock_len) - ilog2(min_prot_len) + 1; + val = pow << SR_BP_SHIFT; + + if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3) + val = (val & ~SR_BP3) | SR_BP3_BIT6; + + if (val & ~mask) + return -EINVAL; + + /* Don't "lock" with no region! */ + if (!(val & mask)) + return -EINVAL; + } + + status_new = (status_old & ~mask & ~tb_mask) | val; + + /* + * Disallow further writes if WP# pin is neither left floating nor + * wrongly tied to GND (that includes internal pull-downs). + * WP# pin hard strapped to GND can be a valid use case. + */ + if (!(nor->flags & SNOR_F_NO_WP)) + status_new |= SR_SRWD; + + if (!use_top) + status_new |= tb_mask; + + /* Don't bother if they're the same */ + if (status_new == status_old) + return 0; + + /* Only modify protection if it will not unlock other areas */ + if ((status_new & mask) < (status_old & mask)) + return -EINVAL; + + return spi_nor_write_sr_and_check(nor, status_new); +} + +/* + * Unlock a region of the flash. See spi_nor_sr_lock() for more info + * + * Returns negative on errors, 0 on success. 
+ */ +static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + struct mtd_info *mtd = &nor->mtd; + u64 min_prot_len; + int ret, status_old, status_new; + u8 mask = spi_nor_get_sr_bp_mask(nor); + u8 tb_mask = spi_nor_get_sr_tb_mask(nor); + u8 pow, val; + loff_t lock_len; + bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; + bool use_top; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + status_old = nor->bouncebuf[0]; + + /* If nothing in our range is locked, we don't need to do anything */ + if (spi_nor_is_unlocked_sr(nor, ofs, len, status_old)) + return 0; + + /* If anything below us is locked, we can't use 'top' protection */ + if (!spi_nor_is_unlocked_sr(nor, 0, ofs, status_old)) + can_be_top = false; + + /* If anything above us is locked, we can't use 'bottom' protection */ + if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len), + status_old)) + can_be_bottom = false; + + if (!can_be_bottom && !can_be_top) + return -EINVAL; + + /* Prefer top, if both are valid */ + use_top = can_be_top; + + /* lock_len: length of region that should remain locked */ + if (use_top) + lock_len = mtd->size - (ofs + len); + else + lock_len = ofs; + + if (lock_len == 0) { + val = 0; /* fully unlocked */ + } else { + min_prot_len = spi_nor_get_min_prot_length_sr(nor); + pow = ilog2(lock_len) - ilog2(min_prot_len) + 1; + val = pow << SR_BP_SHIFT; + + if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3) + val = (val & ~SR_BP3) | SR_BP3_BIT6; + + /* Some power-of-two sizes are not supported */ + if (val & ~mask) + return -EINVAL; + } + + status_new = (status_old & ~mask & ~tb_mask) | val; + + /* Don't protect status register if we're fully unlocked */ + if (lock_len == 0) + status_new &= ~SR_SRWD; + + if (!use_top) + status_new |= tb_mask; + + /* Don't bother if they're the same */ + if (status_new == status_old) + return 0; + + /* Only modify protection if it will not lock other areas */ + if ((status_new & mask) > (status_old & mask)) + return -EINVAL; + + return spi_nor_write_sr_and_check(nor, status_new); +} + +/* + * Check if a region of the flash is (completely) locked. See spi_nor_sr_lock() + * for more info. + * + * Returns 1 if entire region is locked, 0 if any portion is unlocked, and + * negative on errors. 
+ */ +static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + int ret; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + return spi_nor_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]); +} + +static const struct spi_nor_locking_ops spi_nor_sr_locking_ops = { + .lock = spi_nor_sr_lock, + .unlock = spi_nor_sr_unlock, + .is_locked = spi_nor_sr_is_locked, +}; + +void spi_nor_init_default_locking_ops(struct spi_nor *nor) +{ + nor->params->locking_ops = &spi_nor_sr_locking_ops; +} + +static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + int ret; + + ret = spi_nor_prep_and_lock(nor); + if (ret) + return ret; + + ret = nor->params->locking_ops->lock(nor, ofs, len); + + spi_nor_unlock_and_unprep(nor); + return ret; +} + +static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + int ret; + + ret = spi_nor_prep_and_lock(nor); + if (ret) + return ret; + + ret = nor->params->locking_ops->unlock(nor, ofs, len); + + spi_nor_unlock_and_unprep(nor); + return ret; +} + +static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + int ret; + + ret = spi_nor_prep_and_lock(nor); + if (ret) + return ret; + + ret = nor->params->locking_ops->is_locked(nor, ofs, len); + + spi_nor_unlock_and_unprep(nor); + return ret; +} + +/** + * spi_nor_try_unlock_all() - Tries to unlock the entire flash memory array. + * @nor: pointer to a 'struct spi_nor'. + * + * Some SPI NOR flashes are write protected by default after a power-on reset + * cycle, in order to avoid inadvertent writes during power-up. Backward + * compatibility imposes to unlock the entire flash memory array at power-up + * by default. + * + * Unprotecting the entire flash array will fail for boards which are hardware + * write-protected. Thus any errors are ignored. 
+ */ +void spi_nor_try_unlock_all(struct spi_nor *nor) +{ + int ret; + + if (!(nor->flags & SNOR_F_HAS_LOCK)) + return; + + dev_dbg(nor->dev, "Unprotecting entire flash array\n"); + + ret = spi_nor_unlock(&nor->mtd, 0, nor->params->size); + if (ret) + dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n"); +} + +void spi_nor_set_mtd_locking_ops(struct spi_nor *nor) +{ + struct mtd_info *mtd = &nor->mtd; + + if (!nor->params->locking_ops) + return; + + mtd->_lock = spi_nor_lock; + mtd->_unlock = spi_nor_unlock; + mtd->_is_locked = spi_nor_is_locked; +} diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c new file mode 100644 index 0000000000..c09bb832b3 --- /dev/null +++ b/drivers/mtd/spi-nor/sysfs.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/mtd/spi-nor.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi-mem.h> +#include <linux/sysfs.h> + +#include "core.h" + +static ssize_t manufacturer_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct spi_device *spi = to_spi_device(dev); + struct spi_mem *spimem = spi_get_drvdata(spi); + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + + return sysfs_emit(buf, "%s\n", nor->manufacturer->name); +} +static DEVICE_ATTR_RO(manufacturer); + +static ssize_t partname_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct spi_device *spi = to_spi_device(dev); + struct spi_mem *spimem = spi_get_drvdata(spi); + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + + return sysfs_emit(buf, "%s\n", nor->info->name); +} +static DEVICE_ATTR_RO(partname); + +static ssize_t jedec_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct spi_device *spi = to_spi_device(dev); + struct spi_mem *spimem = spi_get_drvdata(spi); + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + const u8 *id = nor->info->id_len ? 
nor->info->id : nor->id; + u8 id_len = nor->info->id_len ?: SPI_NOR_MAX_ID_LEN; + + return sysfs_emit(buf, "%*phN\n", id_len, id); +} +static DEVICE_ATTR_RO(jedec_id); + +static struct attribute *spi_nor_sysfs_entries[] = { + &dev_attr_manufacturer.attr, + &dev_attr_partname.attr, + &dev_attr_jedec_id.attr, + NULL +}; + +static ssize_t sfdp_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct spi_device *spi = to_spi_device(kobj_to_dev(kobj)); + struct spi_mem *spimem = spi_get_drvdata(spi); + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + struct sfdp *sfdp = nor->sfdp; + size_t sfdp_size = sfdp->num_dwords * sizeof(*sfdp->dwords); + + return memory_read_from_buffer(buf, count, &off, nor->sfdp->dwords, + sfdp_size); +} +static BIN_ATTR_RO(sfdp, 0); + +static struct bin_attribute *spi_nor_sysfs_bin_entries[] = { + &bin_attr_sfdp, + NULL +}; + +static umode_t spi_nor_sysfs_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct spi_device *spi = to_spi_device(kobj_to_dev(kobj)); + struct spi_mem *spimem = spi_get_drvdata(spi); + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + + if (attr == &dev_attr_manufacturer.attr && !nor->manufacturer) + return 0; + if (attr == &dev_attr_jedec_id.attr && !nor->info->id_len && !nor->id) + return 0; + + return 0444; +} + +static umode_t spi_nor_sysfs_is_bin_visible(struct kobject *kobj, + struct bin_attribute *attr, int n) +{ + struct spi_device *spi = to_spi_device(kobj_to_dev(kobj)); + struct spi_mem *spimem = spi_get_drvdata(spi); + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + + if (attr == &bin_attr_sfdp && nor->sfdp) + return 0444; + + return 0; +} + +static const struct attribute_group spi_nor_sysfs_group = { + .name = "spi-nor", + .is_visible = spi_nor_sysfs_is_visible, + .is_bin_visible = spi_nor_sysfs_is_bin_visible, + .attrs = spi_nor_sysfs_entries, + .bin_attrs = spi_nor_sysfs_bin_entries, +}; + +const struct attribute_group *spi_nor_sysfs_groups[] = { + &spi_nor_sysfs_group, + NULL +}; diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c new file mode 100644 index 0000000000..cd99c9a1c5 --- /dev/null +++ b/drivers/mtd/spi-nor/winbond.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +#define WINBOND_NOR_OP_RDEAR 0xc8 /* Read Extended Address Register */ +#define WINBOND_NOR_OP_WREAR 0xc5 /* Write Extended Address Register */ + +#define WINBOND_NOR_WREAR_OP(buf) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(WINBOND_NOR_OP_WREAR, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_OUT(1, buf, 0)) + +static int +w25q256_post_bfpt_fixups(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt) +{ + /* + * W25Q256JV supports 4B opcodes but W25Q256FV does not. + * Unfortunately, Winbond has re-used the same JEDEC ID for both + * variants which prevents us from defining a new entry in the parts + * table. + * To differentiate between W25Q256JV and W25Q256FV check SFDP header + * version: only JV has JESD216A compliant structure (version 5). 
+ */ + if (bfpt_header->major == SFDP_JESD216_MAJOR && + bfpt_header->minor == SFDP_JESD216A_MINOR) + nor->flags |= SNOR_F_4B_OPCODES; + + return 0; +} + +static const struct spi_nor_fixups w25q256_fixups = { + .post_bfpt = w25q256_post_bfpt_fixups, +}; + +static const struct flash_info winbond_nor_parts[] = { + /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ + { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1) + NO_SFDP_FLAGS(SECT_4K) }, + { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2) + NO_SFDP_FLAGS(SECT_4K) }, + { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4) + NO_SFDP_FLAGS(SECT_4K) }, + { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8) + NO_SFDP_FLAGS(SECT_4K) }, + { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16) + NO_SFDP_FLAGS(SECT_4K) }, + { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32) + NO_SFDP_FLAGS(SECT_4K) }, + { "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SECT_4K) }, + { "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4) + NO_SFDP_FLAGS(SECT_4K) }, + { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4) + NO_SFDP_FLAGS(SECT_4K) }, + { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4) + NO_SFDP_FLAGS(SECT_4K) }, + { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64) + NO_SFDP_FLAGS(SECT_4K) }, + { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + OTP_INFO(256, 3, 0x1000, 0x1000) }, + { "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + OTP_INFO(256, 3, 0x1000, 0x1000) }, + { "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K) }, + { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K) }, + { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256) + FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16) + NO_SFDP_FLAGS(SECT_4K) }, + { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16) + NO_SFDP_FLAGS(SECT_4K) }, + { "w25q128", INFO(0xef4018, 0, 0, 0) + PARSE_SFDP + 
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + .fixups = &w25q256_fixups }, + { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512) + PARSE_SFDP }, + { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ | + SPI_NOR_DUAL_READ) }, + { "w25q512nwq", INFO(0xef6020, 0, 0, 0) + PARSE_SFDP + OTP_INFO(256, 3, 0x1000, 0x1000) }, + { "w25q512nwm", INFO(0xef8020, 0, 64 * 1024, 1024) + PARSE_SFDP + OTP_INFO(256, 3, 0x1000, 0x1000) }, + { "w25q512jvq", INFO(0xef4020, 0, 64 * 1024, 1024) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, +}; + +/** + * winbond_nor_write_ear() - Write Extended Address Register. + * @nor: pointer to 'struct spi_nor'. + * @ear: value to write to the Extended Address Register. + * + * Return: 0 on success, -errno otherwise. + */ +static int winbond_nor_write_ear(struct spi_nor *nor, u8 ear) +{ + int ret; + + nor->bouncebuf[0] = ear; + + if (nor->spimem) { + struct spi_mem_op op = WINBOND_NOR_WREAR_OP(nor->bouncebuf); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_write_reg(nor, + WINBOND_NOR_OP_WREAR, + nor->bouncebuf, 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d writing EAR\n", ret); + + return ret; +} + +/** + * winbond_nor_set_4byte_addr_mode() - Set 4-byte address mode for Winbond + * flashes. + * @nor: pointer to 'struct spi_nor'. + * @enable: true to enter the 4-byte address mode, false to exit the 4-byte + * address mode. + * + * Return: 0 on success, -errno otherwise. + */ +static int winbond_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable) +{ + int ret; + + ret = spi_nor_set_4byte_addr_mode_en4b_ex4b(nor, enable); + if (ret || enable) + return ret; + + /* + * On Winbond W25Q256FV, leaving 4byte mode causes the Extended Address + * Register to be set to 1, so all 3-byte-address reads come from the + * second 16M. We must clear the register to enable normal behavior. + */ + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + + ret = winbond_nor_write_ear(nor, 0); + if (ret) + return ret; + + return spi_nor_write_disable(nor); +} + +static const struct spi_nor_otp_ops winbond_nor_otp_ops = { + .read = spi_nor_otp_read_secr, + .write = spi_nor_otp_write_secr, + .erase = spi_nor_otp_erase_secr, + .lock = spi_nor_otp_lock_sr2, + .is_locked = spi_nor_otp_is_locked_sr2, +}; + +static int winbond_nor_late_init(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter *params = nor->params; + + if (params->otp.org->n_regions) + params->otp.ops = &winbond_nor_otp_ops; + + /* + * Winbond seems to require that the Extended Address Register to be set + * to zero when exiting the 4-Byte Address Mode, at least for W25Q256FV. + * This requirement is not described in the JESD216 SFDP standard, thus + * it is Winbond specific. Since we do not know if other Winbond flashes + * have the same requirement, play safe and overwrite the method parsed + * from BFPT, if any. 
+ */ + params->set_4byte_addr_mode = winbond_nor_set_4byte_addr_mode; + + return 0; +} + +static const struct spi_nor_fixups winbond_nor_fixups = { + .late_init = winbond_nor_late_init, +}; + +const struct spi_nor_manufacturer spi_nor_winbond = { + .name = "winbond", + .parts = winbond_nor_parts, + .nparts = ARRAY_SIZE(winbond_nor_parts), + .fixups = &winbond_nor_fixups, +}; diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c new file mode 100644 index 0000000000..00d53eae5e --- /dev/null +++ b/drivers/mtd/spi-nor/xilinx.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +#define XILINX_OP_SE 0x50 /* Sector erase */ +#define XILINX_OP_PP 0x82 /* Page program */ +#define XILINX_OP_RDSR 0xd7 /* Read status register */ + +#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */ +#define XSR_RDY BIT(7) /* Ready */ + +#define XILINX_RDSR_OP(buf) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(XILINX_OP_RDSR, 0), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_IN(1, buf, 0)) + +#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \ + .id = { \ + ((_jedec_id) >> 16) & 0xff, \ + ((_jedec_id) >> 8) & 0xff, \ + (_jedec_id) & 0xff \ + }, \ + .id_len = 3, \ + .sector_size = (8 * (_page_size)), \ + .n_sectors = (_n_sectors), \ + .page_size = (_page_size), \ + .n_banks = 1, \ + .addr_nbytes = 3, \ + .flags = SPI_NOR_NO_FR + +/* Xilinx S3AN share MFR with Atmel SPI NOR */ +static const struct flash_info xilinx_nor_parts[] = { + /* Xilinx S3AN Internal Flash */ + { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) }, + { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) }, + { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) }, + { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) }, + { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) }, +}; + +/* + * This code converts an address to the Default Address Mode, that has non + * power of two page sizes. We must support this mode because it is the default + * mode supported by Xilinx tools, it can access the whole flash area and + * changing over to the Power-of-two mode is irreversible and corrupts the + * original data. + * Addr can safely be unsigned int, the biggest S3AN device is smaller than + * 4 MiB. + */ +static u32 s3an_nor_convert_addr(struct spi_nor *nor, u32 addr) +{ + u32 page_size = nor->params->page_size; + u32 offset, page; + + offset = addr % page_size; + page = addr / page_size; + page <<= (page_size > 512) ? 10 : 9; + + return page | offset; +} + +/** + * xilinx_nor_read_sr() - Read the Status Register on S3AN flashes. + * @nor: pointer to 'struct spi_nor'. + * @sr: pointer to a DMA-able buffer where the value of the + * Status Register will be written. + * + * Return: 0 on success, -errno otherwise. + */ +static int xilinx_nor_read_sr(struct spi_nor *nor, u8 *sr) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = XILINX_RDSR_OP(sr); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_read_reg(nor, XILINX_OP_RDSR, sr, + 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d reading SR\n", ret); + + return ret; +} + +/** + * xilinx_nor_sr_ready() - Query the Status Register of the S3AN flash to see + * if the flash is ready for new commands. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 1 if ready, 0 if not ready, -errno on errors. 
+ */ +static int xilinx_nor_sr_ready(struct spi_nor *nor) +{ + int ret; + + ret = xilinx_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + return !!(nor->bouncebuf[0] & XSR_RDY); +} + +static int xilinx_nor_setup(struct spi_nor *nor, + const struct spi_nor_hwcaps *hwcaps) +{ + u32 page_size; + int ret; + + ret = xilinx_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + nor->erase_opcode = XILINX_OP_SE; + nor->program_opcode = XILINX_OP_PP; + nor->read_opcode = SPINOR_OP_READ; + nor->flags |= SNOR_F_NO_OP_CHIP_ERASE; + + /* + * These flashes have a page size of 264 or 528 bytes (known as + * Default addressing mode). It can be changed to a more standard + * Power of two mode where the page size is 256/512. This comes + * with a price: there is 3% less space, the data is corrupted + * and the page size cannot be changed back to default addressing + * mode. + * + * The current addressing mode can be read from the XRDSR register + * and should not be changed, because it is a destructive operation. + */ + if (nor->bouncebuf[0] & XSR_PAGESIZE) { + /* Flash in Power of 2 mode */ + page_size = (nor->params->page_size == 264) ? 256 : 512; + nor->params->page_size = page_size; + nor->mtd.writebufsize = page_size; + nor->params->size = 8 * page_size * nor->info->n_sectors; + nor->mtd.erasesize = 8 * page_size; + } else { + /* Flash in Default addressing mode */ + nor->params->convert_addr = s3an_nor_convert_addr; + nor->mtd.erasesize = nor->info->sector_size; + } + + return 0; +} + +static int xilinx_nor_late_init(struct spi_nor *nor) +{ + nor->params->setup = xilinx_nor_setup; + nor->params->ready = xilinx_nor_sr_ready; + + return 0; +} + +static const struct spi_nor_fixups xilinx_nor_fixups = { + .late_init = xilinx_nor_late_init, +}; + +const struct spi_nor_manufacturer spi_nor_xilinx = { + .name = "xilinx", + .parts = xilinx_nor_parts, + .nparts = ARRAY_SIZE(xilinx_nor_parts), + .fixups = &xilinx_nor_fixups, +}; diff --git a/drivers/mtd/spi-nor/xmc.c b/drivers/mtd/spi-nor/xmc.c new file mode 100644 index 0000000000..051411e863 --- /dev/null +++ b/drivers/mtd/spi-nor/xmc.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static const struct flash_info xmc_nor_parts[] = { + /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */ + { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256) + NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, +}; + +const struct spi_nor_manufacturer spi_nor_xmc = { + .name = "xmc", + .parts = xmc_nor_parts, + .nparts = ARRAY_SIZE(xmc_nor_parts), +};
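As a quick illustration of the Default Address Mode arithmetic implemented by s3an_nor_convert_addr() in xilinx.c above, here is a minimal standalone user-space sketch. It is not driver code: the s3an_convert_addr() helper, the sample addresses and the output format are made up for this demo; only the split-and-shift conversion itself is taken from the driver, and it builds with any hosted C compiler.

#include <stdio.h>
#include <stdint.h>

/*
 * Mirror of the conversion done by s3an_nor_convert_addr(): split a linear
 * address into (page, offset) using the non-power-of-two page size, then
 * place the page number on a 512-byte (264 B pages) or 1024-byte (528 B
 * pages) boundary, as the S3AN flash expects in Default Address Mode.
 */
static uint32_t s3an_convert_addr(uint32_t addr, uint32_t page_size)
{
	uint32_t offset = addr % page_size;
	uint32_t page = addr / page_size;

	page <<= (page_size > 512) ? 10 : 9;

	return page | offset;
}

int main(void)
{
	/* 264 B pages cover 3S50AN..3S700AN, 528 B pages the 3S1400AN. */
	const uint32_t page_sizes[] = { 264, 528 };
	const uint32_t addrs[] = { 0, 263, 264, 1000, 65536 };
	unsigned int i, j;

	for (i = 0; i < sizeof(page_sizes) / sizeof(page_sizes[0]); i++)
		for (j = 0; j < sizeof(addrs) / sizeof(addrs[0]); j++)
			printf("page_size %3u: linear 0x%06x -> default-mode 0x%06x\n",
			       (unsigned int)page_sizes[i],
			       (unsigned int)addrs[j],
			       (unsigned int)s3an_convert_addr(addrs[j],
							       page_sizes[i]));

	return 0;
}

Because each 264-byte page is laid out on a 512-byte boundary (and each 528-byte page on a 1024-byte boundary), the offset can never spill into the page field, which is why the driver can combine the two with a plain OR instead of an addition. The demo only mirrors the address math; the real driver additionally chooses between this Default Address Mode and the Power-of-two mode in xilinx_nor_setup() based on the XSR_PAGESIZE bit.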