author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000
commit    | 5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      | a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/mtd/spi-nor
parent    | Initial commit. (diff)
download  | linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.tar.xz, linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.zip
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/mtd/spi-nor')
31 files changed, 9653 insertions, 0 deletions
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig new file mode 100644 index 000000000..ffc4b380f --- /dev/null +++ b/drivers/mtd/spi-nor/Kconfig @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: GPL-2.0-only +menuconfig MTD_SPI_NOR + tristate "SPI NOR device support" + depends on MTD + depends on MTD && SPI_MASTER + select SPI_MEM + help + This is the framework for the SPI NOR which can be used by the SPI + device drivers and the SPI NOR device driver. + +if MTD_SPI_NOR + +config MTD_SPI_NOR_USE_4K_SECTORS + bool "Use small 4096 B erase sectors" + default y + help + Many flash memories support erasing small (4096 B) sectors. Depending + on the usage this feature may provide performance gain in comparison + to erasing whole blocks (32/64 KiB). + Changing a small part of the flash's contents is usually faster with + small sectors. On the other hand erasing should be faster when using + 64 KiB block instead of 16 × 4 KiB sectors. + + Please note that some tools/drivers/filesystems may not work with + 4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum). + +source "drivers/mtd/spi-nor/controllers/Kconfig" + +endif # MTD_SPI_NOR diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile new file mode 100644 index 000000000..653923896 --- /dev/null +++ b/drivers/mtd/spi-nor/Makefile @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0 + +spi-nor-objs := core.o sfdp.o +spi-nor-objs += atmel.o +spi-nor-objs += catalyst.o +spi-nor-objs += eon.o +spi-nor-objs += esmt.o +spi-nor-objs += everspin.o +spi-nor-objs += fujitsu.o +spi-nor-objs += gigadevice.o +spi-nor-objs += intel.o +spi-nor-objs += issi.o +spi-nor-objs += macronix.o +spi-nor-objs += micron-st.o +spi-nor-objs += spansion.o +spi-nor-objs += sst.o +spi-nor-objs += winbond.o +spi-nor-objs += xilinx.o +spi-nor-objs += xmc.o +obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o + +obj-$(CONFIG_MTD_SPI_NOR) += controllers/ diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c new file mode 100644 index 000000000..deacf87a6 --- /dev/null +++ b/drivers/mtd/spi-nor/atmel.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +/* + * The Atmel AT25FS010/AT25FS040 parts have some weird configuration for the + * block protection bits. We don't support them. But legacy behavior in linux + * is to unlock the whole flash array on startup. Therefore, we have to support + * exactly this operation. 
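
As a reading aid for the MTD_SPI_NOR_USE_4K_SECTORS help text above: a hedged, simplified sketch of how a part flagged with SECT_4K typically ends up exposing a 4 KiB erase size to MTD. This is an illustration in the style of older spi-nor core code, not the erase-map selection logic this commit actually adds; info, nor and mtd stand for the usual core.c locals.

	/* Hedged sketch only: simplified erase-size selection for illustration. */
	#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
		if (info->flags & SECT_4K) {
			/* Prefer the small-sector erase: opcode 0x20, 4 KiB granularity. */
			nor->erase_opcode = SPINOR_OP_BE_4K;
			mtd->erasesize = 4096;
		} else
	#endif
		{
			/* Fall back to the sector erase: opcode 0xd8, whole 32/64 KiB blocks. */
			nor->erase_opcode = SPINOR_OP_SE;
			mtd->erasesize = info->sector_size;
		}

Filesystems that need larger erase blocks (the UBIFS case mentioned in the help text) would simply build with the option disabled, so the second branch applies.
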
+ */ +static int atmel_at25fs_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + return -EOPNOTSUPP; +} + +static int atmel_at25fs_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + int ret; + + /* We only support unlocking the whole flash array */ + if (ofs || len != nor->params->size) + return -EINVAL; + + /* Write 0x00 to the status register to disable write protection */ + ret = spi_nor_write_sr_and_check(nor, 0); + if (ret) + dev_dbg(nor->dev, "unable to clear BP bits, WP# asserted?\n"); + + return ret; +} + +static int atmel_at25fs_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + return -EOPNOTSUPP; +} + +static const struct spi_nor_locking_ops atmel_at25fs_locking_ops = { + .lock = atmel_at25fs_lock, + .unlock = atmel_at25fs_unlock, + .is_locked = atmel_at25fs_is_locked, +}; + +static void atmel_at25fs_default_init(struct spi_nor *nor) +{ + nor->params->locking_ops = &atmel_at25fs_locking_ops; +} + +static const struct spi_nor_fixups atmel_at25fs_fixups = { + .default_init = atmel_at25fs_default_init, +}; + +static const struct flash_info atmel_parts[] = { + /* Atmel -- some are (confusingly) marketed as "DataFlash" */ + { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K | SPI_NOR_HAS_LOCK) + .fixups = &atmel_at25fs_fixups }, + { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_HAS_LOCK) + .fixups = &atmel_at25fs_fixups }, + + { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_HAS_LOCK) }, + { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) }, + { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) }, + { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) }, + + { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + + { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) }, + { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_HAS_LOCK) }, + { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_HAS_LOCK) }, + { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) }, + + { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) }, +}; + +const struct spi_nor_manufacturer spi_nor_atmel = { + .name = "atmel", + .parts = atmel_parts, + .nparts = ARRAY_SIZE(atmel_parts), +}; diff --git a/drivers/mtd/spi-nor/catalyst.c b/drivers/mtd/spi-nor/catalyst.c new file mode 100644 index 000000000..011b83e99 --- /dev/null +++ b/drivers/mtd/spi-nor/catalyst.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. 
+ */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static const struct flash_info catalyst_parts[] = { + /* Catalyst / On Semiconductor -- non-JEDEC */ + { "cat25c11", CAT25_INFO(16, 8, 16, 1, + SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, + { "cat25c03", CAT25_INFO(32, 8, 16, 2, + SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, + { "cat25c09", CAT25_INFO(128, 8, 32, 2, + SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, + { "cat25c17", CAT25_INFO(256, 8, 32, 2, + SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, + { "cat25128", CAT25_INFO(2048, 8, 64, 2, + SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, +}; + +const struct spi_nor_manufacturer spi_nor_catalyst = { + .name = "catalyst", + .parts = catalyst_parts, + .nparts = ARRAY_SIZE(catalyst_parts), +}; diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig new file mode 100644 index 000000000..5c0e0ec2e --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/Kconfig @@ -0,0 +1,64 @@ +# SPDX-License-Identifier: GPL-2.0-only +config SPI_ASPEED_SMC + tristate "Aspeed flash controllers in SPI mode" + depends on ARCH_ASPEED || COMPILE_TEST + depends on HAS_IOMEM && OF + help + This enables support for the Firmware Memory controller (FMC) + in the Aspeed AST2500/AST2400 SoCs when attached to SPI NOR chips, + and support for the SPI flash memory controller (SPI) for + the host firmware. The implementation only supports SPI NOR. + +config SPI_HISI_SFC + tristate "Hisilicon FMC SPI NOR Flash Controller(SFC)" + depends on ARCH_HISI || COMPILE_TEST + depends on HAS_IOMEM + help + This enables support for HiSilicon FMC SPI NOR flash controller. + +config SPI_NXP_SPIFI + tristate "NXP SPI Flash Interface (SPIFI)" + depends on OF && (ARCH_LPC18XX || COMPILE_TEST) + depends on HAS_IOMEM + help + Enable support for the NXP LPC SPI Flash Interface controller. + + SPIFI is a specialized controller for connecting serial SPI + Flash. Enable this option if you have a device with a SPIFI + controller and want to access the Flash as a mtd device. + +config SPI_INTEL_SPI + tristate + +config SPI_INTEL_SPI_PCI + tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)" + depends on X86 && PCI + select SPI_INTEL_SPI + help + This enables PCI support for the Intel PCH/PCU SPI controller in + master mode. This controller is present in modern Intel hardware + and is used to hold BIOS and other persistent settings. Using + this driver it is possible to upgrade BIOS directly from Linux. + + Say N here unless you know what you are doing. Overwriting the + SPI flash may render the system unbootable. + + To compile this driver as a module, choose M here: the module + will be called intel-spi-pci. + +config SPI_INTEL_SPI_PLATFORM + tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)" + depends on X86 + select SPI_INTEL_SPI + help + This enables platform support for the Intel PCH/PCU SPI + controller in master mode. This controller is present in modern + Intel hardware and is used to hold BIOS and other persistent + settings. Using this driver it is possible to upgrade BIOS + directly from Linux. + + Say N here unless you know what you are doing. Overwriting the + SPI flash may render the system unbootable. + + To compile this driver as a module, choose M here: the module + will be called intel-spi-platform. 
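
As a reading aid for the flash tables above: a hedged annotation of the positional INFO()/CAT25_INFO() arguments, paraphrased from how core.h in this version lays the fields out. The comments are mine, not taken from the header.

	/* Hedged annotation of the positional fields used in the parts tables. */
	{ "at25fs010", INFO(0x1f6601,    /* JEDEC ID (manufacturer + device) */
			    0,           /* extended device ID */
			    32 * 1024,   /* erase sector size in bytes */
			    4,           /* number of sectors -> 128 KiB total */
			    SECT_4K | SPI_NOR_HAS_LOCK) },  /* feature flags */

	{ "cat25c11", CAT25_INFO(16,     /* sector size: these EEPROM-like parts are tiny */
				 8,      /* number of sectors */
				 16,     /* page size used for programming */
				 1,      /* address width in bytes */
				 SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
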
diff --git a/drivers/mtd/spi-nor/controllers/Makefile b/drivers/mtd/spi-nor/controllers/Makefile new file mode 100644 index 000000000..e7abba491 --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o +obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o +obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o +obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o +obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o +obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o diff --git a/drivers/mtd/spi-nor/controllers/aspeed-smc.c b/drivers/mtd/spi-nor/controllers/aspeed-smc.c new file mode 100644 index 000000000..7225870e8 --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/aspeed-smc.c @@ -0,0 +1,910 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * ASPEED Static Memory Controller driver + * + * Copyright (c) 2015-2016, IBM Corporation. + */ + +#include <linux/bug.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/mtd/spi-nor.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/sizes.h> +#include <linux/sysfs.h> + +#define DEVICE_NAME "aspeed-smc" + +/* + * The driver only support SPI flash + */ +enum aspeed_smc_flash_type { + smc_type_nor = 0, + smc_type_nand = 1, + smc_type_spi = 2, +}; + +struct aspeed_smc_chip; + +struct aspeed_smc_info { + u32 maxsize; /* maximum size of chip window */ + u8 nce; /* number of chip enables */ + bool hastype; /* flash type field exists in config reg */ + u8 we0; /* shift for write enable bit for CE0 */ + u8 ctl0; /* offset in regs of ctl for CE0 */ + + void (*set_4b)(struct aspeed_smc_chip *chip); +}; + +static void aspeed_smc_chip_set_4b_spi_2400(struct aspeed_smc_chip *chip); +static void aspeed_smc_chip_set_4b(struct aspeed_smc_chip *chip); + +static const struct aspeed_smc_info fmc_2400_info = { + .maxsize = 64 * 1024 * 1024, + .nce = 5, + .hastype = true, + .we0 = 16, + .ctl0 = 0x10, + .set_4b = aspeed_smc_chip_set_4b, +}; + +static const struct aspeed_smc_info spi_2400_info = { + .maxsize = 64 * 1024 * 1024, + .nce = 1, + .hastype = false, + .we0 = 0, + .ctl0 = 0x04, + .set_4b = aspeed_smc_chip_set_4b_spi_2400, +}; + +static const struct aspeed_smc_info fmc_2500_info = { + .maxsize = 256 * 1024 * 1024, + .nce = 3, + .hastype = true, + .we0 = 16, + .ctl0 = 0x10, + .set_4b = aspeed_smc_chip_set_4b, +}; + +static const struct aspeed_smc_info spi_2500_info = { + .maxsize = 128 * 1024 * 1024, + .nce = 2, + .hastype = false, + .we0 = 16, + .ctl0 = 0x10, + .set_4b = aspeed_smc_chip_set_4b, +}; + +enum aspeed_smc_ctl_reg_value { + smc_base, /* base value without mode for other commands */ + smc_read, /* command reg for (maybe fast) reads */ + smc_write, /* command reg for writes */ + smc_max, +}; + +struct aspeed_smc_controller; + +struct aspeed_smc_chip { + int cs; + struct aspeed_smc_controller *controller; + void __iomem *ctl; /* control register */ + void __iomem *ahb_base; /* base of chip window */ + u32 ahb_window_size; /* chip mapping window size */ + u32 ctl_val[smc_max]; /* control settings */ + enum aspeed_smc_flash_type type; /* what type of flash */ + struct spi_nor nor; +}; + +struct aspeed_smc_controller { + struct device *dev; + + struct mutex mutex; /* controller access mutex */ + const struct aspeed_smc_info *info; /* type info of controller */ + void __iomem *regs; /* controller registers */ + void __iomem 
*ahb_base; /* per-chip windows resource */ + u32 ahb_window_size; /* full mapping window size */ + + struct aspeed_smc_chip *chips[]; /* pointers to attached chips */ +}; + +/* + * SPI Flash Configuration Register (AST2500 SPI) + * or + * Type setting Register (AST2500 FMC). + * CE0 and CE1 can only be of type SPI. CE2 can be of type NOR but the + * driver does not support it. + */ +#define CONFIG_REG 0x0 +#define CONFIG_DISABLE_LEGACY BIT(31) /* 1 */ + +#define CONFIG_CE2_WRITE BIT(18) +#define CONFIG_CE1_WRITE BIT(17) +#define CONFIG_CE0_WRITE BIT(16) + +#define CONFIG_CE2_TYPE BIT(4) /* AST2500 FMC only */ +#define CONFIG_CE1_TYPE BIT(2) /* AST2500 FMC only */ +#define CONFIG_CE0_TYPE BIT(0) /* AST2500 FMC only */ + +/* + * CE Control Register + */ +#define CE_CONTROL_REG 0x4 + +/* + * CEx Control Register + */ +#define CONTROL_AAF_MODE BIT(31) +#define CONTROL_IO_MODE_MASK GENMASK(30, 28) +#define CONTROL_IO_DUAL_DATA BIT(29) +#define CONTROL_IO_DUAL_ADDR_DATA (BIT(29) | BIT(28)) +#define CONTROL_IO_QUAD_DATA BIT(30) +#define CONTROL_IO_QUAD_ADDR_DATA (BIT(30) | BIT(28)) +#define CONTROL_CE_INACTIVE_SHIFT 24 +#define CONTROL_CE_INACTIVE_MASK GENMASK(27, \ + CONTROL_CE_INACTIVE_SHIFT) +/* 0 = 16T ... 15 = 1T T=HCLK */ +#define CONTROL_COMMAND_SHIFT 16 +#define CONTROL_DUMMY_COMMAND_OUT BIT(15) +#define CONTROL_IO_DUMMY_HI BIT(14) +#define CONTROL_IO_DUMMY_HI_SHIFT 14 +#define CONTROL_CLK_DIV4 BIT(13) /* others */ +#define CONTROL_IO_ADDRESS_4B BIT(13) /* AST2400 SPI */ +#define CONTROL_RW_MERGE BIT(12) +#define CONTROL_IO_DUMMY_LO_SHIFT 6 +#define CONTROL_IO_DUMMY_LO GENMASK(7, \ + CONTROL_IO_DUMMY_LO_SHIFT) +#define CONTROL_IO_DUMMY_MASK (CONTROL_IO_DUMMY_HI | \ + CONTROL_IO_DUMMY_LO) +#define CONTROL_IO_DUMMY_SET(dummy) \ + (((((dummy) >> 2) & 0x1) << CONTROL_IO_DUMMY_HI_SHIFT) | \ + (((dummy) & 0x3) << CONTROL_IO_DUMMY_LO_SHIFT)) + +#define CONTROL_CLOCK_FREQ_SEL_SHIFT 8 +#define CONTROL_CLOCK_FREQ_SEL_MASK GENMASK(11, \ + CONTROL_CLOCK_FREQ_SEL_SHIFT) +#define CONTROL_LSB_FIRST BIT(5) +#define CONTROL_CLOCK_MODE_3 BIT(4) +#define CONTROL_IN_DUAL_DATA BIT(3) +#define CONTROL_CE_STOP_ACTIVE_CONTROL BIT(2) +#define CONTROL_COMMAND_MODE_MASK GENMASK(1, 0) +#define CONTROL_COMMAND_MODE_NORMAL 0 +#define CONTROL_COMMAND_MODE_FREAD 1 +#define CONTROL_COMMAND_MODE_WRITE 2 +#define CONTROL_COMMAND_MODE_USER 3 + +#define CONTROL_KEEP_MASK \ + (CONTROL_AAF_MODE | CONTROL_CE_INACTIVE_MASK | CONTROL_CLK_DIV4 | \ + CONTROL_CLOCK_FREQ_SEL_MASK | CONTROL_LSB_FIRST | CONTROL_CLOCK_MODE_3) + +/* + * The Segment Register uses a 8MB unit to encode the start address + * and the end address of the mapping window of a flash SPI slave : + * + * | byte 1 | byte 2 | byte 3 | byte 4 | + * +--------+--------+--------+--------+ + * | end | start | 0 | 0 | + */ +#define SEGMENT_ADDR_REG0 0x30 +#define SEGMENT_ADDR_START(_r) ((((_r) >> 16) & 0xFF) << 23) +#define SEGMENT_ADDR_END(_r) ((((_r) >> 24) & 0xFF) << 23) +#define SEGMENT_ADDR_VALUE(start, end) \ + (((((start) >> 23) & 0xFF) << 16) | ((((end) >> 23) & 0xFF) << 24)) +#define SEGMENT_ADDR_REG(controller, cs) \ + ((controller)->regs + SEGMENT_ADDR_REG0 + (cs) * 4) + +/* + * In user mode all data bytes read or written to the chip decode address + * range are transferred to or from the SPI bus. The range is treated as a + * fifo of arbitratry 1, 2, or 4 byte width but each write has to be aligned + * to its size. The address within the multiple 8kB range is ignored when + * sending bytes to the SPI bus. 
+ * + * On the arm architecture, as of Linux version 4.3, memcpy_fromio and + * memcpy_toio on little endian targets use the optimized memcpy routines + * that were designed for well behavied memory storage. These routines + * have a stutter if the source and destination are not both word aligned, + * once with a duplicate access to the source after aligning to the + * destination to a word boundary, and again with a duplicate access to + * the source when the final byte count is not word aligned. + * + * When writing or reading the fifo this stutter discards data or sends + * too much data to the fifo and can not be used by this driver. + * + * While the low level io string routines that implement the insl family do + * the desired accesses and memory increments, the cross architecture io + * macros make them essentially impossible to use on a memory mapped address + * instead of a a token from the call to iomap of an io port. + * + * These fifo routines use readl and friends to a constant io port and update + * the memory buffer pointer and count via explicit code. The final updates + * to len are optimistically suppressed. + */ +static int aspeed_smc_read_from_ahb(void *buf, void __iomem *src, size_t len) +{ + size_t offset = 0; + + if (IS_ALIGNED((uintptr_t)src, sizeof(uintptr_t)) && + IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) { + ioread32_rep(src, buf, len >> 2); + offset = len & ~0x3; + len -= offset; + } + ioread8_rep(src, (u8 *)buf + offset, len); + return 0; +} + +static int aspeed_smc_write_to_ahb(void __iomem *dst, const void *buf, + size_t len) +{ + size_t offset = 0; + + if (IS_ALIGNED((uintptr_t)dst, sizeof(uintptr_t)) && + IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) { + iowrite32_rep(dst, buf, len >> 2); + offset = len & ~0x3; + len -= offset; + } + iowrite8_rep(dst, (const u8 *)buf + offset, len); + return 0; +} + +static inline u32 aspeed_smc_chip_write_bit(struct aspeed_smc_chip *chip) +{ + return BIT(chip->controller->info->we0 + chip->cs); +} + +static void aspeed_smc_chip_check_config(struct aspeed_smc_chip *chip) +{ + struct aspeed_smc_controller *controller = chip->controller; + u32 reg; + + reg = readl(controller->regs + CONFIG_REG); + + if (reg & aspeed_smc_chip_write_bit(chip)) + return; + + dev_dbg(controller->dev, "config write is not set ! @%p: 0x%08x\n", + controller->regs + CONFIG_REG, reg); + reg |= aspeed_smc_chip_write_bit(chip); + writel(reg, controller->regs + CONFIG_REG); +} + +static void aspeed_smc_start_user(struct spi_nor *nor) +{ + struct aspeed_smc_chip *chip = nor->priv; + u32 ctl = chip->ctl_val[smc_base]; + + /* + * When the chip is controlled in user mode, we need write + * access to send the opcodes to it. So check the config. 
+ */ + aspeed_smc_chip_check_config(chip); + + ctl |= CONTROL_COMMAND_MODE_USER | + CONTROL_CE_STOP_ACTIVE_CONTROL; + writel(ctl, chip->ctl); + + ctl &= ~CONTROL_CE_STOP_ACTIVE_CONTROL; + writel(ctl, chip->ctl); +} + +static void aspeed_smc_stop_user(struct spi_nor *nor) +{ + struct aspeed_smc_chip *chip = nor->priv; + + u32 ctl = chip->ctl_val[smc_read]; + u32 ctl2 = ctl | CONTROL_COMMAND_MODE_USER | + CONTROL_CE_STOP_ACTIVE_CONTROL; + + writel(ctl2, chip->ctl); /* stop user CE control */ + writel(ctl, chip->ctl); /* default to fread or read mode */ +} + +static int aspeed_smc_prep(struct spi_nor *nor) +{ + struct aspeed_smc_chip *chip = nor->priv; + + mutex_lock(&chip->controller->mutex); + return 0; +} + +static void aspeed_smc_unprep(struct spi_nor *nor) +{ + struct aspeed_smc_chip *chip = nor->priv; + + mutex_unlock(&chip->controller->mutex); +} + +static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, + size_t len) +{ + struct aspeed_smc_chip *chip = nor->priv; + + aspeed_smc_start_user(nor); + aspeed_smc_write_to_ahb(chip->ahb_base, &opcode, 1); + aspeed_smc_read_from_ahb(buf, chip->ahb_base, len); + aspeed_smc_stop_user(nor); + return 0; +} + +static int aspeed_smc_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, + size_t len) +{ + struct aspeed_smc_chip *chip = nor->priv; + + aspeed_smc_start_user(nor); + aspeed_smc_write_to_ahb(chip->ahb_base, &opcode, 1); + aspeed_smc_write_to_ahb(chip->ahb_base, buf, len); + aspeed_smc_stop_user(nor); + return 0; +} + +static void aspeed_smc_send_cmd_addr(struct spi_nor *nor, u8 cmd, u32 addr) +{ + struct aspeed_smc_chip *chip = nor->priv; + __be32 temp; + u32 cmdaddr; + + switch (nor->addr_width) { + default: + WARN_ONCE(1, "Unexpected address width %u, defaulting to 3\n", + nor->addr_width); + fallthrough; + case 3: + cmdaddr = addr & 0xFFFFFF; + cmdaddr |= cmd << 24; + + temp = cpu_to_be32(cmdaddr); + aspeed_smc_write_to_ahb(chip->ahb_base, &temp, 4); + break; + case 4: + temp = cpu_to_be32(addr); + aspeed_smc_write_to_ahb(chip->ahb_base, &cmd, 1); + aspeed_smc_write_to_ahb(chip->ahb_base, &temp, 4); + break; + } +} + +static ssize_t aspeed_smc_read_user(struct spi_nor *nor, loff_t from, + size_t len, u_char *read_buf) +{ + struct aspeed_smc_chip *chip = nor->priv; + int i; + u8 dummy = 0xFF; + + aspeed_smc_start_user(nor); + aspeed_smc_send_cmd_addr(nor, nor->read_opcode, from); + for (i = 0; i < chip->nor.read_dummy / 8; i++) + aspeed_smc_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy)); + + aspeed_smc_read_from_ahb(read_buf, chip->ahb_base, len); + aspeed_smc_stop_user(nor); + return len; +} + +static ssize_t aspeed_smc_write_user(struct spi_nor *nor, loff_t to, + size_t len, const u_char *write_buf) +{ + struct aspeed_smc_chip *chip = nor->priv; + + aspeed_smc_start_user(nor); + aspeed_smc_send_cmd_addr(nor, nor->program_opcode, to); + aspeed_smc_write_to_ahb(chip->ahb_base, write_buf, len); + aspeed_smc_stop_user(nor); + return len; +} + +static int aspeed_smc_unregister(struct aspeed_smc_controller *controller) +{ + struct aspeed_smc_chip *chip; + int n; + + for (n = 0; n < controller->info->nce; n++) { + chip = controller->chips[n]; + if (chip) + mtd_device_unregister(&chip->nor.mtd); + } + + return 0; +} + +static int aspeed_smc_remove(struct platform_device *dev) +{ + return aspeed_smc_unregister(platform_get_drvdata(dev)); +} + +static const struct of_device_id aspeed_smc_matches[] = { + { .compatible = "aspeed,ast2400-fmc", .data = &fmc_2400_info }, + { .compatible = "aspeed,ast2400-spi", .data = 
&spi_2400_info }, + { .compatible = "aspeed,ast2500-fmc", .data = &fmc_2500_info }, + { .compatible = "aspeed,ast2500-spi", .data = &spi_2500_info }, + { } +}; +MODULE_DEVICE_TABLE(of, aspeed_smc_matches); + +/* + * Each chip has a mapping window defined by a segment address + * register defining a start and an end address on the AHB bus. These + * addresses can be configured to fit the chip size and offer a + * contiguous memory region across chips. For the moment, we only + * check that each chip segment is valid. + */ +static void __iomem *aspeed_smc_chip_base(struct aspeed_smc_chip *chip, + struct resource *res) +{ + struct aspeed_smc_controller *controller = chip->controller; + u32 offset = 0; + u32 reg; + + if (controller->info->nce > 1) { + reg = readl(SEGMENT_ADDR_REG(controller, chip->cs)); + + if (SEGMENT_ADDR_START(reg) >= SEGMENT_ADDR_END(reg)) + return NULL; + + offset = SEGMENT_ADDR_START(reg) - res->start; + } + + return controller->ahb_base + offset; +} + +static u32 aspeed_smc_ahb_base_phy(struct aspeed_smc_controller *controller) +{ + u32 seg0_val = readl(SEGMENT_ADDR_REG(controller, 0)); + + return SEGMENT_ADDR_START(seg0_val); +} + +static u32 chip_set_segment(struct aspeed_smc_chip *chip, u32 cs, u32 start, + u32 size) +{ + struct aspeed_smc_controller *controller = chip->controller; + void __iomem *seg_reg; + u32 seg_oldval, seg_newval, ahb_base_phy, end; + + ahb_base_phy = aspeed_smc_ahb_base_phy(controller); + + seg_reg = SEGMENT_ADDR_REG(controller, cs); + seg_oldval = readl(seg_reg); + + /* + * If the chip size is not specified, use the default segment + * size, but take into account the possible overlap with the + * previous segment + */ + if (!size) + size = SEGMENT_ADDR_END(seg_oldval) - start; + + /* + * The segment cannot exceed the maximum window size of the + * controller. + */ + if (start + size > ahb_base_phy + controller->ahb_window_size) { + size = ahb_base_phy + controller->ahb_window_size - start; + dev_warn(chip->nor.dev, "CE%d window resized to %dMB", + cs, size >> 20); + } + + end = start + size; + seg_newval = SEGMENT_ADDR_VALUE(start, end); + writel(seg_newval, seg_reg); + + /* + * Restore default value if something goes wrong. The chip + * might have set some bogus value and we would loose access + * to the chip. + */ + if (seg_newval != readl(seg_reg)) { + dev_err(chip->nor.dev, "CE%d window invalid", cs); + writel(seg_oldval, seg_reg); + start = SEGMENT_ADDR_START(seg_oldval); + end = SEGMENT_ADDR_END(seg_oldval); + size = end - start; + } + + dev_info(chip->nor.dev, "CE%d window [ 0x%.8x - 0x%.8x ] %dMB", + cs, start, end, size >> 20); + + return size; +} + +/* + * The segment register defines the mapping window on the AHB bus and + * it needs to be configured depending on the chip size. The segment + * register of the following CE also needs to be tuned in order to + * provide a contiguous window across multiple chips. + * + * This is expected to be called in increasing CE order + */ +static u32 aspeed_smc_chip_set_segment(struct aspeed_smc_chip *chip) +{ + struct aspeed_smc_controller *controller = chip->controller; + u32 ahb_base_phy, start; + u32 size = chip->nor.mtd.size; + + /* + * Each controller has a chip size limit for direct memory + * access + */ + if (size > controller->info->maxsize) + size = controller->info->maxsize; + + /* + * The AST2400 SPI controller only handles one chip and does + * not have segment registers. Let's use the chip size for the + * AHB window. 
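
A hedged worked example of the segment encoding that chip_set_segment() writes, using the SEGMENT_ADDR_* macros defined earlier in this file; the addresses are chosen only for illustration.

	/* Illustration only: encode a 32 MiB window at 0x20000000 (values assumed). */
	u32 start = 0x20000000, end = 0x22000000;
	u32 val = SEGMENT_ADDR_VALUE(start, end);
	/*
	 * start >> 23 = 0x40 lands in byte 2, end >> 23 = 0x44 in byte 1,
	 * so val == 0x44400000. SEGMENT_ADDR_START(val) and SEGMENT_ADDR_END(val)
	 * recover 0x20000000 and 0x22000000 again (8 MiB granularity).
	 */
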
+ */ + if (controller->info == &spi_2400_info) + goto out; + + /* + * The AST2500 SPI controller has a HW bug when the CE0 chip + * size reaches 128MB. Enforce a size limit of 120MB to + * prevent the controller from using bogus settings in the + * segment register. + */ + if (chip->cs == 0 && controller->info == &spi_2500_info && + size == SZ_128M) { + size = 120 << 20; + dev_info(chip->nor.dev, + "CE%d window resized to %dMB (AST2500 HW quirk)", + chip->cs, size >> 20); + } + + ahb_base_phy = aspeed_smc_ahb_base_phy(controller); + + /* + * As a start address for the current segment, use the default + * start address if we are handling CE0 or use the previous + * segment ending address + */ + if (chip->cs) { + u32 prev = readl(SEGMENT_ADDR_REG(controller, chip->cs - 1)); + + start = SEGMENT_ADDR_END(prev); + } else { + start = ahb_base_phy; + } + + size = chip_set_segment(chip, chip->cs, start, size); + + /* Update chip base address on the AHB bus */ + chip->ahb_base = controller->ahb_base + (start - ahb_base_phy); + + /* + * Now, make sure the next segment does not overlap with the + * current one we just configured, even if there is no + * available chip. That could break access in Command Mode. + */ + if (chip->cs < controller->info->nce - 1) + chip_set_segment(chip, chip->cs + 1, start + size, 0); + +out: + if (size < chip->nor.mtd.size) + dev_warn(chip->nor.dev, + "CE%d window too small for chip %dMB", + chip->cs, (u32)chip->nor.mtd.size >> 20); + + return size; +} + +static void aspeed_smc_chip_enable_write(struct aspeed_smc_chip *chip) +{ + struct aspeed_smc_controller *controller = chip->controller; + u32 reg; + + reg = readl(controller->regs + CONFIG_REG); + + reg |= aspeed_smc_chip_write_bit(chip); + writel(reg, controller->regs + CONFIG_REG); +} + +static void aspeed_smc_chip_set_type(struct aspeed_smc_chip *chip, int type) +{ + struct aspeed_smc_controller *controller = chip->controller; + u32 reg; + + chip->type = type; + + reg = readl(controller->regs + CONFIG_REG); + reg &= ~(3 << (chip->cs * 2)); + reg |= chip->type << (chip->cs * 2); + writel(reg, controller->regs + CONFIG_REG); +} + +/* + * The first chip of the AST2500 FMC flash controller is strapped by + * hardware, or autodetected, but other chips need to be set. Enforce + * the 4B setting for all chips. + */ +static void aspeed_smc_chip_set_4b(struct aspeed_smc_chip *chip) +{ + struct aspeed_smc_controller *controller = chip->controller; + u32 reg; + + reg = readl(controller->regs + CE_CONTROL_REG); + reg |= 1 << chip->cs; + writel(reg, controller->regs + CE_CONTROL_REG); +} + +/* + * The AST2400 SPI flash controller does not have a CE Control + * register. It uses the CE0 control register to set 4Byte mode at the + * controller level. + */ +static void aspeed_smc_chip_set_4b_spi_2400(struct aspeed_smc_chip *chip) +{ + chip->ctl_val[smc_base] |= CONTROL_IO_ADDRESS_4B; + chip->ctl_val[smc_read] |= CONTROL_IO_ADDRESS_4B; +} + +static int aspeed_smc_chip_setup_init(struct aspeed_smc_chip *chip, + struct resource *res) +{ + struct aspeed_smc_controller *controller = chip->controller; + const struct aspeed_smc_info *info = controller->info; + u32 reg, base_reg; + + /* + * Always turn on the write enable bit to allow opcodes to be + * sent in user mode. 
+ */ + aspeed_smc_chip_enable_write(chip); + + /* The driver only supports SPI type flash */ + if (info->hastype) + aspeed_smc_chip_set_type(chip, smc_type_spi); + + /* + * Configure chip base address in memory + */ + chip->ahb_base = aspeed_smc_chip_base(chip, res); + if (!chip->ahb_base) { + dev_warn(chip->nor.dev, "CE%d window closed", chip->cs); + return -EINVAL; + } + + /* + * Get value of the inherited control register. U-Boot usually + * does some timing calibration on the FMC chip, so it's good + * to keep them. In the future, we should handle calibration + * from Linux. + */ + reg = readl(chip->ctl); + dev_dbg(controller->dev, "control register: %08x\n", reg); + + base_reg = reg & CONTROL_KEEP_MASK; + if (base_reg != reg) { + dev_dbg(controller->dev, + "control register changed to: %08x\n", + base_reg); + } + chip->ctl_val[smc_base] = base_reg; + + /* + * Retain the prior value of the control register as the + * default if it was normal access mode. Otherwise start with + * the sanitized base value set to read mode. + */ + if ((reg & CONTROL_COMMAND_MODE_MASK) == + CONTROL_COMMAND_MODE_NORMAL) + chip->ctl_val[smc_read] = reg; + else + chip->ctl_val[smc_read] = chip->ctl_val[smc_base] | + CONTROL_COMMAND_MODE_NORMAL; + + dev_dbg(controller->dev, "default control register: %08x\n", + chip->ctl_val[smc_read]); + return 0; +} + +static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip) +{ + struct aspeed_smc_controller *controller = chip->controller; + const struct aspeed_smc_info *info = controller->info; + u32 cmd; + + if (chip->nor.addr_width == 4 && info->set_4b) + info->set_4b(chip); + + /* This is for direct AHB access when using Command Mode. */ + chip->ahb_window_size = aspeed_smc_chip_set_segment(chip); + + /* + * base mode has not been optimized yet. use it for writes. + */ + chip->ctl_val[smc_write] = chip->ctl_val[smc_base] | + chip->nor.program_opcode << CONTROL_COMMAND_SHIFT | + CONTROL_COMMAND_MODE_WRITE; + + dev_dbg(controller->dev, "write control register: %08x\n", + chip->ctl_val[smc_write]); + + /* + * TODO: Adjust clocks if fast read is supported and interpret + * SPI NOR flags to adjust controller settings. + */ + if (chip->nor.read_proto == SNOR_PROTO_1_1_1) { + if (chip->nor.read_dummy == 0) + cmd = CONTROL_COMMAND_MODE_NORMAL; + else + cmd = CONTROL_COMMAND_MODE_FREAD; + } else { + dev_err(chip->nor.dev, "unsupported SPI read mode\n"); + return -EINVAL; + } + + chip->ctl_val[smc_read] |= cmd | + CONTROL_IO_DUMMY_SET(chip->nor.read_dummy / 8); + + dev_dbg(controller->dev, "base control register: %08x\n", + chip->ctl_val[smc_read]); + return 0; +} + +static const struct spi_nor_controller_ops aspeed_smc_controller_ops = { + .prepare = aspeed_smc_prep, + .unprepare = aspeed_smc_unprep, + .read_reg = aspeed_smc_read_reg, + .write_reg = aspeed_smc_write_reg, + .read = aspeed_smc_read_user, + .write = aspeed_smc_write_user, +}; + +static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller, + struct device_node *np, struct resource *r) +{ + const struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_PP, + }; + const struct aspeed_smc_info *info = controller->info; + struct device *dev = controller->dev; + struct device_node *child; + unsigned int cs; + int ret = -ENODEV; + + for_each_available_child_of_node(np, child) { + struct aspeed_smc_chip *chip; + struct spi_nor *nor; + struct mtd_info *mtd; + + /* This driver does not support NAND or NOR flash devices. 
*/ + if (!of_device_is_compatible(child, "jedec,spi-nor")) + continue; + + ret = of_property_read_u32(child, "reg", &cs); + if (ret) { + dev_err(dev, "Couldn't not read chip select.\n"); + break; + } + + if (cs >= info->nce) { + dev_err(dev, "Chip select %d out of range.\n", + cs); + ret = -ERANGE; + break; + } + + if (controller->chips[cs]) { + dev_err(dev, "Chip select %d already in use by %s\n", + cs, dev_name(controller->chips[cs]->nor.dev)); + ret = -EBUSY; + break; + } + + chip = devm_kzalloc(controller->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) { + ret = -ENOMEM; + break; + } + + chip->controller = controller; + chip->ctl = controller->regs + info->ctl0 + cs * 4; + chip->cs = cs; + + nor = &chip->nor; + mtd = &nor->mtd; + + nor->dev = dev; + nor->priv = chip; + spi_nor_set_flash_node(nor, child); + nor->controller_ops = &aspeed_smc_controller_ops; + + ret = aspeed_smc_chip_setup_init(chip, r); + if (ret) + break; + + /* + * TODO: Add support for Dual and Quad SPI protocols + * attach when board support is present as determined + * by of property. + */ + ret = spi_nor_scan(nor, NULL, &hwcaps); + if (ret) + break; + + ret = aspeed_smc_chip_setup_finish(chip); + if (ret) + break; + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) + break; + + controller->chips[cs] = chip; + } + + if (ret) { + of_node_put(child); + aspeed_smc_unregister(controller); + } + + return ret; +} + +static int aspeed_smc_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct aspeed_smc_controller *controller; + const struct of_device_id *match; + const struct aspeed_smc_info *info; + struct resource *res; + int ret; + + match = of_match_device(aspeed_smc_matches, &pdev->dev); + if (!match || !match->data) + return -ENODEV; + info = match->data; + + controller = devm_kzalloc(&pdev->dev, + struct_size(controller, chips, info->nce), + GFP_KERNEL); + if (!controller) + return -ENOMEM; + controller->info = info; + controller->dev = dev; + + mutex_init(&controller->mutex); + platform_set_drvdata(pdev, controller); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + controller->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(controller->regs)) + return PTR_ERR(controller->regs); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + controller->ahb_base = devm_ioremap_resource(dev, res); + if (IS_ERR(controller->ahb_base)) + return PTR_ERR(controller->ahb_base); + + controller->ahb_window_size = resource_size(res); + + ret = aspeed_smc_setup_flash(controller, np, res); + if (ret) + dev_err(dev, "Aspeed SMC probe failed %d\n", ret); + + return ret; +} + +static struct platform_driver aspeed_smc_driver = { + .probe = aspeed_smc_probe, + .remove = aspeed_smc_remove, + .driver = { + .name = DEVICE_NAME, + .of_match_table = aspeed_smc_matches, + } +}; + +module_platform_driver(aspeed_smc_driver); + +MODULE_DESCRIPTION("ASPEED Static Memory Controller Driver"); +MODULE_AUTHOR("Cedric Le Goater <clg@kaod.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c new file mode 100644 index 000000000..fd2c19a04 --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c @@ -0,0 +1,500 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * HiSilicon FMC SPI NOR flash controller driver + * + * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd. 
+ */ +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/spi-nor.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* Hardware register offsets and field definitions */ +#define FMC_CFG 0x00 +#define FMC_CFG_OP_MODE_MASK BIT_MASK(0) +#define FMC_CFG_OP_MODE_BOOT 0 +#define FMC_CFG_OP_MODE_NORMAL 1 +#define FMC_CFG_FLASH_SEL(type) (((type) & 0x3) << 1) +#define FMC_CFG_FLASH_SEL_MASK 0x6 +#define FMC_ECC_TYPE(type) (((type) & 0x7) << 5) +#define FMC_ECC_TYPE_MASK GENMASK(7, 5) +#define SPI_NOR_ADDR_MODE_MASK BIT_MASK(10) +#define SPI_NOR_ADDR_MODE_3BYTES (0x0 << 10) +#define SPI_NOR_ADDR_MODE_4BYTES (0x1 << 10) +#define FMC_GLOBAL_CFG 0x04 +#define FMC_GLOBAL_CFG_WP_ENABLE BIT(6) +#define FMC_SPI_TIMING_CFG 0x08 +#define TIMING_CFG_TCSH(nr) (((nr) & 0xf) << 8) +#define TIMING_CFG_TCSS(nr) (((nr) & 0xf) << 4) +#define TIMING_CFG_TSHSL(nr) ((nr) & 0xf) +#define CS_HOLD_TIME 0x6 +#define CS_SETUP_TIME 0x6 +#define CS_DESELECT_TIME 0xf +#define FMC_INT 0x18 +#define FMC_INT_OP_DONE BIT(0) +#define FMC_INT_CLR 0x20 +#define FMC_CMD 0x24 +#define FMC_CMD_CMD1(cmd) ((cmd) & 0xff) +#define FMC_ADDRL 0x2c +#define FMC_OP_CFG 0x30 +#define OP_CFG_FM_CS(cs) ((cs) << 11) +#define OP_CFG_MEM_IF_TYPE(type) (((type) & 0x7) << 7) +#define OP_CFG_ADDR_NUM(addr) (((addr) & 0x7) << 4) +#define OP_CFG_DUMMY_NUM(dummy) ((dummy) & 0xf) +#define FMC_DATA_NUM 0x38 +#define FMC_DATA_NUM_CNT(cnt) ((cnt) & GENMASK(13, 0)) +#define FMC_OP 0x3c +#define FMC_OP_DUMMY_EN BIT(8) +#define FMC_OP_CMD1_EN BIT(7) +#define FMC_OP_ADDR_EN BIT(6) +#define FMC_OP_WRITE_DATA_EN BIT(5) +#define FMC_OP_READ_DATA_EN BIT(2) +#define FMC_OP_READ_STATUS_EN BIT(1) +#define FMC_OP_REG_OP_START BIT(0) +#define FMC_DMA_LEN 0x40 +#define FMC_DMA_LEN_SET(len) ((len) & GENMASK(27, 0)) +#define FMC_DMA_SADDR_D0 0x4c +#define HIFMC_DMA_MAX_LEN (4096) +#define HIFMC_DMA_MASK (HIFMC_DMA_MAX_LEN - 1) +#define FMC_OP_DMA 0x68 +#define OP_CTRL_RD_OPCODE(code) (((code) & 0xff) << 16) +#define OP_CTRL_WR_OPCODE(code) (((code) & 0xff) << 8) +#define OP_CTRL_RW_OP(op) ((op) << 1) +#define OP_CTRL_DMA_OP_READY BIT(0) +#define FMC_OP_READ 0x0 +#define FMC_OP_WRITE 0x1 +#define FMC_WAIT_TIMEOUT 1000000 + +enum hifmc_iftype { + IF_TYPE_STD, + IF_TYPE_DUAL, + IF_TYPE_DIO, + IF_TYPE_QUAD, + IF_TYPE_QIO, +}; + +struct hifmc_priv { + u32 chipselect; + u32 clkrate; + struct hifmc_host *host; +}; + +#define HIFMC_MAX_CHIP_NUM 2 +struct hifmc_host { + struct device *dev; + struct mutex lock; + + void __iomem *regbase; + void __iomem *iobase; + struct clk *clk; + void *buffer; + dma_addr_t dma_buffer; + + struct spi_nor *nor[HIFMC_MAX_CHIP_NUM]; + u32 num_chip; +}; + +static inline int hisi_spi_nor_wait_op_finish(struct hifmc_host *host) +{ + u32 reg; + + return readl_poll_timeout(host->regbase + FMC_INT, reg, + (reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT); +} + +static int hisi_spi_nor_get_if_type(enum spi_nor_protocol proto) +{ + enum hifmc_iftype if_type; + + switch (proto) { + case SNOR_PROTO_1_1_2: + if_type = IF_TYPE_DUAL; + break; + case SNOR_PROTO_1_2_2: + if_type = IF_TYPE_DIO; + break; + case SNOR_PROTO_1_1_4: + if_type = IF_TYPE_QUAD; + break; + case SNOR_PROTO_1_4_4: + if_type = IF_TYPE_QIO; + break; + case SNOR_PROTO_1_1_1: + default: + if_type = IF_TYPE_STD; + break; + } + + return if_type; +} + +static void hisi_spi_nor_init(struct hifmc_host *host) +{ + u32 reg; + + reg = 
TIMING_CFG_TCSH(CS_HOLD_TIME) + | TIMING_CFG_TCSS(CS_SETUP_TIME) + | TIMING_CFG_TSHSL(CS_DESELECT_TIME); + writel(reg, host->regbase + FMC_SPI_TIMING_CFG); +} + +static int hisi_spi_nor_prep(struct spi_nor *nor) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + int ret; + + mutex_lock(&host->lock); + + ret = clk_set_rate(host->clk, priv->clkrate); + if (ret) + goto out; + + ret = clk_prepare_enable(host->clk); + if (ret) + goto out; + + return 0; + +out: + mutex_unlock(&host->lock); + return ret; +} + +static void hisi_spi_nor_unprep(struct spi_nor *nor) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + + clk_disable_unprepare(host->clk); + mutex_unlock(&host->lock); +} + +static int hisi_spi_nor_op_reg(struct spi_nor *nor, + u8 opcode, size_t len, u8 optype) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + u32 reg; + + reg = FMC_CMD_CMD1(opcode); + writel(reg, host->regbase + FMC_CMD); + + reg = FMC_DATA_NUM_CNT(len); + writel(reg, host->regbase + FMC_DATA_NUM); + + reg = OP_CFG_FM_CS(priv->chipselect); + writel(reg, host->regbase + FMC_OP_CFG); + + writel(0xff, host->regbase + FMC_INT_CLR); + reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START | optype; + writel(reg, host->regbase + FMC_OP); + + return hisi_spi_nor_wait_op_finish(host); +} + +static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, + size_t len) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + int ret; + + ret = hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_READ_DATA_EN); + if (ret) + return ret; + + memcpy_fromio(buf, host->iobase, len); + return 0; +} + +static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode, + const u8 *buf, size_t len) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + + if (len) + memcpy_toio(host->iobase, buf, len); + + return hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_WRITE_DATA_EN); +} + +static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off, + dma_addr_t dma_buf, size_t len, u8 op_type) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + u8 if_type = 0; + u32 reg; + + reg = readl(host->regbase + FMC_CFG); + reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK); + reg |= FMC_CFG_OP_MODE_NORMAL; + reg |= (nor->addr_width == 4) ? SPI_NOR_ADDR_MODE_4BYTES + : SPI_NOR_ADDR_MODE_3BYTES; + writel(reg, host->regbase + FMC_CFG); + + writel(start_off, host->regbase + FMC_ADDRL); + writel(dma_buf, host->regbase + FMC_DMA_SADDR_D0); + writel(FMC_DMA_LEN_SET(len), host->regbase + FMC_DMA_LEN); + + reg = OP_CFG_FM_CS(priv->chipselect); + if (op_type == FMC_OP_READ) + if_type = hisi_spi_nor_get_if_type(nor->read_proto); + else + if_type = hisi_spi_nor_get_if_type(nor->write_proto); + reg |= OP_CFG_MEM_IF_TYPE(if_type); + if (op_type == FMC_OP_READ) + reg |= OP_CFG_DUMMY_NUM(nor->read_dummy >> 3); + writel(reg, host->regbase + FMC_OP_CFG); + + writel(0xff, host->regbase + FMC_INT_CLR); + reg = OP_CTRL_RW_OP(op_type) | OP_CTRL_DMA_OP_READY; + reg |= (op_type == FMC_OP_READ) + ? 
OP_CTRL_RD_OPCODE(nor->read_opcode) + : OP_CTRL_WR_OPCODE(nor->program_opcode); + writel(reg, host->regbase + FMC_OP_DMA); + + return hisi_spi_nor_wait_op_finish(host); +} + +static ssize_t hisi_spi_nor_read(struct spi_nor *nor, loff_t from, size_t len, + u_char *read_buf) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + size_t offset; + int ret; + + for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) { + size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset); + + ret = hisi_spi_nor_dma_transfer(nor, + from + offset, host->dma_buffer, trans, FMC_OP_READ); + if (ret) { + dev_warn(nor->dev, "DMA read timeout\n"); + return ret; + } + memcpy(read_buf + offset, host->buffer, trans); + } + + return len; +} + +static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to, + size_t len, const u_char *write_buf) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + size_t offset; + int ret; + + for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) { + size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset); + + memcpy(host->buffer, write_buf + offset, trans); + ret = hisi_spi_nor_dma_transfer(nor, + to + offset, host->dma_buffer, trans, FMC_OP_WRITE); + if (ret) { + dev_warn(nor->dev, "DMA write timeout\n"); + return ret; + } + } + + return len; +} + +static const struct spi_nor_controller_ops hisi_controller_ops = { + .prepare = hisi_spi_nor_prep, + .unprepare = hisi_spi_nor_unprep, + .read_reg = hisi_spi_nor_read_reg, + .write_reg = hisi_spi_nor_write_reg, + .read = hisi_spi_nor_read, + .write = hisi_spi_nor_write, +}; + +/** + * Get spi flash device information and register it as a mtd device. + */ +static int hisi_spi_nor_register(struct device_node *np, + struct hifmc_host *host) +{ + const struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_READ_1_1_2 | + SNOR_HWCAPS_READ_1_1_4 | + SNOR_HWCAPS_PP, + }; + struct device *dev = host->dev; + struct spi_nor *nor; + struct hifmc_priv *priv; + struct mtd_info *mtd; + int ret; + + nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL); + if (!nor) + return -ENOMEM; + + nor->dev = dev; + spi_nor_set_flash_node(nor, np); + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = of_property_read_u32(np, "reg", &priv->chipselect); + if (ret) { + dev_err(dev, "There's no reg property for %pOF\n", + np); + return ret; + } + + ret = of_property_read_u32(np, "spi-max-frequency", + &priv->clkrate); + if (ret) { + dev_err(dev, "There's no spi-max-frequency property for %pOF\n", + np); + return ret; + } + priv->host = host; + nor->priv = priv; + nor->controller_ops = &hisi_controller_ops; + + ret = spi_nor_scan(nor, NULL, &hwcaps); + if (ret) + return ret; + + mtd = &nor->mtd; + mtd->name = np->name; + ret = mtd_device_register(mtd, NULL, 0); + if (ret) + return ret; + + host->nor[host->num_chip] = nor; + host->num_chip++; + return 0; +} + +static void hisi_spi_nor_unregister_all(struct hifmc_host *host) +{ + int i; + + for (i = 0; i < host->num_chip; i++) + mtd_device_unregister(&host->nor[i]->mtd); +} + +static int hisi_spi_nor_register_all(struct hifmc_host *host) +{ + struct device *dev = host->dev; + struct device_node *np; + int ret; + + for_each_available_child_of_node(dev->of_node, np) { + ret = hisi_spi_nor_register(np, host); + if (ret) { + of_node_put(np); + goto fail; + } + + if (host->num_chip == HIFMC_MAX_CHIP_NUM) { + dev_warn(dev, "Flash device number exceeds the 
maximum chipselect number\n"); + of_node_put(np); + break; + } + } + + return 0; + +fail: + hisi_spi_nor_unregister_all(host); + return ret; +} + +static int hisi_spi_nor_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct hifmc_host *host; + int ret; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; + + platform_set_drvdata(pdev, host); + host->dev = dev; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control"); + host->regbase = devm_ioremap_resource(dev, res); + if (IS_ERR(host->regbase)) + return PTR_ERR(host->regbase); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory"); + host->iobase = devm_ioremap_resource(dev, res); + if (IS_ERR(host->iobase)) + return PTR_ERR(host->iobase); + + host->clk = devm_clk_get(dev, NULL); + if (IS_ERR(host->clk)) + return PTR_ERR(host->clk); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_warn(dev, "Unable to set dma mask\n"); + return ret; + } + + host->buffer = dmam_alloc_coherent(dev, HIFMC_DMA_MAX_LEN, + &host->dma_buffer, GFP_KERNEL); + if (!host->buffer) + return -ENOMEM; + + ret = clk_prepare_enable(host->clk); + if (ret) + return ret; + + mutex_init(&host->lock); + hisi_spi_nor_init(host); + ret = hisi_spi_nor_register_all(host); + if (ret) + mutex_destroy(&host->lock); + + clk_disable_unprepare(host->clk); + return ret; +} + +static int hisi_spi_nor_remove(struct platform_device *pdev) +{ + struct hifmc_host *host = platform_get_drvdata(pdev); + + hisi_spi_nor_unregister_all(host); + mutex_destroy(&host->lock); + return 0; +} + +static const struct of_device_id hisi_spi_nor_dt_ids[] = { + { .compatible = "hisilicon,fmc-spi-nor"}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, hisi_spi_nor_dt_ids); + +static struct platform_driver hisi_spi_nor_driver = { + .driver = { + .name = "hisi-sfc", + .of_match_table = hisi_spi_nor_dt_ids, + }, + .probe = hisi_spi_nor_probe, + .remove = hisi_spi_nor_remove, +}; +module_platform_driver(hisi_spi_nor_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("HiSilicon SPI Nor Flash Controller Driver"); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c new file mode 100644 index 000000000..8a3c1f3c2 --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel PCH/PCU SPI flash PCI driver. 
+ * + * Copyright (C) 2016, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "intel-spi.h" + +#define BCR 0xdc +#define BCR_WPD BIT(0) + +static bool intel_spi_pci_set_writeable(void __iomem *base, void *data) +{ + struct pci_dev *pdev = data; + u32 bcr; + + /* Try to make the chip read/write */ + pci_read_config_dword(pdev, BCR, &bcr); + if (!(bcr & BCR_WPD)) { + bcr |= BCR_WPD; + pci_write_config_dword(pdev, BCR, bcr); + pci_read_config_dword(pdev, BCR, &bcr); + } + + return bcr & BCR_WPD; +} + +static const struct intel_spi_boardinfo bxt_info = { + .type = INTEL_SPI_BXT, + .set_writeable = intel_spi_pci_set_writeable, +}; + +static const struct intel_spi_boardinfo cnl_info = { + .type = INTEL_SPI_CNL, + .set_writeable = intel_spi_pci_set_writeable, +}; + +static int intel_spi_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct intel_spi_boardinfo *info; + struct intel_spi *ispi; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info), + GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->data = pdev; + ispi = intel_spi_probe(&pdev->dev, &pdev->resource[0], info); + if (IS_ERR(ispi)) + return PTR_ERR(ispi); + + pci_set_drvdata(pdev, ispi); + return 0; +} + +static void intel_spi_pci_remove(struct pci_dev *pdev) +{ + intel_spi_remove(pci_get_drvdata(pdev)); +} + +static const struct pci_device_id intel_spi_pci_ids[] = { + { PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x1bca), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&bxt_info }, + { }, +}; +MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids); + +static struct pci_driver intel_spi_pci_driver = { + .name = "intel-spi", + .id_table = intel_spi_pci_ids, + .probe = intel_spi_pci_probe, + .remove = intel_spi_pci_remove, +}; + +module_pci_driver(intel_spi_pci_driver); + +MODULE_DESCRIPTION("Intel PCH/PCU SPI flash PCI driver"); +MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-platform.c b/drivers/mtd/spi-nor/controllers/intel-spi-platform.c new file mode 100644 index 000000000..f80f1086f --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/intel-spi-platform.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel PCH/PCU SPI flash platform driver. 
+ * + * Copyright (C) 2016, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/ioport.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include "intel-spi.h" + +static int intel_spi_platform_probe(struct platform_device *pdev) +{ + struct intel_spi_boardinfo *info; + struct intel_spi *ispi; + struct resource *mem; + + info = dev_get_platdata(&pdev->dev); + if (!info) + return -EINVAL; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ispi = intel_spi_probe(&pdev->dev, mem, info); + if (IS_ERR(ispi)) + return PTR_ERR(ispi); + + platform_set_drvdata(pdev, ispi); + return 0; +} + +static int intel_spi_platform_remove(struct platform_device *pdev) +{ + struct intel_spi *ispi = platform_get_drvdata(pdev); + + return intel_spi_remove(ispi); +} + +static struct platform_driver intel_spi_platform_driver = { + .probe = intel_spi_platform_probe, + .remove = intel_spi_platform_remove, + .driver = { + .name = "intel-spi", + }, +}; + +module_platform_driver(intel_spi_platform_driver); + +MODULE_DESCRIPTION("Intel PCH/PCU SPI flash platform driver"); +MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:intel-spi"); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.c b/drivers/mtd/spi-nor/controllers/intel-spi.c new file mode 100644 index 000000000..6c802db6b --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/intel-spi.c @@ -0,0 +1,966 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel PCH/PCU SPI flash driver. + * + * Copyright (C) 2016, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/err.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/sizes.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/mtd/spi-nor.h> +#include <linux/platform_data/intel-spi.h> + +#include "intel-spi.h" + +/* Offsets are from @ispi->base */ +#define BFPREG 0x00 + +#define HSFSTS_CTL 0x04 +#define HSFSTS_CTL_FSMIE BIT(31) +#define HSFSTS_CTL_FDBC_SHIFT 24 +#define HSFSTS_CTL_FDBC_MASK (0x3f << HSFSTS_CTL_FDBC_SHIFT) + +#define HSFSTS_CTL_FCYCLE_SHIFT 17 +#define HSFSTS_CTL_FCYCLE_MASK (0x0f << HSFSTS_CTL_FCYCLE_SHIFT) +/* HW sequencer opcodes */ +#define HSFSTS_CTL_FCYCLE_READ (0x00 << HSFSTS_CTL_FCYCLE_SHIFT) +#define HSFSTS_CTL_FCYCLE_WRITE (0x02 << HSFSTS_CTL_FCYCLE_SHIFT) +#define HSFSTS_CTL_FCYCLE_ERASE (0x03 << HSFSTS_CTL_FCYCLE_SHIFT) +#define HSFSTS_CTL_FCYCLE_ERASE_64K (0x04 << HSFSTS_CTL_FCYCLE_SHIFT) +#define HSFSTS_CTL_FCYCLE_RDID (0x06 << HSFSTS_CTL_FCYCLE_SHIFT) +#define HSFSTS_CTL_FCYCLE_WRSR (0x07 << HSFSTS_CTL_FCYCLE_SHIFT) +#define HSFSTS_CTL_FCYCLE_RDSR (0x08 << HSFSTS_CTL_FCYCLE_SHIFT) + +#define HSFSTS_CTL_FGO BIT(16) +#define HSFSTS_CTL_FLOCKDN BIT(15) +#define HSFSTS_CTL_FDV BIT(14) +#define HSFSTS_CTL_SCIP BIT(5) +#define HSFSTS_CTL_AEL BIT(2) +#define HSFSTS_CTL_FCERR BIT(1) +#define HSFSTS_CTL_FDONE BIT(0) + +#define FADDR 0x08 +#define DLOCK 0x0c +#define FDATA(n) (0x10 + ((n) * 4)) + +#define FRACC 0x50 + +#define FREG(n) (0x54 + ((n) * 4)) +#define FREG_BASE_MASK GENMASK(14, 0) +#define FREG_LIMIT_SHIFT 16 +#define FREG_LIMIT_MASK GENMASK(30, 16) + +/* Offset is from @ispi->pregs */ +#define PR(n) ((n) * 4) +#define PR_WPE BIT(31) +#define PR_LIMIT_SHIFT 16 +#define PR_LIMIT_MASK GENMASK(30, 16) +#define PR_RPE BIT(15) +#define PR_BASE_MASK GENMASK(14, 0) + +/* Offsets are from 
@ispi->sregs */ +#define SSFSTS_CTL 0x00 +#define SSFSTS_CTL_FSMIE BIT(23) +#define SSFSTS_CTL_DS BIT(22) +#define SSFSTS_CTL_DBC_SHIFT 16 +#define SSFSTS_CTL_SPOP BIT(11) +#define SSFSTS_CTL_ACS BIT(10) +#define SSFSTS_CTL_SCGO BIT(9) +#define SSFSTS_CTL_COP_SHIFT 12 +#define SSFSTS_CTL_FRS BIT(7) +#define SSFSTS_CTL_DOFRS BIT(6) +#define SSFSTS_CTL_AEL BIT(4) +#define SSFSTS_CTL_FCERR BIT(3) +#define SSFSTS_CTL_FDONE BIT(2) +#define SSFSTS_CTL_SCIP BIT(0) + +#define PREOP_OPTYPE 0x04 +#define OPMENU0 0x08 +#define OPMENU1 0x0c + +#define OPTYPE_READ_NO_ADDR 0 +#define OPTYPE_WRITE_NO_ADDR 1 +#define OPTYPE_READ_WITH_ADDR 2 +#define OPTYPE_WRITE_WITH_ADDR 3 + +/* CPU specifics */ +#define BYT_PR 0x74 +#define BYT_SSFSTS_CTL 0x90 +#define BYT_BCR 0xfc +#define BYT_BCR_WPD BIT(0) +#define BYT_FREG_NUM 5 +#define BYT_PR_NUM 5 + +#define LPT_PR 0x74 +#define LPT_SSFSTS_CTL 0x90 +#define LPT_FREG_NUM 5 +#define LPT_PR_NUM 5 + +#define BXT_PR 0x84 +#define BXT_SSFSTS_CTL 0xa0 +#define BXT_FREG_NUM 12 +#define BXT_PR_NUM 6 + +#define CNL_PR 0x84 +#define CNL_FREG_NUM 6 +#define CNL_PR_NUM 5 + +#define LVSCC 0xc4 +#define UVSCC 0xc8 +#define ERASE_OPCODE_SHIFT 8 +#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) +#define ERASE_64K_OPCODE_SHIFT 16 +#define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT) + +#define INTEL_SPI_TIMEOUT 5000 /* ms */ +#define INTEL_SPI_FIFO_SZ 64 + +/** + * struct intel_spi - Driver private data + * @dev: Device pointer + * @info: Pointer to board specific info + * @nor: SPI NOR layer structure + * @base: Beginning of MMIO space + * @pregs: Start of protection registers + * @sregs: Start of software sequencer registers + * @nregions: Maximum number of regions + * @pr_num: Maximum number of protected range registers + * @locked: Is SPI setting locked + * @swseq_reg: Use SW sequencer in register reads/writes + * @swseq_erase: Use SW sequencer in erase operation + * @erase_64k: 64k erase supported + * @atomic_preopcode: Holds preopcode when atomic sequence is requested + * @opcodes: Opcodes which are supported. This are programmed by BIOS + * before it locks down the controller. 
+ */ +struct intel_spi { + struct device *dev; + const struct intel_spi_boardinfo *info; + struct spi_nor nor; + void __iomem *base; + void __iomem *pregs; + void __iomem *sregs; + size_t nregions; + size_t pr_num; + bool locked; + bool swseq_reg; + bool swseq_erase; + bool erase_64k; + u8 atomic_preopcode; + u8 opcodes[8]; +}; + +static bool writeable; +module_param(writeable, bool, 0); +MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)"); + +static void intel_spi_dump_regs(struct intel_spi *ispi) +{ + u32 value; + int i; + + dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG)); + + value = readl(ispi->base + HSFSTS_CTL); + dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value); + if (value & HSFSTS_CTL_FLOCKDN) + dev_dbg(ispi->dev, "-> Locked\n"); + + dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR)); + dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK)); + + for (i = 0; i < 16; i++) + dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n", + i, readl(ispi->base + FDATA(i))); + + dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC)); + + for (i = 0; i < ispi->nregions; i++) + dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i, + readl(ispi->base + FREG(i))); + for (i = 0; i < ispi->pr_num; i++) + dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i, + readl(ispi->pregs + PR(i))); + + if (ispi->sregs) { + value = readl(ispi->sregs + SSFSTS_CTL); + dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value); + dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n", + readl(ispi->sregs + PREOP_OPTYPE)); + dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", + readl(ispi->sregs + OPMENU0)); + dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", + readl(ispi->sregs + OPMENU1)); + } + + if (ispi->info->type == INTEL_SPI_BYT) + dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR)); + + dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC)); + dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC)); + + dev_dbg(ispi->dev, "Protected regions:\n"); + for (i = 0; i < ispi->pr_num; i++) { + u32 base, limit; + + value = readl(ispi->pregs + PR(i)); + if (!(value & (PR_WPE | PR_RPE))) + continue; + + limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT; + base = value & PR_BASE_MASK; + + dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n", + i, base << 12, (limit << 12) | 0xfff, + value & PR_WPE ? 'W' : '.', + value & PR_RPE ? 'R' : '.'); + } + + dev_dbg(ispi->dev, "Flash regions:\n"); + for (i = 0; i < ispi->nregions; i++) { + u32 region, base, limit; + + region = readl(ispi->base + FREG(i)); + base = region & FREG_BASE_MASK; + limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT; + + if (base >= limit || (i > 0 && limit == 0)) + dev_dbg(ispi->dev, " %02d disabled\n", i); + else + dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n", + i, base << 12, (limit << 12) | 0xfff); + } + + dev_dbg(ispi->dev, "Using %cW sequencer for register access\n", + ispi->swseq_reg ? 'S' : 'H'); + dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n", + ispi->swseq_erase ? 
'S' : 'H'); +} + +/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */ +static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size) +{ + size_t bytes; + int i = 0; + + if (size > INTEL_SPI_FIFO_SZ) + return -EINVAL; + + while (size > 0) { + bytes = min_t(size_t, size, 4); + memcpy_fromio(buf, ispi->base + FDATA(i), bytes); + size -= bytes; + buf += bytes; + i++; + } + + return 0; +} + +/* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */ +static int intel_spi_write_block(struct intel_spi *ispi, const void *buf, + size_t size) +{ + size_t bytes; + int i = 0; + + if (size > INTEL_SPI_FIFO_SZ) + return -EINVAL; + + while (size > 0) { + bytes = min_t(size_t, size, 4); + memcpy_toio(ispi->base + FDATA(i), buf, bytes); + size -= bytes; + buf += bytes; + i++; + } + + return 0; +} + +static int intel_spi_wait_hw_busy(struct intel_spi *ispi) +{ + u32 val; + + return readl_poll_timeout(ispi->base + HSFSTS_CTL, val, + !(val & HSFSTS_CTL_SCIP), 0, + INTEL_SPI_TIMEOUT * 1000); +} + +static int intel_spi_wait_sw_busy(struct intel_spi *ispi) +{ + u32 val; + + return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val, + !(val & SSFSTS_CTL_SCIP), 0, + INTEL_SPI_TIMEOUT * 1000); +} + +static bool intel_spi_set_writeable(struct intel_spi *ispi) +{ + if (!ispi->info->set_writeable) + return false; + + return ispi->info->set_writeable(ispi->base, ispi->info->data); +} + +static int intel_spi_init(struct intel_spi *ispi) +{ + u32 opmenu0, opmenu1, lvscc, uvscc, val; + int i; + + switch (ispi->info->type) { + case INTEL_SPI_BYT: + ispi->sregs = ispi->base + BYT_SSFSTS_CTL; + ispi->pregs = ispi->base + BYT_PR; + ispi->nregions = BYT_FREG_NUM; + ispi->pr_num = BYT_PR_NUM; + ispi->swseq_reg = true; + break; + + case INTEL_SPI_LPT: + ispi->sregs = ispi->base + LPT_SSFSTS_CTL; + ispi->pregs = ispi->base + LPT_PR; + ispi->nregions = LPT_FREG_NUM; + ispi->pr_num = LPT_PR_NUM; + ispi->swseq_reg = true; + break; + + case INTEL_SPI_BXT: + ispi->sregs = ispi->base + BXT_SSFSTS_CTL; + ispi->pregs = ispi->base + BXT_PR; + ispi->nregions = BXT_FREG_NUM; + ispi->pr_num = BXT_PR_NUM; + ispi->erase_64k = true; + break; + + case INTEL_SPI_CNL: + ispi->sregs = NULL; + ispi->pregs = ispi->base + CNL_PR; + ispi->nregions = CNL_FREG_NUM; + ispi->pr_num = CNL_PR_NUM; + break; + + default: + return -EINVAL; + } + + /* Try to disable write protection if user asked to do so */ + if (writeable && !intel_spi_set_writeable(ispi)) { + dev_warn(ispi->dev, "can't disable chip write protection\n"); + writeable = false; + } + + /* Disable #SMI generation from HW sequencer */ + val = readl(ispi->base + HSFSTS_CTL); + val &= ~HSFSTS_CTL_FSMIE; + writel(val, ispi->base + HSFSTS_CTL); + + /* + * Determine whether erase operation should use HW or SW sequencer. + * + * The HW sequencer has a predefined list of opcodes, with only the + * erase opcode being programmable in LVSCC and UVSCC registers. + * If these registers don't contain a valid erase opcode, erase + * cannot be done using HW sequencer. 
+ */ + lvscc = readl(ispi->base + LVSCC); + uvscc = readl(ispi->base + UVSCC); + if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK)) + ispi->swseq_erase = true; + /* SPI controller on Intel BXT supports 64K erase opcode */ + if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase) + if (!(lvscc & ERASE_64K_OPCODE_MASK) || + !(uvscc & ERASE_64K_OPCODE_MASK)) + ispi->erase_64k = false; + + if (ispi->sregs == NULL && (ispi->swseq_reg || ispi->swseq_erase)) { + dev_err(ispi->dev, "software sequencer not supported, but required\n"); + return -EINVAL; + } + + /* + * Some controllers can only do basic operations using hardware + * sequencer. All other operations are supposed to be carried out + * using software sequencer. + */ + if (ispi->swseq_reg) { + /* Disable #SMI generation from SW sequencer */ + val = readl(ispi->sregs + SSFSTS_CTL); + val &= ~SSFSTS_CTL_FSMIE; + writel(val, ispi->sregs + SSFSTS_CTL); + } + + /* Check controller's lock status */ + val = readl(ispi->base + HSFSTS_CTL); + ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN); + + if (ispi->locked && ispi->sregs) { + /* + * BIOS programs allowed opcodes and then locks down the + * register. So read back what opcodes it decided to support. + * That's the set we are going to support as well. + */ + opmenu0 = readl(ispi->sregs + OPMENU0); + opmenu1 = readl(ispi->sregs + OPMENU1); + + if (opmenu0 && opmenu1) { + for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) { + ispi->opcodes[i] = opmenu0 >> i * 8; + ispi->opcodes[i + 4] = opmenu1 >> i * 8; + } + } + } + + intel_spi_dump_regs(ispi); + + return 0; +} + +static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype) +{ + int i; + int preop; + + if (ispi->locked) { + for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) + if (ispi->opcodes[i] == opcode) + return i; + + return -EINVAL; + } + + /* The lock is off, so just use index 0 */ + writel(opcode, ispi->sregs + OPMENU0); + preop = readw(ispi->sregs + PREOP_OPTYPE); + writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE); + + return 0; +} + +static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len) +{ + u32 val, status; + int ret; + + val = readl(ispi->base + HSFSTS_CTL); + val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK); + + switch (opcode) { + case SPINOR_OP_RDID: + val |= HSFSTS_CTL_FCYCLE_RDID; + break; + case SPINOR_OP_WRSR: + val |= HSFSTS_CTL_FCYCLE_WRSR; + break; + case SPINOR_OP_RDSR: + val |= HSFSTS_CTL_FCYCLE_RDSR; + break; + default: + return -EINVAL; + } + + if (len > INTEL_SPI_FIFO_SZ) + return -EINVAL; + + val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT; + val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; + val |= HSFSTS_CTL_FGO; + writel(val, ispi->base + HSFSTS_CTL); + + ret = intel_spi_wait_hw_busy(ispi); + if (ret) + return ret; + + status = readl(ispi->base + HSFSTS_CTL); + if (status & HSFSTS_CTL_FCERR) + return -EIO; + else if (status & HSFSTS_CTL_AEL) + return -EACCES; + + return 0; +} + +static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len, + int optype) +{ + u32 val = 0, status; + u8 atomic_preopcode; + int ret; + + ret = intel_spi_opcode_index(ispi, opcode, optype); + if (ret < 0) + return ret; + + if (len > INTEL_SPI_FIFO_SZ) + return -EINVAL; + + /* + * Always clear it after each SW sequencer operation regardless + * of whether it is successful or not. 
+ */ + atomic_preopcode = ispi->atomic_preopcode; + ispi->atomic_preopcode = 0; + + /* Only mark 'Data Cycle' bit when there is data to be transferred */ + if (len > 0) + val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; + val |= ret << SSFSTS_CTL_COP_SHIFT; + val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE; + val |= SSFSTS_CTL_SCGO; + if (atomic_preopcode) { + u16 preop; + + switch (optype) { + case OPTYPE_WRITE_NO_ADDR: + case OPTYPE_WRITE_WITH_ADDR: + /* Pick matching preopcode for the atomic sequence */ + preop = readw(ispi->sregs + PREOP_OPTYPE); + if ((preop & 0xff) == atomic_preopcode) + ; /* Do nothing */ + else if ((preop >> 8) == atomic_preopcode) + val |= SSFSTS_CTL_SPOP; + else + return -EINVAL; + + /* Enable atomic sequence */ + val |= SSFSTS_CTL_ACS; + break; + + default: + return -EINVAL; + } + + } + writel(val, ispi->sregs + SSFSTS_CTL); + + ret = intel_spi_wait_sw_busy(ispi); + if (ret) + return ret; + + status = readl(ispi->sregs + SSFSTS_CTL); + if (status & SSFSTS_CTL_FCERR) + return -EIO; + else if (status & SSFSTS_CTL_AEL) + return -EACCES; + + return 0; +} + +static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, + size_t len) +{ + struct intel_spi *ispi = nor->priv; + int ret; + + /* Address of the first chip */ + writel(0, ispi->base + FADDR); + + if (ispi->swseq_reg) + ret = intel_spi_sw_cycle(ispi, opcode, len, + OPTYPE_READ_NO_ADDR); + else + ret = intel_spi_hw_cycle(ispi, opcode, len); + + if (ret) + return ret; + + return intel_spi_read_block(ispi, buf, len); +} + +static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, + size_t len) +{ + struct intel_spi *ispi = nor->priv; + int ret; + + /* + * This is handled with atomic operation and preop code in Intel + * controller so we only verify that it is available. If the + * controller is not locked, program the opcode to the PREOP + * register for later use. + * + * When hardware sequencer is used there is no need to program + * any opcodes (it handles them automatically as part of a command). + */ + if (opcode == SPINOR_OP_WREN) { + u16 preop; + + if (!ispi->swseq_reg) + return 0; + + preop = readw(ispi->sregs + PREOP_OPTYPE); + if ((preop & 0xff) != opcode && (preop >> 8) != opcode) { + if (ispi->locked) + return -EINVAL; + writel(opcode, ispi->sregs + PREOP_OPTYPE); + } + + /* + * This enables atomic sequence on next SW sycle. Will + * be cleared after next operation. + */ + ispi->atomic_preopcode = opcode; + return 0; + } + + /* + * We hope that HW sequencer will do the right thing automatically and + * with the SW sequencer we cannot use preopcode anyway, so just ignore + * the Write Disable operation and pretend it was completed + * successfully. + */ + if (opcode == SPINOR_OP_WRDI) + return 0; + + writel(0, ispi->base + FADDR); + + /* Write the value beforehand */ + ret = intel_spi_write_block(ispi, buf, len); + if (ret) + return ret; + + if (ispi->swseq_reg) + return intel_spi_sw_cycle(ispi, opcode, len, + OPTYPE_WRITE_NO_ADDR); + return intel_spi_hw_cycle(ispi, opcode, len); +} + +static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len, + u_char *read_buf) +{ + struct intel_spi *ispi = nor->priv; + size_t block_size, retlen = 0; + u32 val, status; + ssize_t ret; + + /* + * Atomic sequence is not expected with HW sequencer reads. Make + * sure it is cleared regardless. 
+ */ + if (WARN_ON_ONCE(ispi->atomic_preopcode)) + ispi->atomic_preopcode = 0; + + switch (nor->read_opcode) { + case SPINOR_OP_READ: + case SPINOR_OP_READ_FAST: + case SPINOR_OP_READ_4B: + case SPINOR_OP_READ_FAST_4B: + break; + default: + return -EINVAL; + } + + while (len > 0) { + block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ); + + /* Read cannot cross 4K boundary */ + block_size = min_t(loff_t, from + block_size, + round_up(from + 1, SZ_4K)) - from; + + writel(from, ispi->base + FADDR); + + val = readl(ispi->base + HSFSTS_CTL); + val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); + val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; + val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; + val |= HSFSTS_CTL_FCYCLE_READ; + val |= HSFSTS_CTL_FGO; + writel(val, ispi->base + HSFSTS_CTL); + + ret = intel_spi_wait_hw_busy(ispi); + if (ret) + return ret; + + status = readl(ispi->base + HSFSTS_CTL); + if (status & HSFSTS_CTL_FCERR) + ret = -EIO; + else if (status & HSFSTS_CTL_AEL) + ret = -EACCES; + + if (ret < 0) { + dev_err(ispi->dev, "read error: %llx: %#x\n", from, + status); + return ret; + } + + ret = intel_spi_read_block(ispi, read_buf, block_size); + if (ret) + return ret; + + len -= block_size; + from += block_size; + retlen += block_size; + read_buf += block_size; + } + + return retlen; +} + +static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len, + const u_char *write_buf) +{ + struct intel_spi *ispi = nor->priv; + size_t block_size, retlen = 0; + u32 val, status; + ssize_t ret; + + /* Not needed with HW sequencer write, make sure it is cleared */ + ispi->atomic_preopcode = 0; + + while (len > 0) { + block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ); + + /* Write cannot cross 4K boundary */ + block_size = min_t(loff_t, to + block_size, + round_up(to + 1, SZ_4K)) - to; + + writel(to, ispi->base + FADDR); + + val = readl(ispi->base + HSFSTS_CTL); + val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); + val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; + val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; + val |= HSFSTS_CTL_FCYCLE_WRITE; + + ret = intel_spi_write_block(ispi, write_buf, block_size); + if (ret) { + dev_err(ispi->dev, "failed to write block\n"); + return ret; + } + + /* Start the write now */ + val |= HSFSTS_CTL_FGO; + writel(val, ispi->base + HSFSTS_CTL); + + ret = intel_spi_wait_hw_busy(ispi); + if (ret) { + dev_err(ispi->dev, "timeout\n"); + return ret; + } + + status = readl(ispi->base + HSFSTS_CTL); + if (status & HSFSTS_CTL_FCERR) + ret = -EIO; + else if (status & HSFSTS_CTL_AEL) + ret = -EACCES; + + if (ret < 0) { + dev_err(ispi->dev, "write error: %llx: %#x\n", to, + status); + return ret; + } + + len -= block_size; + to += block_size; + retlen += block_size; + write_buf += block_size; + } + + return retlen; +} + +static int intel_spi_erase(struct spi_nor *nor, loff_t offs) +{ + size_t erase_size, len = nor->mtd.erasesize; + struct intel_spi *ispi = nor->priv; + u32 val, status, cmd; + int ret; + + /* If the hardware can do 64k erase use that when possible */ + if (len >= SZ_64K && ispi->erase_64k) { + cmd = HSFSTS_CTL_FCYCLE_ERASE_64K; + erase_size = SZ_64K; + } else { + cmd = HSFSTS_CTL_FCYCLE_ERASE; + erase_size = SZ_4K; + } + + if (ispi->swseq_erase) { + while (len > 0) { + writel(offs, ispi->base + FADDR); + + ret = intel_spi_sw_cycle(ispi, nor->erase_opcode, + 0, OPTYPE_WRITE_WITH_ADDR); + if (ret) + return ret; + + offs += erase_size; + len -= erase_size; + } + + return 0; + } + + /* Not needed with HW 
sequencer erase, make sure it is cleared */ + ispi->atomic_preopcode = 0; + + while (len > 0) { + writel(offs, ispi->base + FADDR); + + val = readl(ispi->base + HSFSTS_CTL); + val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); + val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; + val |= cmd; + val |= HSFSTS_CTL_FGO; + writel(val, ispi->base + HSFSTS_CTL); + + ret = intel_spi_wait_hw_busy(ispi); + if (ret) + return ret; + + status = readl(ispi->base + HSFSTS_CTL); + if (status & HSFSTS_CTL_FCERR) + return -EIO; + else if (status & HSFSTS_CTL_AEL) + return -EACCES; + + offs += erase_size; + len -= erase_size; + } + + return 0; +} + +static bool intel_spi_is_protected(const struct intel_spi *ispi, + unsigned int base, unsigned int limit) +{ + int i; + + for (i = 0; i < ispi->pr_num; i++) { + u32 pr_base, pr_limit, pr_value; + + pr_value = readl(ispi->pregs + PR(i)); + if (!(pr_value & (PR_WPE | PR_RPE))) + continue; + + pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT; + pr_base = pr_value & PR_BASE_MASK; + + if (pr_base >= base && pr_limit <= limit) + return true; + } + + return false; +} + +/* + * There will be a single partition holding all enabled flash regions. We + * call this "BIOS". + */ +static void intel_spi_fill_partition(struct intel_spi *ispi, + struct mtd_partition *part) +{ + u64 end; + int i; + + memset(part, 0, sizeof(*part)); + + /* Start from the mandatory descriptor region */ + part->size = 4096; + part->name = "BIOS"; + + /* + * Now try to find where this partition ends based on the flash + * region registers. + */ + for (i = 1; i < ispi->nregions; i++) { + u32 region, base, limit; + + region = readl(ispi->base + FREG(i)); + base = region & FREG_BASE_MASK; + limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT; + + if (base >= limit || limit == 0) + continue; + + /* + * If any of the regions have protection bits set, make the + * whole partition read-only to be on the safe side. + * + * Also if the user did not ask the chip to be writeable + * mask the bit too. 
+ */ + if (!writeable || intel_spi_is_protected(ispi, base, limit)) + part->mask_flags |= MTD_WRITEABLE; + + end = (limit << 12) + 4096; + if (end > part->size) + part->size = end; + } +} + +static const struct spi_nor_controller_ops intel_spi_controller_ops = { + .read_reg = intel_spi_read_reg, + .write_reg = intel_spi_write_reg, + .read = intel_spi_read, + .write = intel_spi_write, + .erase = intel_spi_erase, +}; + +struct intel_spi *intel_spi_probe(struct device *dev, + struct resource *mem, const struct intel_spi_boardinfo *info) +{ + const struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_PP, + }; + struct mtd_partition part; + struct intel_spi *ispi; + int ret; + + if (!info || !mem) + return ERR_PTR(-EINVAL); + + ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL); + if (!ispi) + return ERR_PTR(-ENOMEM); + + ispi->base = devm_ioremap_resource(dev, mem); + if (IS_ERR(ispi->base)) + return ERR_CAST(ispi->base); + + ispi->dev = dev; + ispi->info = info; + + ret = intel_spi_init(ispi); + if (ret) + return ERR_PTR(ret); + + ispi->nor.dev = ispi->dev; + ispi->nor.priv = ispi; + ispi->nor.controller_ops = &intel_spi_controller_ops; + + ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps); + if (ret) { + dev_info(dev, "failed to locate the chip\n"); + return ERR_PTR(ret); + } + + intel_spi_fill_partition(ispi, &part); + + ret = mtd_device_register(&ispi->nor.mtd, &part, 1); + if (ret) + return ERR_PTR(ret); + + return ispi; +} +EXPORT_SYMBOL_GPL(intel_spi_probe); + +int intel_spi_remove(struct intel_spi *ispi) +{ + return mtd_device_unregister(&ispi->nor.mtd); +} +EXPORT_SYMBOL_GPL(intel_spi_remove); + +MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver"); +MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.h b/drivers/mtd/spi-nor/controllers/intel-spi.h new file mode 100644 index 000000000..e2f41b882 --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/intel-spi.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Intel PCH/PCU SPI flash driver. + * + * Copyright (C) 2016, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#ifndef INTEL_SPI_H +#define INTEL_SPI_H + +#include <linux/platform_data/intel-spi.h> + +struct intel_spi; +struct resource; + +struct intel_spi *intel_spi_probe(struct device *dev, + struct resource *mem, const struct intel_spi_boardinfo *info); +int intel_spi_remove(struct intel_spi *ispi); + +#endif /* INTEL_SPI_H */ diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c new file mode 100644 index 000000000..5703e8313 --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c @@ -0,0 +1,486 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SPI NOR driver for NXP SPI Flash Interface (SPIFI) + * + * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com> + * + * Based on Freescale QuadSPI driver: + * Copyright (C) 2013 Freescale Semiconductor, Inc. 
+ */ + +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/mtd/spi-nor.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> + +/* NXP SPIFI registers, bits and macros */ +#define SPIFI_CTRL 0x000 +#define SPIFI_CTRL_TIMEOUT(timeout) (timeout) +#define SPIFI_CTRL_CSHIGH(cshigh) ((cshigh) << 16) +#define SPIFI_CTRL_MODE3 BIT(23) +#define SPIFI_CTRL_DUAL BIT(28) +#define SPIFI_CTRL_FBCLK BIT(30) +#define SPIFI_CMD 0x004 +#define SPIFI_CMD_DATALEN(dlen) ((dlen) & 0x3fff) +#define SPIFI_CMD_DOUT BIT(15) +#define SPIFI_CMD_INTLEN(ilen) ((ilen) << 16) +#define SPIFI_CMD_FIELDFORM(field) ((field) << 19) +#define SPIFI_CMD_FIELDFORM_ALL_SERIAL SPIFI_CMD_FIELDFORM(0x0) +#define SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA SPIFI_CMD_FIELDFORM(0x1) +#define SPIFI_CMD_FRAMEFORM(frame) ((frame) << 21) +#define SPIFI_CMD_FRAMEFORM_OPCODE_ONLY SPIFI_CMD_FRAMEFORM(0x1) +#define SPIFI_CMD_OPCODE(op) ((op) << 24) +#define SPIFI_ADDR 0x008 +#define SPIFI_IDATA 0x00c +#define SPIFI_CLIMIT 0x010 +#define SPIFI_DATA 0x014 +#define SPIFI_MCMD 0x018 +#define SPIFI_STAT 0x01c +#define SPIFI_STAT_MCINIT BIT(0) +#define SPIFI_STAT_CMD BIT(1) +#define SPIFI_STAT_RESET BIT(4) + +#define SPI_NOR_MAX_ID_LEN 6 + +struct nxp_spifi { + struct device *dev; + struct clk *clk_spifi; + struct clk *clk_reg; + void __iomem *io_base; + void __iomem *flash_base; + struct spi_nor nor; + bool memory_mode; + u32 mcmd; +}; + +static int nxp_spifi_wait_for_cmd(struct nxp_spifi *spifi) +{ + u8 stat; + int ret; + + ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat, + !(stat & SPIFI_STAT_CMD), 10, 30); + if (ret) + dev_warn(spifi->dev, "command timed out\n"); + + return ret; +} + +static int nxp_spifi_reset(struct nxp_spifi *spifi) +{ + u8 stat; + int ret; + + writel(SPIFI_STAT_RESET, spifi->io_base + SPIFI_STAT); + ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat, + !(stat & SPIFI_STAT_RESET), 10, 30); + if (ret) + dev_warn(spifi->dev, "state reset timed out\n"); + + return ret; +} + +static int nxp_spifi_set_memory_mode_off(struct nxp_spifi *spifi) +{ + int ret; + + if (!spifi->memory_mode) + return 0; + + ret = nxp_spifi_reset(spifi); + if (ret) + dev_err(spifi->dev, "unable to enter command mode\n"); + else + spifi->memory_mode = false; + + return ret; +} + +static int nxp_spifi_set_memory_mode_on(struct nxp_spifi *spifi) +{ + u8 stat; + int ret; + + if (spifi->memory_mode) + return 0; + + writel(spifi->mcmd, spifi->io_base + SPIFI_MCMD); + ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat, + stat & SPIFI_STAT_MCINIT, 10, 30); + if (ret) + dev_err(spifi->dev, "unable to enter memory mode\n"); + else + spifi->memory_mode = true; + + return ret; +} + +static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, + size_t len) +{ + struct nxp_spifi *spifi = nor->priv; + u32 cmd; + int ret; + + ret = nxp_spifi_set_memory_mode_off(spifi); + if (ret) + return ret; + + cmd = SPIFI_CMD_DATALEN(len) | + SPIFI_CMD_OPCODE(opcode) | + SPIFI_CMD_FIELDFORM_ALL_SERIAL | + SPIFI_CMD_FRAMEFORM_OPCODE_ONLY; + writel(cmd, spifi->io_base + SPIFI_CMD); + + while (len--) + *buf++ = readb(spifi->io_base + SPIFI_DATA); + + return nxp_spifi_wait_for_cmd(spifi); +} + +static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, + size_t len) +{ + struct nxp_spifi *spifi = nor->priv; + u32 cmd; + int 
ret; + + ret = nxp_spifi_set_memory_mode_off(spifi); + if (ret) + return ret; + + cmd = SPIFI_CMD_DOUT | + SPIFI_CMD_DATALEN(len) | + SPIFI_CMD_OPCODE(opcode) | + SPIFI_CMD_FIELDFORM_ALL_SERIAL | + SPIFI_CMD_FRAMEFORM_OPCODE_ONLY; + writel(cmd, spifi->io_base + SPIFI_CMD); + + while (len--) + writeb(*buf++, spifi->io_base + SPIFI_DATA); + + return nxp_spifi_wait_for_cmd(spifi); +} + +static ssize_t nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len, + u_char *buf) +{ + struct nxp_spifi *spifi = nor->priv; + int ret; + + ret = nxp_spifi_set_memory_mode_on(spifi); + if (ret) + return ret; + + memcpy_fromio(buf, spifi->flash_base + from, len); + + return len; +} + +static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len, + const u_char *buf) +{ + struct nxp_spifi *spifi = nor->priv; + u32 cmd; + int ret; + size_t i; + + ret = nxp_spifi_set_memory_mode_off(spifi); + if (ret) + return ret; + + writel(to, spifi->io_base + SPIFI_ADDR); + + cmd = SPIFI_CMD_DOUT | + SPIFI_CMD_DATALEN(len) | + SPIFI_CMD_FIELDFORM_ALL_SERIAL | + SPIFI_CMD_OPCODE(nor->program_opcode) | + SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1); + writel(cmd, spifi->io_base + SPIFI_CMD); + + for (i = 0; i < len; i++) + writeb(buf[i], spifi->io_base + SPIFI_DATA); + + ret = nxp_spifi_wait_for_cmd(spifi); + if (ret) + return ret; + + return len; +} + +static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs) +{ + struct nxp_spifi *spifi = nor->priv; + u32 cmd; + int ret; + + ret = nxp_spifi_set_memory_mode_off(spifi); + if (ret) + return ret; + + writel(offs, spifi->io_base + SPIFI_ADDR); + + cmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL | + SPIFI_CMD_OPCODE(nor->erase_opcode) | + SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1); + writel(cmd, spifi->io_base + SPIFI_CMD); + + return nxp_spifi_wait_for_cmd(spifi); +} + +static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi) +{ + switch (spifi->nor.read_proto) { + case SNOR_PROTO_1_1_1: + spifi->mcmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL; + break; + case SNOR_PROTO_1_1_2: + case SNOR_PROTO_1_1_4: + spifi->mcmd = SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA; + break; + default: + dev_err(spifi->dev, "unsupported SPI read mode\n"); + return -EINVAL; + } + + /* Memory mode supports address length between 1 and 4 */ + if (spifi->nor.addr_width < 1 || spifi->nor.addr_width > 4) + return -EINVAL; + + spifi->mcmd |= SPIFI_CMD_OPCODE(spifi->nor.read_opcode) | + SPIFI_CMD_INTLEN(spifi->nor.read_dummy / 8) | + SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1); + + return 0; +} + +static void nxp_spifi_dummy_id_read(struct spi_nor *nor) +{ + u8 id[SPI_NOR_MAX_ID_LEN]; + nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id, + SPI_NOR_MAX_ID_LEN); +} + +static const struct spi_nor_controller_ops nxp_spifi_controller_ops = { + .read_reg = nxp_spifi_read_reg, + .write_reg = nxp_spifi_write_reg, + .read = nxp_spifi_read, + .write = nxp_spifi_write, + .erase = nxp_spifi_erase, +}; + +static int nxp_spifi_setup_flash(struct nxp_spifi *spifi, + struct device_node *np) +{ + struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_PP, + }; + u32 ctrl, property; + u16 mode = 0; + int ret; + + if (!of_property_read_u32(np, "spi-rx-bus-width", &property)) { + switch (property) { + case 1: + break; + case 2: + mode |= SPI_RX_DUAL; + break; + case 4: + mode |= SPI_RX_QUAD; + break; + default: + dev_err(spifi->dev, "unsupported rx-bus-width\n"); + return -EINVAL; + } + } + + if (of_find_property(np, "spi-cpha", NULL)) + mode |= SPI_CPHA; + + if 
(of_find_property(np, "spi-cpol", NULL)) + mode |= SPI_CPOL; + + /* Setup control register defaults */ + ctrl = SPIFI_CTRL_TIMEOUT(1000) | + SPIFI_CTRL_CSHIGH(15) | + SPIFI_CTRL_FBCLK; + + if (mode & SPI_RX_DUAL) { + ctrl |= SPIFI_CTRL_DUAL; + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; + } else if (mode & SPI_RX_QUAD) { + ctrl &= ~SPIFI_CTRL_DUAL; + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; + } else { + ctrl |= SPIFI_CTRL_DUAL; + } + + switch (mode & (SPI_CPHA | SPI_CPOL)) { + case SPI_MODE_0: + ctrl &= ~SPIFI_CTRL_MODE3; + break; + case SPI_MODE_3: + ctrl |= SPIFI_CTRL_MODE3; + break; + default: + dev_err(spifi->dev, "only mode 0 and 3 supported\n"); + return -EINVAL; + } + + writel(ctrl, spifi->io_base + SPIFI_CTRL); + + spifi->nor.dev = spifi->dev; + spi_nor_set_flash_node(&spifi->nor, np); + spifi->nor.priv = spifi; + spifi->nor.controller_ops = &nxp_spifi_controller_ops; + + /* + * The first read on a hard reset isn't reliable so do a + * dummy read of the id before calling spi_nor_scan(). + * The reason for this problem is unknown. + * + * The official NXP spifilib uses more or less the same + * workaround that is applied here by reading the device + * id multiple times. + */ + nxp_spifi_dummy_id_read(&spifi->nor); + + ret = spi_nor_scan(&spifi->nor, NULL, &hwcaps); + if (ret) { + dev_err(spifi->dev, "device scan failed\n"); + return ret; + } + + ret = nxp_spifi_setup_memory_cmd(spifi); + if (ret) { + dev_err(spifi->dev, "memory command setup failed\n"); + return ret; + } + + ret = mtd_device_register(&spifi->nor.mtd, NULL, 0); + if (ret) { + dev_err(spifi->dev, "mtd device parse failed\n"); + return ret; + } + + return 0; +} + +static int nxp_spifi_probe(struct platform_device *pdev) +{ + struct device_node *flash_np; + struct nxp_spifi *spifi; + struct resource *res; + int ret; + + spifi = devm_kzalloc(&pdev->dev, sizeof(*spifi), GFP_KERNEL); + if (!spifi) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spifi"); + spifi->io_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(spifi->io_base)) + return PTR_ERR(spifi->io_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash"); + spifi->flash_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(spifi->flash_base)) + return PTR_ERR(spifi->flash_base); + + spifi->clk_spifi = devm_clk_get(&pdev->dev, "spifi"); + if (IS_ERR(spifi->clk_spifi)) { + dev_err(&pdev->dev, "spifi clock not found\n"); + return PTR_ERR(spifi->clk_spifi); + } + + spifi->clk_reg = devm_clk_get(&pdev->dev, "reg"); + if (IS_ERR(spifi->clk_reg)) { + dev_err(&pdev->dev, "reg clock not found\n"); + return PTR_ERR(spifi->clk_reg); + } + + ret = clk_prepare_enable(spifi->clk_reg); + if (ret) { + dev_err(&pdev->dev, "unable to enable reg clock\n"); + return ret; + } + + ret = clk_prepare_enable(spifi->clk_spifi); + if (ret) { + dev_err(&pdev->dev, "unable to enable spifi clock\n"); + goto dis_clk_reg; + } + + spifi->dev = &pdev->dev; + platform_set_drvdata(pdev, spifi); + + /* Initialize and reset device */ + nxp_spifi_reset(spifi); + writel(0, spifi->io_base + SPIFI_IDATA); + writel(0, spifi->io_base + SPIFI_MCMD); + nxp_spifi_reset(spifi); + + flash_np = of_get_next_available_child(pdev->dev.of_node, NULL); + if (!flash_np) { + dev_err(&pdev->dev, "no SPI flash device to configure\n"); + ret = -ENODEV; + goto dis_clks; + } + + ret = nxp_spifi_setup_flash(spifi, flash_np); + of_node_put(flash_np); + if (ret) { + dev_err(&pdev->dev, "unable to setup flash chip\n"); + goto dis_clks; + } + + return 0; + +dis_clks: + 
clk_disable_unprepare(spifi->clk_spifi); +dis_clk_reg: + clk_disable_unprepare(spifi->clk_reg); + return ret; +} + +static int nxp_spifi_remove(struct platform_device *pdev) +{ + struct nxp_spifi *spifi = platform_get_drvdata(pdev); + + mtd_device_unregister(&spifi->nor.mtd); + clk_disable_unprepare(spifi->clk_spifi); + clk_disable_unprepare(spifi->clk_reg); + + return 0; +} + +static const struct of_device_id nxp_spifi_match[] = { + {.compatible = "nxp,lpc1773-spifi"}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, nxp_spifi_match); + +static struct platform_driver nxp_spifi_driver = { + .probe = nxp_spifi_probe, + .remove = nxp_spifi_remove, + .driver = { + .name = "nxp-spifi", + .of_match_table = nxp_spifi_match, + }, +}; +module_platform_driver(nxp_spifi_driver); + +MODULE_DESCRIPTION("NXP SPI Flash Interface driver"); +MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c new file mode 100644 index 000000000..09e112f37 --- /dev/null +++ b/drivers/mtd/spi-nor/core.c @@ -0,0 +1,3526 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with + * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c + * + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/math64.h> +#include <linux/sizes.h> +#include <linux/slab.h> + +#include <linux/mtd/mtd.h> +#include <linux/of_platform.h> +#include <linux/sched/task_stack.h> +#include <linux/spi/flash.h> +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +/* Define max times to check status register before we give up. */ + +/* + * For everything but full-chip erase; probably could be much smaller, but kept + * around for safety for now + */ +#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ) + +/* + * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up + * for larger flash + */ +#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ) + +#define SPI_NOR_MAX_ADDR_WIDTH 4 + +/** + * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data + * transfer + * @nor: pointer to 'struct spi_nor' + * @op: pointer to 'struct spi_mem_op' template for transfer + * + * If we have to use the bounce buffer, the data field in @op will be updated. + * + * Return: true if the bounce buffer is needed, false if not + */ +static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op) +{ + /* op->data.buf.in occupies the same memory as op->data.buf.out */ + if (object_is_on_stack(op->data.buf.in) || + !virt_addr_valid(op->data.buf.in)) { + if (op->data.nbytes > nor->bouncebuf_size) + op->data.nbytes = nor->bouncebuf_size; + op->data.buf.in = nor->bouncebuf; + return true; + } + + return false; +} + +/** + * spi_nor_spimem_exec_op() - execute a memory operation + * @nor: pointer to 'struct spi_nor' + * @op: pointer to 'struct spi_mem_op' template for transfer + * + * Return: 0 on success, -error otherwise. 
+ */ +static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op) +{ + int error; + + error = spi_mem_adjust_op_size(nor->spimem, op); + if (error) + return error; + + return spi_mem_exec_op(nor->spimem, op); +} + +/** + * spi_nor_spimem_read_data() - read data from flash's memory region via + * spi-mem + * @nor: pointer to 'struct spi_nor' + * @from: offset to read from + * @len: number of bytes to read + * @buf: pointer to dst buffer + * + * Return: number of bytes read successfully, -errno otherwise + */ +static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from, + size_t len, u8 *buf) +{ + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1), + SPI_MEM_OP_ADDR(nor->addr_width, from, 1), + SPI_MEM_OP_DUMMY(nor->read_dummy, 1), + SPI_MEM_OP_DATA_IN(len, buf, 1)); + bool usebouncebuf; + ssize_t nbytes; + int error; + + /* get transfer protocols. */ + op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto); + op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto); + op.dummy.buswidth = op.addr.buswidth; + op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto); + + /* convert the dummy cycles to the number of bytes */ + op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8; + + usebouncebuf = spi_nor_spimem_bounce(nor, &op); + + if (nor->dirmap.rdesc) { + nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val, + op.data.nbytes, op.data.buf.in); + } else { + error = spi_nor_spimem_exec_op(nor, &op); + if (error) + return error; + nbytes = op.data.nbytes; + } + + if (usebouncebuf && nbytes > 0) + memcpy(buf, op.data.buf.in, nbytes); + + return nbytes; +} + +/** + * spi_nor_read_data() - read data from flash memory + * @nor: pointer to 'struct spi_nor' + * @from: offset to read from + * @len: number of bytes to read + * @buf: pointer to dst buffer + * + * Return: number of bytes read successfully, -errno otherwise + */ +ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf) +{ + if (nor->spimem) + return spi_nor_spimem_read_data(nor, from, len, buf); + + return nor->controller_ops->read(nor, from, len, buf); +} + +/** + * spi_nor_spimem_write_data() - write data to flash memory via + * spi-mem + * @nor: pointer to 'struct spi_nor' + * @to: offset to write to + * @len: number of bytes to write + * @buf: pointer to src buffer + * + * Return: number of bytes written successfully, -errno otherwise + */ +static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to, + size_t len, const u8 *buf) +{ + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1), + SPI_MEM_OP_ADDR(nor->addr_width, to, 1), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(len, buf, 1)); + ssize_t nbytes; + int error; + + op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto); + op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto); + op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto); + + if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second) + op.addr.nbytes = 0; + + if (spi_nor_spimem_bounce(nor, &op)) + memcpy(nor->bouncebuf, buf, op.data.nbytes); + + if (nor->dirmap.wdesc) { + nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val, + op.data.nbytes, op.data.buf.out); + } else { + error = spi_nor_spimem_exec_op(nor, &op); + if (error) + return error; + nbytes = op.data.nbytes; + } + + return nbytes; +} + +/** + * spi_nor_write_data() - write data to flash memory + * @nor: pointer to 'struct spi_nor' + * @to: 
offset to write to + * @len: number of bytes to write + * @buf: pointer to src buffer + * + * Return: number of bytes written successfully, -errno otherwise + */ +ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len, + const u8 *buf) +{ + if (nor->spimem) + return spi_nor_spimem_write_data(nor, to, len, buf); + + return nor->controller_ops->write(nor, to, len, buf); +} + +/** + * spi_nor_write_enable() - Set write enable latch with Write Enable command. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_write_enable(struct spi_nor *nor) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_NO_DATA); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN, + NULL, 0); + } + + if (ret) + dev_dbg(nor->dev, "error %d on Write Enable\n", ret); + + return ret; +} + +/** + * spi_nor_write_disable() - Send Write Disable instruction to the chip. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_write_disable(struct spi_nor *nor) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_NO_DATA); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI, + NULL, 0); + } + + if (ret) + dev_dbg(nor->dev, "error %d on Write Disable\n", ret); + + return ret; +} + +/** + * spi_nor_read_sr() - Read the Status Register. + * @nor: pointer to 'struct spi_nor'. + * @sr: pointer to a DMA-able buffer where the value of the + * Status Register will be written. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_IN(1, sr, 1)); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR, + sr, 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d reading SR\n", ret); + + return ret; +} + +/** + * spi_nor_read_fsr() - Read the Flag Status Register. + * @nor: pointer to 'struct spi_nor' + * @fsr: pointer to a DMA-able buffer where the value of the + * Flag Status Register will be written. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_IN(1, fsr, 1)); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR, + fsr, 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d reading FSR\n", ret); + + return ret; +} + +/** + * spi_nor_read_cr() - Read the Configuration Register using the + * SPINOR_OP_RDCR (35h) command. + * @nor: pointer to 'struct spi_nor' + * @cr: pointer to a DMA-able buffer where the value of the + * Configuration Register will be written. + * + * Return: 0 on success, -errno otherwise. 
+ */ +static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_IN(1, cr, 1)); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d reading CR\n", ret); + + return ret; +} + +/** + * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode. + * @nor: pointer to 'struct spi_nor'. + * @enable: true to enter the 4-byte address mode, false to exit the 4-byte + * address mode. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(enable ? + SPINOR_OP_EN4B : + SPINOR_OP_EX4B, + 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_NO_DATA); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, + enable ? SPINOR_OP_EN4B : + SPINOR_OP_EX4B, + NULL, 0); + } + + if (ret) + dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret); + + return ret; +} + +/** + * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion + * flashes. + * @nor: pointer to 'struct spi_nor'. + * @enable: true to enter the 4-byte address mode, false to exit the 4-byte + * address mode. + * + * Return: 0 on success, -errno otherwise. + */ +static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable) +{ + int ret; + + nor->bouncebuf[0] = enable << 7; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1)); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR, + nor->bouncebuf, 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret); + + return ret; +} + +/** + * spi_nor_write_ear() - Write Extended Address Register. + * @nor: pointer to 'struct spi_nor'. + * @ear: value to write to the Extended Address Register. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_write_ear(struct spi_nor *nor, u8 ear) +{ + int ret; + + nor->bouncebuf[0] = ear; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1)); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR, + nor->bouncebuf, 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d writing EAR\n", ret); + + return ret; +} + +/** + * spi_nor_xread_sr() - Read the Status Register on S3AN flashes. + * @nor: pointer to 'struct spi_nor'. + * @sr: pointer to a DMA-able buffer where the value of the + * Status Register will be written. + * + * Return: 0 on success, -errno otherwise. 
+ */ +int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_IN(1, sr, 1)); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR, + sr, 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d reading XRDSR\n", ret); + + return ret; +} + +/** + * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if + * the flash is ready for new commands. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 1 if ready, 0 if not ready, -errno on errors. + */ +static int spi_nor_xsr_ready(struct spi_nor *nor) +{ + int ret; + + ret = spi_nor_xread_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + return !!(nor->bouncebuf[0] & XSR_RDY); +} + +/** + * spi_nor_clear_sr() - Clear the Status Register. + * @nor: pointer to 'struct spi_nor'. + */ +static void spi_nor_clear_sr(struct spi_nor *nor) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_NO_DATA); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR, + NULL, 0); + } + + if (ret) + dev_dbg(nor->dev, "error %d clearing SR\n", ret); +} + +/** + * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready + * for new commands. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 1 if ready, 0 if not ready, -errno on errors. + */ +static int spi_nor_sr_ready(struct spi_nor *nor) +{ + int ret = spi_nor_read_sr(nor, nor->bouncebuf); + + if (ret) + return ret; + + if (nor->flags & SNOR_F_USE_CLSR && + nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) { + if (nor->bouncebuf[0] & SR_E_ERR) + dev_err(nor->dev, "Erase Error occurred\n"); + else + dev_err(nor->dev, "Programming Error occurred\n"); + + spi_nor_clear_sr(nor); + + /* + * WEL bit remains set to one when an erase or page program + * error occurs. Issue a Write Disable command to protect + * against inadvertent writes that can possibly corrupt the + * contents of the memory. + */ + ret = spi_nor_write_disable(nor); + if (ret) + return ret; + + return -EIO; + } + + return !(nor->bouncebuf[0] & SR_WIP); +} + +/** + * spi_nor_clear_fsr() - Clear the Flag Status Register. + * @nor: pointer to 'struct spi_nor'. + */ +static void spi_nor_clear_fsr(struct spi_nor *nor) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_NO_DATA); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR, + NULL, 0); + } + + if (ret) + dev_dbg(nor->dev, "error %d clearing FSR\n", ret); +} + +/** + * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is + * ready for new commands. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 1 if ready, 0 if not ready, -errno on errors. 
+ */ +static int spi_nor_fsr_ready(struct spi_nor *nor) +{ + int ret = spi_nor_read_fsr(nor, nor->bouncebuf); + + if (ret) + return ret; + + if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) { + if (nor->bouncebuf[0] & FSR_E_ERR) + dev_err(nor->dev, "Erase operation failed.\n"); + else + dev_err(nor->dev, "Program operation failed.\n"); + + if (nor->bouncebuf[0] & FSR_PT_ERR) + dev_err(nor->dev, + "Attempted to modify a protected sector.\n"); + + spi_nor_clear_fsr(nor); + + /* + * WEL bit remains set to one when an erase or page program + * error occurs. Issue a Write Disable command to protect + * against inadvertent writes that can possibly corrupt the + * contents of the memory. + */ + ret = spi_nor_write_disable(nor); + if (ret) + return ret; + + return -EIO; + } + + return !!(nor->bouncebuf[0] & FSR_READY); +} + +/** + * spi_nor_ready() - Query the flash to see if it is ready for new commands. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 1 if ready, 0 if not ready, -errno on errors. + */ +static int spi_nor_ready(struct spi_nor *nor) +{ + int sr, fsr; + + if (nor->flags & SNOR_F_READY_XSR_RDY) + sr = spi_nor_xsr_ready(nor); + else + sr = spi_nor_sr_ready(nor); + if (sr < 0) + return sr; + fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1; + if (fsr < 0) + return fsr; + return sr && fsr; +} + +/** + * spi_nor_wait_till_ready_with_timeout() - Service routine to read the + * Status Register until ready, or timeout occurs. + * @nor: pointer to "struct spi_nor". + * @timeout_jiffies: jiffies to wait until timeout. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor, + unsigned long timeout_jiffies) +{ + unsigned long deadline; + int timeout = 0, ret; + + deadline = jiffies + timeout_jiffies; + + while (!timeout) { + if (time_after_eq(jiffies, deadline)) + timeout = 1; + + ret = spi_nor_ready(nor); + if (ret < 0) + return ret; + if (ret) + return 0; + + cond_resched(); + } + + dev_dbg(nor->dev, "flash operation timed out\n"); + + return -ETIMEDOUT; +} + +/** + * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the + * flash to be ready, or timeout occurs. + * @nor: pointer to "struct spi_nor". + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_wait_till_ready(struct spi_nor *nor) +{ + return spi_nor_wait_till_ready_with_timeout(nor, + DEFAULT_READY_WAIT_JIFFIES); +} + +/** + * spi_nor_write_sr() - Write the Status Register. + * @nor: pointer to 'struct spi_nor'. + * @sr: pointer to DMA-able buffer to write to the Status Register. + * @len: number of bytes to write to the Status Register. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len) +{ + int ret; + + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(len, sr, 1)); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR, + sr, len); + } + + if (ret) { + dev_dbg(nor->dev, "error %d writing SR\n", ret); + return ret; + } + + return spi_nor_wait_till_ready(nor); +} + +/** + * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and + * ensure that the byte written match the received value. + * @nor: pointer to a 'struct spi_nor'. + * @sr1: byte value to be written to the Status Register. 
+ * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1) +{ + int ret; + + nor->bouncebuf[0] = sr1; + + ret = spi_nor_write_sr(nor, nor->bouncebuf, 1); + if (ret) + return ret; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + if (nor->bouncebuf[0] != sr1) { + dev_dbg(nor->dev, "SR1: read back test failed\n"); + return -EIO; + } + + return 0; +} + +/** + * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the + * Status Register 2 in one shot. Ensure that the byte written in the Status + * Register 1 match the received value, and that the 16-bit Write did not + * affect what was already in the Status Register 2. + * @nor: pointer to a 'struct spi_nor'. + * @sr1: byte value to be written to the Status Register 1. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1) +{ + int ret; + u8 *sr_cr = nor->bouncebuf; + u8 cr_written; + + /* Make sure we don't overwrite the contents of Status Register 2. */ + if (!(nor->flags & SNOR_F_NO_READ_CR)) { + ret = spi_nor_read_cr(nor, &sr_cr[1]); + if (ret) + return ret; + } else if (spi_nor_get_protocol_width(nor->read_proto) == 4 && + spi_nor_get_protocol_width(nor->write_proto) == 4 && + nor->params->quad_enable) { + /* + * If the Status Register 2 Read command (35h) is not + * supported, we should at least be sure we don't + * change the value of the SR2 Quad Enable bit. + * + * When the Quad Enable method is set and the buswidth is 4, we + * can safely assume that the value of the QE bit is one, as a + * consequence of the nor->params->quad_enable() call. + * + * According to the JESD216 revB standard, BFPT DWORDS[15], + * bits 22:20, the 16-bit Write Status (01h) command is + * available just for the cases in which the QE bit is + * described in SR2 at BIT(1). + */ + sr_cr[1] = SR2_QUAD_EN_BIT1; + } else { + sr_cr[1] = 0; + } + + sr_cr[0] = sr1; + + ret = spi_nor_write_sr(nor, sr_cr, 2); + if (ret) + return ret; + + ret = spi_nor_read_sr(nor, sr_cr); + if (ret) + return ret; + + if (sr1 != sr_cr[0]) { + dev_dbg(nor->dev, "SR: Read back test failed\n"); + return -EIO; + } + + if (nor->flags & SNOR_F_NO_READ_CR) + return 0; + + cr_written = sr_cr[1]; + + ret = spi_nor_read_cr(nor, &sr_cr[1]); + if (ret) + return ret; + + if (cr_written != sr_cr[1]) { + dev_dbg(nor->dev, "CR: read back test failed\n"); + return -EIO; + } + + return 0; +} + +/** + * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the + * Configuration Register in one shot. Ensure that the byte written in the + * Configuration Register match the received value, and that the 16-bit Write + * did not affect what was already in the Status Register 1. + * @nor: pointer to a 'struct spi_nor'. + * @cr: byte value to be written to the Configuration Register. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr) +{ + int ret; + u8 *sr_cr = nor->bouncebuf; + u8 sr_written; + + /* Keep the current value of the Status Register 1. 
*/ + ret = spi_nor_read_sr(nor, sr_cr); + if (ret) + return ret; + + sr_cr[1] = cr; + + ret = spi_nor_write_sr(nor, sr_cr, 2); + if (ret) + return ret; + + sr_written = sr_cr[0]; + + ret = spi_nor_read_sr(nor, sr_cr); + if (ret) + return ret; + + if (sr_written != sr_cr[0]) { + dev_dbg(nor->dev, "SR: Read back test failed\n"); + return -EIO; + } + + if (nor->flags & SNOR_F_NO_READ_CR) + return 0; + + ret = spi_nor_read_cr(nor, &sr_cr[1]); + if (ret) + return ret; + + if (cr != sr_cr[1]) { + dev_dbg(nor->dev, "CR: read back test failed\n"); + return -EIO; + } + + return 0; +} + +/** + * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that + * the byte written match the received value without affecting other bits in the + * Status Register 1 and 2. + * @nor: pointer to a 'struct spi_nor'. + * @sr1: byte value to be written to the Status Register. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1) +{ + if (nor->flags & SNOR_F_HAS_16BIT_SR) + return spi_nor_write_16bit_sr_and_check(nor, sr1); + + return spi_nor_write_sr1_and_check(nor, sr1); +} + +/** + * spi_nor_write_sr2() - Write the Status Register 2 using the + * SPINOR_OP_WRSR2 (3eh) command. + * @nor: pointer to 'struct spi_nor'. + * @sr2: pointer to DMA-able buffer to write to the Status Register 2. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2) +{ + int ret; + + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(1, sr2, 1)); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2, + sr2, 1); + } + + if (ret) { + dev_dbg(nor->dev, "error %d writing SR2\n", ret); + return ret; + } + + return spi_nor_wait_till_ready(nor); +} + +/** + * spi_nor_read_sr2() - Read the Status Register 2 using the + * SPINOR_OP_RDSR2 (3fh) command. + * @nor: pointer to 'struct spi_nor'. + * @sr2: pointer to DMA-able buffer where the value of the + * Status Register 2 will be written. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_IN(1, sr2, 1)); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2, + sr2, 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d reading SR2\n", ret); + + return ret; +} + +/** + * spi_nor_erase_chip() - Erase the entire flash memory. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 0 on success, -errno otherwise. 
+ */ +static int spi_nor_erase_chip(struct spi_nor *nor) +{ + int ret; + + dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10)); + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_NO_DATA); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE, + NULL, 0); + } + + if (ret) + dev_dbg(nor->dev, "error %d erasing chip\n", ret); + + return ret; +} + +static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size) +{ + size_t i; + + for (i = 0; i < size; i++) + if (table[i][0] == opcode) + return table[i][1]; + + /* No conversion found, keep input op code. */ + return opcode; +} + +u8 spi_nor_convert_3to4_read(u8 opcode) +{ + static const u8 spi_nor_3to4_read[][2] = { + { SPINOR_OP_READ, SPINOR_OP_READ_4B }, + { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B }, + { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B }, + { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B }, + { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B }, + { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B }, + { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B }, + { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B }, + + { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B }, + { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B }, + { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B }, + }; + + return spi_nor_convert_opcode(opcode, spi_nor_3to4_read, + ARRAY_SIZE(spi_nor_3to4_read)); +} + +static u8 spi_nor_convert_3to4_program(u8 opcode) +{ + static const u8 spi_nor_3to4_program[][2] = { + { SPINOR_OP_PP, SPINOR_OP_PP_4B }, + { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B }, + { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B }, + { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B }, + { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B }, + }; + + return spi_nor_convert_opcode(opcode, spi_nor_3to4_program, + ARRAY_SIZE(spi_nor_3to4_program)); +} + +static u8 spi_nor_convert_3to4_erase(u8 opcode) +{ + static const u8 spi_nor_3to4_erase[][2] = { + { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B }, + { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B }, + { SPINOR_OP_SE, SPINOR_OP_SE_4B }, + }; + + return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase, + ARRAY_SIZE(spi_nor_3to4_erase)); +} + +static bool spi_nor_has_uniform_erase(const struct spi_nor *nor) +{ + return !!nor->params->erase_map.uniform_erase_type; +} + +static void spi_nor_set_4byte_opcodes(struct spi_nor *nor) +{ + nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode); + nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode); + nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode); + + if (!spi_nor_has_uniform_erase(nor)) { + struct spi_nor_erase_map *map = &nor->params->erase_map; + struct spi_nor_erase_type *erase; + int i; + + for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) { + erase = &map->erase_type[i]; + erase->opcode = + spi_nor_convert_3to4_erase(erase->opcode); + } + } +} + +int spi_nor_lock_and_prep(struct spi_nor *nor) +{ + int ret = 0; + + mutex_lock(&nor->lock); + + if (nor->controller_ops && nor->controller_ops->prepare) { + ret = nor->controller_ops->prepare(nor); + if (ret) { + mutex_unlock(&nor->lock); + return ret; + } + } + return ret; +} + +void spi_nor_unlock_and_unprep(struct spi_nor *nor) +{ + if (nor->controller_ops && nor->controller_ops->unprepare) + nor->controller_ops->unprepare(nor); + mutex_unlock(&nor->lock); +} + +static u32 
spi_nor_convert_addr(struct spi_nor *nor, loff_t addr) +{ + if (!nor->params->convert_addr) + return addr; + + return nor->params->convert_addr(nor, addr); +} + +/* + * Initiate the erasure of a single sector + */ +static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) +{ + int i; + + addr = spi_nor_convert_addr(nor, addr); + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1), + SPI_MEM_OP_ADDR(nor->addr_width, addr, 1), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_NO_DATA); + + return spi_mem_exec_op(nor->spimem, &op); + } else if (nor->controller_ops->erase) { + return nor->controller_ops->erase(nor, addr); + } + + /* + * Default implementation, if driver doesn't have a specialized HW + * control + */ + for (i = nor->addr_width - 1; i >= 0; i--) { + nor->bouncebuf[i] = addr & 0xff; + addr >>= 8; + } + + return nor->controller_ops->write_reg(nor, nor->erase_opcode, + nor->bouncebuf, nor->addr_width); +} + +/** + * spi_nor_div_by_erase_size() - calculate remainder and update new dividend + * @erase: pointer to a structure that describes a SPI NOR erase type + * @dividend: dividend value + * @remainder: pointer to u32 remainder (will be updated) + * + * Return: the result of the division + */ +static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase, + u64 dividend, u32 *remainder) +{ + /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */ + *remainder = (u32)dividend & erase->size_mask; + return dividend >> erase->size_shift; +} + +/** + * spi_nor_find_best_erase_type() - find the best erase type for the given + * offset in the serial flash memory and the + * number of bytes to erase. The region in + * which the address fits is expected to be + * provided. + * @map: the erase map of the SPI NOR + * @region: pointer to a structure that describes a SPI NOR erase region + * @addr: offset in the serial flash memory + * @len: number of bytes to erase + * + * Return: a pointer to the best fitted erase type, NULL otherwise. + */ +static const struct spi_nor_erase_type * +spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map, + const struct spi_nor_erase_region *region, + u64 addr, u32 len) +{ + const struct spi_nor_erase_type *erase; + u32 rem; + int i; + u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK; + + /* + * Erase types are ordered by size, with the smallest erase type at + * index 0. + */ + for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { + /* Does the erase region support the tested erase type? */ + if (!(erase_mask & BIT(i))) + continue; + + erase = &map->erase_type[i]; + if (!erase->size) + continue; + + /* Alignment is not mandatory for overlaid regions */ + if (region->offset & SNOR_OVERLAID_REGION && + region->size <= len) + return erase; + + /* Don't erase more than what the user has asked for. */ + if (erase->size > len) + continue; + + spi_nor_div_by_erase_size(erase, addr, &rem); + if (rem) + continue; + else + return erase; + } + + return NULL; +} + +static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region) +{ + return region->offset & SNOR_LAST_REGION; +} + +static u64 spi_nor_region_end(const struct spi_nor_erase_region *region) +{ + return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size; +} + +/** + * spi_nor_region_next() - get the next spi nor region + * @region: pointer to a structure that describes a SPI NOR erase region + * + * Return: the next spi nor region or NULL if last region. 
+ */ +struct spi_nor_erase_region * +spi_nor_region_next(struct spi_nor_erase_region *region) +{ + if (spi_nor_region_is_last(region)) + return NULL; + region++; + return region; +} + +/** + * spi_nor_find_erase_region() - find the region of the serial flash memory in + * which the offset fits + * @map: the erase map of the SPI NOR + * @addr: offset in the serial flash memory + * + * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno) + * otherwise. + */ +static struct spi_nor_erase_region * +spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr) +{ + struct spi_nor_erase_region *region = map->regions; + u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK; + u64 region_end = region_start + region->size; + + while (addr < region_start || addr >= region_end) { + region = spi_nor_region_next(region); + if (!region) + return ERR_PTR(-EINVAL); + + region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK; + region_end = region_start + region->size; + } + + return region; +} + +/** + * spi_nor_init_erase_cmd() - initialize an erase command + * @region: pointer to a structure that describes a SPI NOR erase region + * @erase: pointer to a structure that describes a SPI NOR erase type + * + * Return: the pointer to the allocated erase command, ERR_PTR(-errno) + * otherwise. + */ +static struct spi_nor_erase_command * +spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region, + const struct spi_nor_erase_type *erase) +{ + struct spi_nor_erase_command *cmd; + + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&cmd->list); + cmd->opcode = erase->opcode; + cmd->count = 1; + + if (region->offset & SNOR_OVERLAID_REGION) + cmd->size = region->size; + else + cmd->size = erase->size; + + return cmd; +} + +/** + * spi_nor_destroy_erase_cmd_list() - destroy erase command list + * @erase_list: list of erase commands + */ +static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list) +{ + struct spi_nor_erase_command *cmd, *next; + + list_for_each_entry_safe(cmd, next, erase_list, list) { + list_del(&cmd->list); + kfree(cmd); + } +} + +/** + * spi_nor_init_erase_cmd_list() - initialize erase command list + * @nor: pointer to a 'struct spi_nor' + * @erase_list: list of erase commands to be executed once we validate that the + * erase can be performed + * @addr: offset in the serial flash memory + * @len: number of bytes to erase + * + * Builds the list of best fitted erase commands and verifies if the erase can + * be performed. + * + * Return: 0 on success, -errno otherwise. 
+ */
+static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
+ struct list_head *erase_list,
+ u64 addr, u32 len)
+{
+ const struct spi_nor_erase_map *map = &nor->params->erase_map;
+ const struct spi_nor_erase_type *erase, *prev_erase = NULL;
+ struct spi_nor_erase_region *region;
+ struct spi_nor_erase_command *cmd = NULL;
+ u64 region_end;
+ int ret = -EINVAL;
+
+ region = spi_nor_find_erase_region(map, addr);
+ if (IS_ERR(region))
+ return PTR_ERR(region);
+
+ region_end = spi_nor_region_end(region);
+
+ while (len) {
+ erase = spi_nor_find_best_erase_type(map, region, addr, len);
+ if (!erase)
+ goto destroy_erase_cmd_list;
+
+ if (prev_erase != erase ||
+ erase->size != cmd->size ||
+ region->offset & SNOR_OVERLAID_REGION) {
+ cmd = spi_nor_init_erase_cmd(region, erase);
+ if (IS_ERR(cmd)) {
+ ret = PTR_ERR(cmd);
+ goto destroy_erase_cmd_list;
+ }
+
+ list_add_tail(&cmd->list, erase_list);
+ } else {
+ cmd->count++;
+ }
+
+ addr += cmd->size;
+ len -= cmd->size;
+
+ if (len && addr >= region_end) {
+ region = spi_nor_region_next(region);
+ if (!region)
+ goto destroy_erase_cmd_list;
+ region_end = spi_nor_region_end(region);
+ }
+
+ prev_erase = erase;
+ }
+
+ return 0;
+
+destroy_erase_cmd_list:
+ spi_nor_destroy_erase_cmd_list(erase_list);
+ return ret;
+}
+
+/**
+ * spi_nor_erase_multi_sectors() - perform a non-uniform erase
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Build a list of best fitted erase commands and execute it once we validate
+ * that the erase can be performed.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
+{
+ LIST_HEAD(erase_list);
+ struct spi_nor_erase_command *cmd, *next;
+ int ret;
+
+ ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
+ if (ret)
+ return ret;
+
+ list_for_each_entry_safe(cmd, next, &erase_list, list) {
+ nor->erase_opcode = cmd->opcode;
+ while (cmd->count) {
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto destroy_erase_cmd_list;
+
+ ret = spi_nor_erase_sector(nor, addr);
+ if (ret)
+ goto destroy_erase_cmd_list;
+
+ addr += cmd->size;
+ cmd->count--;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto destroy_erase_cmd_list;
+ }
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+
+ return 0;
+
+destroy_erase_cmd_list:
+ spi_nor_destroy_erase_cmd_list(&erase_list);
+ return ret;
+}
+
+/*
+ * Erase an address range on the nor chip. The address range may extend across
+ * one or more erase sectors. Return an error if there is a problem erasing.
+ */
+static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ u32 addr, len;
+ uint32_t rem;
+ int ret;
+
+ dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
+ (long long)instr->len);
+
+ if (spi_nor_has_uniform_erase(nor)) {
+ div_u64_rem(instr->len, mtd->erasesize, &rem);
+ if (rem)
+ return -EINVAL;
+ }
+
+ addr = instr->addr;
+ len = instr->len;
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ /* whole-chip erase? */
+ if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
+ unsigned long timeout;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto erase_err;
+
+ ret = spi_nor_erase_chip(nor);
+ if (ret)
+ goto erase_err;
+
+ /*
+ * Scale the timeout linearly with the size of the flash, with
+ * a minimum calibrated to an old 2MB flash.
We could try to + * pull these from CFI/SFDP, but these values should be good + * enough for now. + */ + timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES, + CHIP_ERASE_2MB_READY_WAIT_JIFFIES * + (unsigned long)(mtd->size / SZ_2M)); + ret = spi_nor_wait_till_ready_with_timeout(nor, timeout); + if (ret) + goto erase_err; + + /* REVISIT in some cases we could speed up erasing large regions + * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up + * to use "small sector erase", but that's not always optimal. + */ + + /* "sector"-at-a-time erase */ + } else if (spi_nor_has_uniform_erase(nor)) { + while (len) { + ret = spi_nor_write_enable(nor); + if (ret) + goto erase_err; + + ret = spi_nor_erase_sector(nor, addr); + if (ret) + goto erase_err; + + addr += mtd->erasesize; + len -= mtd->erasesize; + + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto erase_err; + } + + /* erase multiple sectors */ + } else { + ret = spi_nor_erase_multi_sectors(nor, addr, len); + if (ret) + goto erase_err; + } + + ret = spi_nor_write_disable(nor); + +erase_err: + spi_nor_unlock_and_unprep(nor); + + return ret; +} + +static u8 spi_nor_get_sr_bp_mask(struct spi_nor *nor) +{ + u8 mask = SR_BP2 | SR_BP1 | SR_BP0; + + if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6) + return mask | SR_BP3_BIT6; + + if (nor->flags & SNOR_F_HAS_4BIT_BP) + return mask | SR_BP3; + + return mask; +} + +static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor) +{ + if (nor->flags & SNOR_F_HAS_SR_TB_BIT6) + return SR_TB_BIT6; + else + return SR_TB_BIT5; +} + +static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor) +{ + unsigned int bp_slots, bp_slots_needed; + u8 mask = spi_nor_get_sr_bp_mask(nor); + + /* Reserved one for "protect none" and one for "protect all". */ + bp_slots = (1 << hweight8(mask)) - 2; + bp_slots_needed = ilog2(nor->info->n_sectors); + + if (bp_slots_needed > bp_slots) + return nor->info->sector_size << + (bp_slots_needed - bp_slots); + else + return nor->info->sector_size; +} + +static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs, + uint64_t *len) +{ + struct mtd_info *mtd = &nor->mtd; + u64 min_prot_len; + u8 mask = spi_nor_get_sr_bp_mask(nor); + u8 tb_mask = spi_nor_get_sr_tb_mask(nor); + u8 bp, val = sr & mask; + + if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3_BIT6) + val = (val & ~SR_BP3_BIT6) | SR_BP3; + + bp = val >> SR_BP_SHIFT; + + if (!bp) { + /* No protection */ + *ofs = 0; + *len = 0; + return; + } + + min_prot_len = spi_nor_get_min_prot_length_sr(nor); + *len = min_prot_len << (bp - 1); + + if (*len > mtd->size) + *len = mtd->size; + + if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask) + *ofs = 0; + else + *ofs = mtd->size - *len; +} + +/* + * Return 1 if the entire region is locked (if @locked is true) or unlocked (if + * @locked is false); 0 otherwise + */ +static int spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, + uint64_t len, u8 sr, bool locked) +{ + loff_t lock_offs; + uint64_t lock_len; + + if (!len) + return 1; + + spi_nor_get_locked_range_sr(nor, sr, &lock_offs, &lock_len); + + if (locked) + /* Requested range is a sub-range of locked range */ + return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs); + else + /* Requested range does not overlap with locked range */ + return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs); +} + +static int spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, + u8 sr) +{ + return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true); +} + +static int 
spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, + u8 sr) +{ + return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false); +} + +/* + * Lock a region of the flash. Compatible with ST Micro and similar flash. + * Supports the block protection bits BP{0,1,2}/BP{0,1,2,3} in the status + * register + * (SR). Does not support these features found in newer SR bitfields: + * - SEC: sector/block protect - only handle SEC=0 (block protect) + * - CMP: complement protect - only support CMP=0 (range is not complemented) + * + * Support for the following is provided conditionally for some flash: + * - TB: top/bottom protect + * + * Sample table portion for 8MB flash (Winbond w25q64fw): + * + * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion + * -------------------------------------------------------------------------- + * X | X | 0 | 0 | 0 | NONE | NONE + * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64 + * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32 + * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16 + * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8 + * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4 + * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2 + * X | X | 1 | 1 | 1 | 8 MB | ALL + * ------|-------|-------|-------|-------|---------------|------------------- + * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64 + * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32 + * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16 + * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8 + * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4 + * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2 + * + * Returns negative on errors, 0 on success. + */ +static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + struct mtd_info *mtd = &nor->mtd; + u64 min_prot_len; + int ret, status_old, status_new; + u8 mask = spi_nor_get_sr_bp_mask(nor); + u8 tb_mask = spi_nor_get_sr_tb_mask(nor); + u8 pow, val; + loff_t lock_len; + bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; + bool use_top; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + status_old = nor->bouncebuf[0]; + + /* If nothing in our range is unlocked, we don't need to do anything */ + if (spi_nor_is_locked_sr(nor, ofs, len, status_old)) + return 0; + + /* If anything below us is unlocked, we can't use 'bottom' protection */ + if (!spi_nor_is_locked_sr(nor, 0, ofs, status_old)) + can_be_bottom = false; + + /* If anything above us is unlocked, we can't use 'top' protection */ + if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len), + status_old)) + can_be_top = false; + + if (!can_be_bottom && !can_be_top) + return -EINVAL; + + /* Prefer top, if both are valid */ + use_top = can_be_top; + + /* lock_len: length of region that should end up locked */ + if (use_top) + lock_len = mtd->size - ofs; + else + lock_len = ofs + len; + + if (lock_len == mtd->size) { + val = mask; + } else { + min_prot_len = spi_nor_get_min_prot_length_sr(nor); + pow = ilog2(lock_len) - ilog2(min_prot_len) + 1; + val = pow << SR_BP_SHIFT; + + if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3) + val = (val & ~SR_BP3) | SR_BP3_BIT6; + + if (val & ~mask) + return -EINVAL; + + /* Don't "lock" with no region! 
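+ * A BP field of zero would mean "protect none", which is not what the
+ * caller asked for.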
*/ + if (!(val & mask)) + return -EINVAL; + } + + status_new = (status_old & ~mask & ~tb_mask) | val; + + /* Disallow further writes if WP pin is asserted */ + status_new |= SR_SRWD; + + if (!use_top) + status_new |= tb_mask; + + /* Don't bother if they're the same */ + if (status_new == status_old) + return 0; + + /* Only modify protection if it will not unlock other areas */ + if ((status_new & mask) < (status_old & mask)) + return -EINVAL; + + return spi_nor_write_sr_and_check(nor, status_new); +} + +/* + * Unlock a region of the flash. See spi_nor_sr_lock() for more info + * + * Returns negative on errors, 0 on success. + */ +static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + struct mtd_info *mtd = &nor->mtd; + u64 min_prot_len; + int ret, status_old, status_new; + u8 mask = spi_nor_get_sr_bp_mask(nor); + u8 tb_mask = spi_nor_get_sr_tb_mask(nor); + u8 pow, val; + loff_t lock_len; + bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; + bool use_top; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + status_old = nor->bouncebuf[0]; + + /* If nothing in our range is locked, we don't need to do anything */ + if (spi_nor_is_unlocked_sr(nor, ofs, len, status_old)) + return 0; + + /* If anything below us is locked, we can't use 'top' protection */ + if (!spi_nor_is_unlocked_sr(nor, 0, ofs, status_old)) + can_be_top = false; + + /* If anything above us is locked, we can't use 'bottom' protection */ + if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len), + status_old)) + can_be_bottom = false; + + if (!can_be_bottom && !can_be_top) + return -EINVAL; + + /* Prefer top, if both are valid */ + use_top = can_be_top; + + /* lock_len: length of region that should remain locked */ + if (use_top) + lock_len = mtd->size - (ofs + len); + else + lock_len = ofs; + + if (lock_len == 0) { + val = 0; /* fully unlocked */ + } else { + min_prot_len = spi_nor_get_min_prot_length_sr(nor); + pow = ilog2(lock_len) - ilog2(min_prot_len) + 1; + val = pow << SR_BP_SHIFT; + + if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3) + val = (val & ~SR_BP3) | SR_BP3_BIT6; + + /* Some power-of-two sizes are not supported */ + if (val & ~mask) + return -EINVAL; + } + + status_new = (status_old & ~mask & ~tb_mask) | val; + + /* Don't protect status register if we're fully unlocked */ + if (lock_len == 0) + status_new &= ~SR_SRWD; + + if (!use_top) + status_new |= tb_mask; + + /* Don't bother if they're the same */ + if (status_new == status_old) + return 0; + + /* Only modify protection if it will not lock other areas */ + if ((status_new & mask) > (status_old & mask)) + return -EINVAL; + + return spi_nor_write_sr_and_check(nor, status_new); +} + +/* + * Check if a region of the flash is (completely) locked. See spi_nor_sr_lock() + * for more info. + * + * Returns 1 if entire region is locked, 0 if any portion is unlocked, and + * negative on errors. 
+ */ +static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + int ret; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + return spi_nor_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]); +} + +static const struct spi_nor_locking_ops spi_nor_sr_locking_ops = { + .lock = spi_nor_sr_lock, + .unlock = spi_nor_sr_unlock, + .is_locked = spi_nor_sr_is_locked, +}; + +static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + int ret; + + ret = spi_nor_lock_and_prep(nor); + if (ret) + return ret; + + ret = nor->params->locking_ops->lock(nor, ofs, len); + + spi_nor_unlock_and_unprep(nor); + return ret; +} + +static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + int ret; + + ret = spi_nor_lock_and_prep(nor); + if (ret) + return ret; + + ret = nor->params->locking_ops->unlock(nor, ofs, len); + + spi_nor_unlock_and_unprep(nor); + return ret; +} + +static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + int ret; + + ret = spi_nor_lock_and_prep(nor); + if (ret) + return ret; + + ret = nor->params->locking_ops->is_locked(nor, ofs, len); + + spi_nor_unlock_and_unprep(nor); + return ret; +} + +/** + * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status + * Register 1. + * @nor: pointer to a 'struct spi_nor' + * + * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor) +{ + int ret; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6) + return 0; + + nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6; + + return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]); +} + +/** + * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status + * Register 2. + * @nor: pointer to a 'struct spi_nor'. + * + * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor) +{ + int ret; + + if (nor->flags & SNOR_F_NO_READ_CR) + return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1); + + ret = spi_nor_read_cr(nor, nor->bouncebuf); + if (ret) + return ret; + + if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1) + return 0; + + nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1; + + return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]); +} + +/** + * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2. + * @nor: pointer to a 'struct spi_nor' + * + * Set the Quad Enable (QE) bit in the Status Register 2. + * + * This is one of the procedures to set the QE bit described in the SFDP + * (JESD216 rev B) specification but no manufacturer using this procedure has + * been identified yet, hence the name of the function. + * + * Return: 0 on success, -errno otherwise. + */ +int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor) +{ + u8 *sr2 = nor->bouncebuf; + int ret; + u8 sr2_written; + + /* Check current Quad Enable bit value. */ + ret = spi_nor_read_sr2(nor, sr2); + if (ret) + return ret; + if (*sr2 & SR2_QUAD_EN_BIT7) + return 0; + + /* Update the Quad Enable bit. */ + *sr2 |= SR2_QUAD_EN_BIT7; + + ret = spi_nor_write_sr2(nor, sr2); + if (ret) + return ret; + + sr2_written = *sr2; + + /* Read back and check it. 
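+ * A mismatch means the write did not take effect, e.g. because the part
+ * is hardware write protected.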
*/ + ret = spi_nor_read_sr2(nor, sr2); + if (ret) + return ret; + + if (*sr2 != sr2_written) { + dev_dbg(nor->dev, "SR2: Read back test failed\n"); + return -EIO; + } + + return 0; +} + +static const struct spi_nor_manufacturer *manufacturers[] = { + &spi_nor_atmel, + &spi_nor_catalyst, + &spi_nor_eon, + &spi_nor_esmt, + &spi_nor_everspin, + &spi_nor_fujitsu, + &spi_nor_gigadevice, + &spi_nor_intel, + &spi_nor_issi, + &spi_nor_macronix, + &spi_nor_micron, + &spi_nor_st, + &spi_nor_spansion, + &spi_nor_sst, + &spi_nor_winbond, + &spi_nor_xilinx, + &spi_nor_xmc, +}; + +static const struct flash_info * +spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts, + const u8 *id) +{ + unsigned int i; + + for (i = 0; i < nparts; i++) { + if (parts[i].id_len && + !memcmp(parts[i].id, id, parts[i].id_len)) + return &parts[i]; + } + + return NULL; +} + +static const struct flash_info *spi_nor_read_id(struct spi_nor *nor) +{ + const struct flash_info *info; + u8 *id = nor->bouncebuf; + unsigned int i; + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1)); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id, + SPI_NOR_MAX_ID_LEN); + } + if (ret) { + dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret); + return ERR_PTR(ret); + } + + for (i = 0; i < ARRAY_SIZE(manufacturers); i++) { + info = spi_nor_search_part_by_id(manufacturers[i]->parts, + manufacturers[i]->nparts, + id); + if (info) { + nor->manufacturer = manufacturers[i]; + return info; + } + } + + dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n", + SPI_NOR_MAX_ID_LEN, id); + return ERR_PTR(-ENODEV); +} + +static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + ssize_t ret; + + dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len); + + ret = spi_nor_lock_and_prep(nor); + if (ret) + return ret; + + while (len) { + loff_t addr = from; + + addr = spi_nor_convert_addr(nor, addr); + + ret = spi_nor_read_data(nor, addr, len, buf); + if (ret == 0) { + /* We shouldn't see 0-length reads */ + ret = -EIO; + goto read_err; + } + if (ret < 0) + goto read_err; + + WARN_ON(ret > len); + *retlen += ret; + buf += ret; + from += ret; + len -= ret; + } + ret = 0; + +read_err: + spi_nor_unlock_and_unprep(nor); + return ret; +} + +/* + * Write an address range to the nor chip. Data must be written in + * FLASH_PAGESIZE chunks. The address range may be any size provided + * it is within the physical boundaries. + */ +static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + size_t page_offset, page_remain, i; + ssize_t ret; + + dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); + + ret = spi_nor_lock_and_prep(nor); + if (ret) + return ret; + + for (i = 0; i < len; ) { + ssize_t written; + loff_t addr = to + i; + + /* + * If page_size is a power of two, the offset can be quickly + * calculated with an AND operation. On the other cases we + * need to do a modulus operation (more expensive). + * Power of two numbers have only one bit set and we can use + * the instruction hweight32 to detect if we need to do a + * modulus (do_div()) or not. 
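+ * For example, with a 256 byte page, addr & (256 - 1) yields the in-page
+ * offset directly.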
+ */ + if (hweight32(nor->page_size) == 1) { + page_offset = addr & (nor->page_size - 1); + } else { + uint64_t aux = addr; + + page_offset = do_div(aux, nor->page_size); + } + /* the size of data remaining on the first page */ + page_remain = min_t(size_t, + nor->page_size - page_offset, len - i); + + addr = spi_nor_convert_addr(nor, addr); + + ret = spi_nor_write_enable(nor); + if (ret) + goto write_err; + + ret = spi_nor_write_data(nor, addr, page_remain, buf + i); + if (ret < 0) + goto write_err; + written = ret; + + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto write_err; + *retlen += written; + i += written; + } + +write_err: + spi_nor_unlock_and_unprep(nor); + return ret; +} + +static int spi_nor_check(struct spi_nor *nor) +{ + if (!nor->dev || + (!nor->spimem && !nor->controller_ops) || + (!nor->spimem && nor->controller_ops && + (!nor->controller_ops->read || + !nor->controller_ops->write || + !nor->controller_ops->read_reg || + !nor->controller_ops->write_reg))) { + pr_err("spi-nor: please fill all the necessary fields!\n"); + return -EINVAL; + } + + if (nor->spimem && nor->controller_ops) { + dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n"); + return -EINVAL; + } + + return 0; +} + +static void +spi_nor_set_read_settings(struct spi_nor_read_command *read, + u8 num_mode_clocks, + u8 num_wait_states, + u8 opcode, + enum spi_nor_protocol proto) +{ + read->num_mode_clocks = num_mode_clocks; + read->num_wait_states = num_wait_states; + read->opcode = opcode; + read->proto = proto; +} + +void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode, + enum spi_nor_protocol proto) +{ + pp->opcode = opcode; + pp->proto = proto; +} + +static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size) +{ + size_t i; + + for (i = 0; i < size; i++) + if (table[i][0] == (int)hwcaps) + return table[i][1]; + + return -EINVAL; +} + +int spi_nor_hwcaps_read2cmd(u32 hwcaps) +{ + static const int hwcaps_read2cmd[][2] = { + { SNOR_HWCAPS_READ, SNOR_CMD_READ }, + { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST }, + { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR }, + { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 }, + { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 }, + { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 }, + { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR }, + { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 }, + { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 }, + { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 }, + { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR }, + { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 }, + { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 }, + { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 }, + { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR }, + }; + + return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd, + ARRAY_SIZE(hwcaps_read2cmd)); +} + +static int spi_nor_hwcaps_pp2cmd(u32 hwcaps) +{ + static const int hwcaps_pp2cmd[][2] = { + { SNOR_HWCAPS_PP, SNOR_CMD_PP }, + { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 }, + { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 }, + { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 }, + { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 }, + { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 }, + { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 }, + }; + + return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd, + ARRAY_SIZE(hwcaps_pp2cmd)); +} + +/** + * spi_nor_spimem_check_op - check if the operation is supported + * by controller + *@nor: pointer to a 'struct spi_nor' + *@op: 
pointer to op template to be checked + * + * Returns 0 if operation is supported, -ENOTSUPP otherwise. + */ +static int spi_nor_spimem_check_op(struct spi_nor *nor, + struct spi_mem_op *op) +{ + /* + * First test with 4 address bytes. The opcode itself might + * be a 3B addressing opcode but we don't care, because + * SPI controller implementation should not check the opcode, + * but just the sequence. + */ + op->addr.nbytes = 4; + if (!spi_mem_supports_op(nor->spimem, op)) { + if (nor->mtd.size > SZ_16M) + return -ENOTSUPP; + + /* If flash size <= 16MB, 3 address bytes are sufficient */ + op->addr.nbytes = 3; + if (!spi_mem_supports_op(nor->spimem, op)) + return -ENOTSUPP; + } + + return 0; +} + +/** + * spi_nor_spimem_check_readop - check if the read op is supported + * by controller + *@nor: pointer to a 'struct spi_nor' + *@read: pointer to op template to be checked + * + * Returns 0 if operation is supported, -ENOTSUPP otherwise. + */ +static int spi_nor_spimem_check_readop(struct spi_nor *nor, + const struct spi_nor_read_command *read) +{ + struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1), + SPI_MEM_OP_ADDR(3, 0, 1), + SPI_MEM_OP_DUMMY(0, 1), + SPI_MEM_OP_DATA_IN(0, NULL, 1)); + + op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto); + op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto); + op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto); + op.dummy.buswidth = op.addr.buswidth; + op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) * + op.dummy.buswidth / 8; + + return spi_nor_spimem_check_op(nor, &op); +} + +/** + * spi_nor_spimem_check_pp - check if the page program op is supported + * by controller + *@nor: pointer to a 'struct spi_nor' + *@pp: pointer to op template to be checked + * + * Returns 0 if operation is supported, -ENOTSUPP otherwise. + */ +static int spi_nor_spimem_check_pp(struct spi_nor *nor, + const struct spi_nor_pp_command *pp) +{ + struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1), + SPI_MEM_OP_ADDR(3, 0, 1), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(0, NULL, 1)); + + op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto); + op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto); + op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto); + + return spi_nor_spimem_check_op(nor, &op); +} + +/** + * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol + * based on SPI controller capabilities + * @nor: pointer to a 'struct spi_nor' + * @hwcaps: pointer to resulting capabilities after adjusting + * according to controller and flash's capability + */ +static void +spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps) +{ + struct spi_nor_flash_parameter *params = nor->params; + unsigned int cap; + + /* DTR modes are not supported yet, mask them all. */ + *hwcaps &= ~SNOR_HWCAPS_DTR; + + /* X-X-X modes are not supported yet, mask them all. 
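+ * (X-X-X means the opcode, address and data phases all use 2, 4 or 8
+ * I/O lines.)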
 */
+ *hwcaps &= ~SNOR_HWCAPS_X_X_X;
+
+ for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
+ int rdidx, ppidx;
+
+ if (!(*hwcaps & BIT(cap)))
+ continue;
+
+ rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
+ if (rdidx >= 0 &&
+ spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
+ *hwcaps &= ~BIT(cap);
+
+ ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
+ if (ppidx < 0)
+ continue;
+
+ if (spi_nor_spimem_check_pp(nor,
+ &params->page_programs[ppidx]))
+ *hwcaps &= ~BIT(cap);
+ }
+}
+
+/**
+ * spi_nor_set_erase_type() - set a SPI NOR erase type
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type
+ * @opcode: the SPI command op code to erase the sector/block
+ */
+void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
+ u8 opcode)
+{
+ erase->size = size;
+ erase->opcode = opcode;
+ /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
+ erase->size_shift = ffs(erase->size) - 1;
+ erase->size_mask = (1 << erase->size_shift) - 1;
+}
+
+/**
+ * spi_nor_mask_erase_type() - mask out a SPI NOR erase type
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ */
+void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase)
+{
+ erase->size = 0;
+}
+
+/**
+ * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
+ * @map: the erase map of the SPI NOR
+ * @erase_mask: bitmask encoding erase types that can erase the entire
+ * flash memory
+ * @flash_size: the spi nor flash memory size
+ */
+void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
+ u8 erase_mask, u64 flash_size)
+{
+ /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
+ map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
+ SNOR_LAST_REGION;
+ map->uniform_region.size = flash_size;
+ map->regions = &map->uniform_region;
+ map->uniform_erase_type = erase_mask;
+}
+
+int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ int ret;
+
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->post_bfpt) {
+ ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
+ bfpt, params);
+ if (ret)
+ return ret;
+ }
+
+ if (nor->info->fixups && nor->info->fixups->post_bfpt)
+ return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
+ params);
+
+ return 0;
+}
+
+static int spi_nor_select_read(struct spi_nor *nor,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
+ const struct spi_nor_read_command *read;
+
+ if (best_match < 0)
+ return -EINVAL;
+
+ cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ read = &nor->params->reads[cmd];
+ nor->read_opcode = read->opcode;
+ nor->read_proto = read->proto;
+
+ /*
+ * In the SPI NOR framework, we don't need to make the difference
+ * between mode clock cycles and wait state clock cycles.
+ * Indeed, the value of the mode clock cycles is used by a QSPI
+ * flash memory to know whether it should enter or leave its 0-4-4
+ * (Continuous Read / XIP) mode.
+ * eXecution In Place is out of the scope of the mtd sub-system.
+ * Hence we choose to merge both mode and wait state clock cycles
+ * into the so called dummy clock cycles.
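+ * Both counts are therefore simply added together into nor->read_dummy
+ * below.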
+ */
+ nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
+ return 0;
+}
+
+static int spi_nor_select_pp(struct spi_nor *nor,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
+ const struct spi_nor_pp_command *pp;
+
+ if (best_match < 0)
+ return -EINVAL;
+
+ cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ pp = &nor->params->page_programs[cmd];
+ nor->program_opcode = pp->opcode;
+ nor->write_proto = pp->proto;
+ return 0;
+}
+
+/**
+ * spi_nor_select_uniform_erase() - select optimum uniform erase type
+ * @map: the erase map of the SPI NOR
+ * @wanted_size: the erase type size to search for. Contains the value of
+ * info->sector_size or of the "small sector" size in case
+ * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
+ *
+ * Once the optimum uniform sector erase command is found, disable all the
+ * others.
+ *
+ * Return: pointer to erase type on success, NULL otherwise.
+ */
+static const struct spi_nor_erase_type *
+spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
+ const u32 wanted_size)
+{
+ const struct spi_nor_erase_type *tested_erase, *erase = NULL;
+ int i;
+ u8 uniform_erase_type = map->uniform_erase_type;
+
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ if (!(uniform_erase_type & BIT(i)))
+ continue;
+
+ tested_erase = &map->erase_type[i];
+
+ /*
+ * If the current erase size is the wanted one, stop here:
+ * we have found the right uniform Sector Erase command.
+ */
+ if (tested_erase->size == wanted_size) {
+ erase = tested_erase;
+ break;
+ }
+
+ /*
+ * Otherwise, the current erase size is still a valid candidate.
+ * Select the biggest valid candidate.
+ */
+ if (!erase && tested_erase->size)
+ erase = tested_erase;
+ /* keep iterating to find the wanted_size */
+ }
+
+ if (!erase)
+ return NULL;
+
+ /* Disable all other Sector Erase commands. */
+ map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
+ map->uniform_erase_type |= BIT(erase - map->erase_type);
+ return erase;
+}
+
+static int spi_nor_select_erase(struct spi_nor *nor)
+{
+ struct spi_nor_erase_map *map = &nor->params->erase_map;
+ const struct spi_nor_erase_type *erase = NULL;
+ struct mtd_info *mtd = &nor->mtd;
+ u32 wanted_size = nor->info->sector_size;
+ int i;
+
+ /*
+ * The previous implementation handling Sector Erase commands assumed
+ * that the SPI flash memory has a uniform layout, then used only one
+ * of the supported erase sizes for all Sector Erase commands.
+ * So to be backward compatible, the new implementation also tries to
+ * manage the SPI flash memory as uniform with a single erase sector
+ * size, when possible.
+ */
+#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+ /* prefer "small sector" erase if possible */
+ wanted_size = 4096u;
+#endif
+
+ if (spi_nor_has_uniform_erase(nor)) {
+ erase = spi_nor_select_uniform_erase(map, wanted_size);
+ if (!erase)
+ return -EINVAL;
+ nor->erase_opcode = erase->opcode;
+ mtd->erasesize = erase->size;
+ return 0;
+ }
+
+ /*
+ * For non-uniform SPI flash memory, set mtd->erasesize to the
+ * maximum erase sector size. No need to set nor->erase_opcode.
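+ * The opcode is chosen per erase command by spi_nor_erase_multi_sectors()
+ * for the region being erased.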
+ */ + for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { + if (map->erase_type[i].size) { + erase = &map->erase_type[i]; + break; + } + } + + if (!erase) + return -EINVAL; + + mtd->erasesize = erase->size; + return 0; +} + +static int spi_nor_default_setup(struct spi_nor *nor, + const struct spi_nor_hwcaps *hwcaps) +{ + struct spi_nor_flash_parameter *params = nor->params; + u32 ignored_mask, shared_mask; + int err; + + /* + * Keep only the hardware capabilities supported by both the SPI + * controller and the SPI flash memory. + */ + shared_mask = hwcaps->mask & params->hwcaps.mask; + + if (nor->spimem) { + /* + * When called from spi_nor_probe(), all caps are set and we + * need to discard some of them based on what the SPI + * controller actually supports (using spi_mem_supports_op()). + */ + spi_nor_spimem_adjust_hwcaps(nor, &shared_mask); + } else { + /* + * SPI n-n-n protocols are not supported when the SPI + * controller directly implements the spi_nor interface. + * Yet another reason to switch to spi-mem. + */ + ignored_mask = SNOR_HWCAPS_X_X_X; + if (shared_mask & ignored_mask) { + dev_dbg(nor->dev, + "SPI n-n-n protocols are not supported.\n"); + shared_mask &= ~ignored_mask; + } + } + + /* Select the (Fast) Read command. */ + err = spi_nor_select_read(nor, shared_mask); + if (err) { + dev_dbg(nor->dev, + "can't select read settings supported by both the SPI controller and memory.\n"); + return err; + } + + /* Select the Page Program command. */ + err = spi_nor_select_pp(nor, shared_mask); + if (err) { + dev_dbg(nor->dev, + "can't select write settings supported by both the SPI controller and memory.\n"); + return err; + } + + /* Select the Sector Erase command. */ + err = spi_nor_select_erase(nor); + if (err) { + dev_dbg(nor->dev, + "can't select erase settings supported by both the SPI controller and memory.\n"); + return err; + } + + return 0; +} + +static int spi_nor_setup(struct spi_nor *nor, + const struct spi_nor_hwcaps *hwcaps) +{ + if (!nor->params->setup) + return 0; + + return nor->params->setup(nor, hwcaps); +} + +/** + * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and + * settings based on MFR register and ->default_init() hook. + * @nor: pointer to a 'struct spi_nor'. + */ +static void spi_nor_manufacturer_init_params(struct spi_nor *nor) +{ + if (nor->manufacturer && nor->manufacturer->fixups && + nor->manufacturer->fixups->default_init) + nor->manufacturer->fixups->default_init(nor); + + if (nor->info->fixups && nor->info->fixups->default_init) + nor->info->fixups->default_init(nor); +} + +/** + * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings + * based on JESD216 SFDP standard. + * @nor: pointer to a 'struct spi_nor'. + * + * The method has a roll-back mechanism: in case the SFDP parsing fails, the + * legacy flash parameters and settings will be restored. + */ +static void spi_nor_sfdp_init_params(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter sfdp_params; + + memcpy(&sfdp_params, nor->params, sizeof(sfdp_params)); + + if (spi_nor_parse_sfdp(nor, nor->params)) { + memcpy(nor->params, &sfdp_params, sizeof(*nor->params)); + nor->addr_width = 0; + nor->flags &= ~SNOR_F_4B_OPCODES; + } +} + +/** + * spi_nor_info_init_params() - Initialize the flash's parameters and settings + * based on nor->info data. + * @nor: pointer to a 'struct spi_nor'. 
+ */
+static void spi_nor_info_init_params(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spi_nor_erase_map *map = &params->erase_map;
+ const struct flash_info *info = nor->info;
+ struct device_node *np = spi_nor_get_flash_node(nor);
+ u8 i, erase_mask;
+
+ /* Initialize legacy flash parameters and settings. */
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+ params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
+ params->setup = spi_nor_default_setup;
+ /* Default to 16-bit Write Status (01h) Command */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+ /* Set SPI NOR sizes. */
+ params->size = (u64)info->sector_size * info->n_sectors;
+ params->page_size = info->page_size;
+
+ if (!(info->flags & SPI_NOR_NO_FR)) {
+ /* Default to Fast Read for DT and non-DT platform devices. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+
+ /* Mask out Fast Read if not requested at DT instantiation. */
+ if (np && !of_property_read_bool(np, "m25p,fast-read"))
+ params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+ }
+
+ /* (Fast) Read settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
+ 0, 0, SPINOR_OP_READ,
+ SNOR_PROTO_1_1_1);
+
+ if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
+ 0, 8, SPINOR_OP_READ_FAST,
+ SNOR_PROTO_1_1_1);
+
+ if (info->flags & SPI_NOR_DUAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
+ 0, 8, SPINOR_OP_READ_1_1_2,
+ SNOR_PROTO_1_1_2);
+ }
+
+ if (info->flags & SPI_NOR_QUAD_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
+ 0, 8, SPINOR_OP_READ_1_1_4,
+ SNOR_PROTO_1_1_4);
+ }
+
+ if (info->flags & SPI_NOR_OCTAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
+ 0, 8, SPINOR_OP_READ_1_1_8,
+ SNOR_PROTO_1_1_8);
+ }
+
+ /* Page Program settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_PP;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+
+ /*
+ * Sector Erase settings. Sort Erase Types in ascending order, with the
+ * smallest erase size starting at BIT(0).
+ */
+ erase_mask = 0;
+ i = 0;
+ if (info->flags & SECT_4K_PMC) {
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], 4096u,
+ SPINOR_OP_BE_4K_PMC);
+ i++;
+ } else if (info->flags & SECT_4K) {
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], 4096u,
+ SPINOR_OP_BE_4K);
+ i++;
+ }
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
+ SPINOR_OP_SE);
+ spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
+}
+
+/**
+ * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
+ * after SFDP has been parsed (is also called for SPI NORs that do not
+ * support RDSFDP).
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Typically used to tweak various parameters that could not be extracted by
+ * other means (i.e. when information provided by the SFDP/flash_info tables
+ * is incomplete or wrong).
+ */
+static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
+{
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->post_sfdp)
+ nor->manufacturer->fixups->post_sfdp(nor);
+
+ if (nor->info->fixups && nor->info->fixups->post_sfdp)
+ nor->info->fixups->post_sfdp(nor);
+}
+
+/**
+ * spi_nor_late_init_params() - Late initialization of default flash parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Used to set default flash parameters and settings when the ->default_init()
+ * hook or the SFDP parser leave voids.
+ */
+static void spi_nor_late_init_params(struct spi_nor *nor)
+{
+ /*
+ * NOR protection support. When locking_ops are not provided, we pick
+ * the default ones.
+ */
+ if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
+ nor->params->locking_ops = &spi_nor_sr_locking_ops;
+}
+
+/**
+ * spi_nor_init_params() - Initialize the flash's parameters and settings.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * The flash parameters and settings are initialized based on a sequence of
+ * calls that are ordered by priority:
+ *
+ * 1/ Default flash parameters initialization. The initializations are done
+ * based on nor->info data:
+ * spi_nor_info_init_params()
+ *
+ * which can be overwritten by:
+ * 2/ Manufacturer flash parameters initialization. The initializations are
+ * done based on MFR register, or when the decisions can not be done solely
+ * based on MFR, by using specific flash_info tweaks, ->default_init():
+ * spi_nor_manufacturer_init_params()
+ *
+ * which can be overwritten by:
+ * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
+ * should be more accurate than the above.
+ * spi_nor_sfdp_init_params()
+ *
+ * Please note that there is a ->post_bfpt() fixup hook that can overwrite
+ * the flash parameters and settings immediately after parsing the Basic
+ * Flash Parameter Table.
+ *
+ * which can be overwritten by:
+ * 4/ Post SFDP flash parameters initialization. Used to tweak various
+ * parameters that could not be extracted by other means (i.e. when
+ * information provided by the SFDP/flash_info tables is incomplete or
+ * wrong).
+ * spi_nor_post_sfdp_fixups()
+ *
+ * 5/ Late default flash parameters initialization, used when the
+ * ->default_init() hook or the SFDP parser do not set specific params.
+ * spi_nor_late_init_params()
+ */
+static int spi_nor_init_params(struct spi_nor *nor)
+{
+ nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
+ if (!nor->params)
+ return -ENOMEM;
+
+ spi_nor_info_init_params(nor);
+
+ spi_nor_manufacturer_init_params(nor);
+
+ if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
+ !(nor->info->flags & SPI_NOR_SKIP_SFDP))
+ spi_nor_sfdp_init_params(nor);
+
+ spi_nor_post_sfdp_fixups(nor);
+
+ spi_nor_late_init_params(nor);
+
+ return 0;
+}
+
+/**
+ * spi_nor_quad_enable() - enable Quad I/O if needed.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_quad_enable(struct spi_nor *nor)
+{
+ if (!nor->params->quad_enable)
+ return 0;
+
+ if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
+ spi_nor_get_protocol_width(nor->write_proto) == 4))
+ return 0;
+
+ return nor->params->quad_enable(nor);
+}
+
+/**
+ * spi_nor_try_unlock_all() - Tries to unlock the entire flash memory array.
+ * @nor: pointer to a 'struct spi_nor'.
+ * + * Some SPI NOR flashes are write protected by default after a power-on reset + * cycle, in order to avoid inadvertent writes during power-up. Backward + * compatibility imposes to unlock the entire flash memory array at power-up + * by default. + * + * Unprotecting the entire flash array will fail for boards which are hardware + * write-protected. Thus any errors are ignored. + */ +static void spi_nor_try_unlock_all(struct spi_nor *nor) +{ + int ret; + + if (!(nor->flags & SNOR_F_HAS_LOCK)) + return; + + ret = spi_nor_unlock(&nor->mtd, 0, nor->params->size); + if (ret) + dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n"); +} + +static int spi_nor_init(struct spi_nor *nor) +{ + int err; + + err = spi_nor_quad_enable(nor); + if (err) { + dev_dbg(nor->dev, "quad mode not supported\n"); + return err; + } + + spi_nor_try_unlock_all(nor); + + if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) { + /* + * If the RESET# pin isn't hooked up properly, or the system + * otherwise doesn't perform a reset command in the boot + * sequence, it's impossible to 100% protect against unexpected + * reboots (e.g., crashes). Warn the user (or hopefully, system + * designer) that this is bad. + */ + WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET, + "enabling reset hack; may not recover from unexpected reboots\n"); + nor->params->set_4byte_addr_mode(nor, true); + } + + return 0; +} + +/* mtd resume handler */ +static void spi_nor_resume(struct mtd_info *mtd) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + struct device *dev = nor->dev; + int ret; + + /* re-initialize the nor chip */ + ret = spi_nor_init(nor); + if (ret) + dev_err(dev, "resume() failed\n"); +} + +static int spi_nor_get_device(struct mtd_info *mtd) +{ + struct mtd_info *master = mtd_get_master(mtd); + struct spi_nor *nor = mtd_to_spi_nor(master); + struct device *dev; + + if (nor->spimem) + dev = nor->spimem->spi->controller->dev.parent; + else + dev = nor->dev; + + if (!try_module_get(dev->driver->owner)) + return -ENODEV; + + return 0; +} + +static void spi_nor_put_device(struct mtd_info *mtd) +{ + struct mtd_info *master = mtd_get_master(mtd); + struct spi_nor *nor = mtd_to_spi_nor(master); + struct device *dev; + + if (nor->spimem) + dev = nor->spimem->spi->controller->dev.parent; + else + dev = nor->dev; + + module_put(dev->driver->owner); +} + +void spi_nor_restore(struct spi_nor *nor) +{ + /* restore the addressing mode */ + if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) && + nor->flags & SNOR_F_BROKEN_RESET) + nor->params->set_4byte_addr_mode(nor, false); +} +EXPORT_SYMBOL_GPL(spi_nor_restore); + +static const struct flash_info *spi_nor_match_id(struct spi_nor *nor, + const char *name) +{ + unsigned int i, j; + + for (i = 0; i < ARRAY_SIZE(manufacturers); i++) { + for (j = 0; j < manufacturers[i]->nparts; j++) { + if (!strcmp(name, manufacturers[i]->parts[j].name)) { + nor->manufacturer = manufacturers[i]; + return &manufacturers[i]->parts[j]; + } + } + } + + return NULL; +} + +static int spi_nor_set_addr_width(struct spi_nor *nor) +{ + if (nor->addr_width) { + /* already configured from SFDP */ + } else if (nor->info->addr_width) { + nor->addr_width = nor->info->addr_width; + } else { + nor->addr_width = 3; + } + + if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) { + /* enable 4-byte addressing if the device exceeds 16MiB */ + nor->addr_width = 4; + } + + if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) { + dev_dbg(nor->dev, "address width is too large: %u\n", + nor->addr_width); + 
return -EINVAL; + } + + /* Set 4byte opcodes when possible. */ + if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES && + !(nor->flags & SNOR_F_HAS_4BAIT)) + spi_nor_set_4byte_opcodes(nor); + + return 0; +} + +static void spi_nor_debugfs_init(struct spi_nor *nor, + const struct flash_info *info) +{ + struct mtd_info *mtd = &nor->mtd; + + mtd->dbg.partname = info->name; + mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN", + info->id_len, info->id); +} + +static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor, + const char *name) +{ + const struct flash_info *info = NULL; + + if (name) + info = spi_nor_match_id(nor, name); + /* Try to auto-detect if chip name wasn't specified or not found */ + if (!info) + info = spi_nor_read_id(nor); + if (IS_ERR_OR_NULL(info)) + return ERR_PTR(-ENOENT); + + /* + * If caller has specified name of flash model that can normally be + * detected using JEDEC, let's verify it. + */ + if (name && info->id_len) { + const struct flash_info *jinfo; + + jinfo = spi_nor_read_id(nor); + if (IS_ERR(jinfo)) { + return jinfo; + } else if (jinfo != info) { + /* + * JEDEC knows better, so overwrite platform ID. We + * can't trust partitions any longer, but we'll let + * mtd apply them anyway, since some partitions may be + * marked read-only, and we don't want to lose that + * information, even if it's not 100% accurate. + */ + dev_warn(nor->dev, "found %s, expected %s\n", + jinfo->name, info->name); + info = jinfo; + } + } + + return info; +} + +int spi_nor_scan(struct spi_nor *nor, const char *name, + const struct spi_nor_hwcaps *hwcaps) +{ + const struct flash_info *info; + struct device *dev = nor->dev; + struct mtd_info *mtd = &nor->mtd; + struct device_node *np = spi_nor_get_flash_node(nor); + int ret; + int i; + + ret = spi_nor_check(nor); + if (ret) + return ret; + + /* Reset SPI protocol for all commands. */ + nor->reg_proto = SNOR_PROTO_1_1_1; + nor->read_proto = SNOR_PROTO_1_1_1; + nor->write_proto = SNOR_PROTO_1_1_1; + + /* + * We need the bounce buffer early to read/write registers when going + * through the spi-mem layer (buffers have to be DMA-able). + * For spi-mem drivers, we'll reallocate a new buffer if + * nor->page_size turns out to be greater than PAGE_SIZE (which + * shouldn't happen before long since NOR pages are usually less + * than 1KB) after spi_nor_scan() returns. + */ + nor->bouncebuf_size = PAGE_SIZE; + nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size, + GFP_KERNEL); + if (!nor->bouncebuf) + return -ENOMEM; + + info = spi_nor_get_flash_info(nor, name); + if (IS_ERR(info)) + return PTR_ERR(info); + + nor->info = info; + + spi_nor_debugfs_init(nor, info); + + mutex_init(&nor->lock); + + /* + * Make sure the XSR_RDY flag is set before calling + * spi_nor_wait_till_ready(). Xilinx S3AN share MFR + * with Atmel SPI NOR. 
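+ * Both use the 0x1f manufacturer ID, so the flag is taken from the
+ * flash_info entry rather than derived from the JEDEC ID.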
+ */ + if (info->flags & SPI_NOR_XSR_RDY) + nor->flags |= SNOR_F_READY_XSR_RDY; + + if (info->flags & SPI_NOR_HAS_LOCK) + nor->flags |= SNOR_F_HAS_LOCK; + + mtd->_write = spi_nor_write; + + /* Init flash parameters based on flash_info struct and SFDP */ + ret = spi_nor_init_params(nor); + if (ret) + return ret; + + if (!mtd->name) + mtd->name = dev_name(dev); + mtd->priv = nor; + mtd->type = MTD_NORFLASH; + mtd->writesize = 1; + mtd->flags = MTD_CAP_NORFLASH; + mtd->size = nor->params->size; + mtd->_erase = spi_nor_erase; + mtd->_read = spi_nor_read; + mtd->_resume = spi_nor_resume; + mtd->_get_device = spi_nor_get_device; + mtd->_put_device = spi_nor_put_device; + + if (nor->params->locking_ops) { + mtd->_lock = spi_nor_lock; + mtd->_unlock = spi_nor_unlock; + mtd->_is_locked = spi_nor_is_locked; + } + + if (info->flags & USE_FSR) + nor->flags |= SNOR_F_USE_FSR; + if (info->flags & SPI_NOR_HAS_TB) { + nor->flags |= SNOR_F_HAS_SR_TB; + if (info->flags & SPI_NOR_TB_SR_BIT6) + nor->flags |= SNOR_F_HAS_SR_TB_BIT6; + } + + if (info->flags & NO_CHIP_ERASE) + nor->flags |= SNOR_F_NO_OP_CHIP_ERASE; + if (info->flags & USE_CLSR) + nor->flags |= SNOR_F_USE_CLSR; + + if (info->flags & SPI_NOR_4BIT_BP) { + nor->flags |= SNOR_F_HAS_4BIT_BP; + if (info->flags & SPI_NOR_BP3_SR_BIT6) + nor->flags |= SNOR_F_HAS_SR_BP3_BIT6; + } + + if (info->flags & SPI_NOR_NO_ERASE) + mtd->flags |= MTD_NO_ERASE; + + mtd->dev.parent = dev; + nor->page_size = nor->params->page_size; + mtd->writebufsize = nor->page_size; + + if (of_property_read_bool(np, "broken-flash-reset")) + nor->flags |= SNOR_F_BROKEN_RESET; + + /* + * Configure the SPI memory: + * - select op codes for (Fast) Read, Page Program and Sector Erase. + * - set the number of dummy cycles (mode cycles + wait states). + * - set the SPI protocols for register and memory accesses. + */ + ret = spi_nor_setup(nor, hwcaps); + if (ret) + return ret; + + if (info->flags & SPI_NOR_4B_OPCODES) + nor->flags |= SNOR_F_4B_OPCODES; + + ret = spi_nor_set_addr_width(nor); + if (ret) + return ret; + + /* Send all the required SPI flash commands to initialize device */ + ret = spi_nor_init(nor); + if (ret) + return ret; + + dev_info(dev, "%s (%lld Kbytes)\n", info->name, + (long long)mtd->size >> 10); + + dev_dbg(dev, + "mtd .name = %s, .size = 0x%llx (%lldMiB), " + ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", + mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20), + mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions); + + if (mtd->numeraseregions) + for (i = 0; i < mtd->numeraseregions; i++) + dev_dbg(dev, + "mtd.eraseregions[%d] = { .offset = 0x%llx, " + ".erasesize = 0x%.8x (%uKiB), " + ".numblocks = %d }\n", + i, (long long)mtd->eraseregions[i].offset, + mtd->eraseregions[i].erasesize, + mtd->eraseregions[i].erasesize / 1024, + mtd->eraseregions[i].numblocks); + return 0; +} +EXPORT_SYMBOL_GPL(spi_nor_scan); + +static int spi_nor_create_read_dirmap(struct spi_nor *nor) +{ + struct spi_mem_dirmap_info info = { + .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1), + SPI_MEM_OP_ADDR(nor->addr_width, 0, 1), + SPI_MEM_OP_DUMMY(nor->read_dummy, 1), + SPI_MEM_OP_DATA_IN(0, NULL, 1)), + .offset = 0, + .length = nor->mtd.size, + }; + struct spi_mem_op *op = &info.op_tmpl; + + /* get transfer protocols. 
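The bus widths below are decoded from nor->read_proto, which encodes the instruction, address and data widths of the selected read command.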
*/ + op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto); + op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto); + op->dummy.buswidth = op->addr.buswidth; + op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto); + + /* convert the dummy cycles to the number of bytes */ + op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8; + + nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem, + &info); + return PTR_ERR_OR_ZERO(nor->dirmap.rdesc); +} + +static int spi_nor_create_write_dirmap(struct spi_nor *nor) +{ + struct spi_mem_dirmap_info info = { + .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1), + SPI_MEM_OP_ADDR(nor->addr_width, 0, 1), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(0, NULL, 1)), + .offset = 0, + .length = nor->mtd.size, + }; + struct spi_mem_op *op = &info.op_tmpl; + + /* get transfer protocols. */ + op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto); + op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto); + op->dummy.buswidth = op->addr.buswidth; + op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto); + + if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second) + op->addr.nbytes = 0; + + nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem, + &info); + return PTR_ERR_OR_ZERO(nor->dirmap.wdesc); +} + +static int spi_nor_probe(struct spi_mem *spimem) +{ + struct spi_device *spi = spimem->spi; + struct flash_platform_data *data = dev_get_platdata(&spi->dev); + struct spi_nor *nor; + /* + * Enable all caps by default. The core will mask them after + * checking what's really supported using spi_mem_supports_op(). + */ + const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL }; + char *flash_name; + int ret; + + nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL); + if (!nor) + return -ENOMEM; + + nor->spimem = spimem; + nor->dev = &spi->dev; + spi_nor_set_flash_node(nor, spi->dev.of_node); + + spi_mem_set_drvdata(spimem, nor); + + if (data && data->name) + nor->mtd.name = data->name; + + if (!nor->mtd.name) + nor->mtd.name = spi_mem_get_name(spimem); + + /* + * For some (historical?) reason many platforms provide two different + * names in flash_platform_data: "name" and "type". Quite often name is + * set to "m25p80" and then "type" provides a real chip name. + * If that's the case, respect "type" and ignore a "name". + */ + if (data && data->type) + flash_name = data->type; + else if (!strcmp(spi->modalias, "spi-nor")) + flash_name = NULL; /* auto-detect */ + else + flash_name = spi->modalias; + + ret = spi_nor_scan(nor, flash_name, &hwcaps); + if (ret) + return ret; + + /* + * None of the existing parts have > 512B pages, but let's play safe + * and add this logic so that if anyone ever adds support for such + * a NOR we don't end up with buffer overflows. + */ + if (nor->page_size > PAGE_SIZE) { + nor->bouncebuf_size = nor->page_size; + devm_kfree(nor->dev, nor->bouncebuf); + nor->bouncebuf = devm_kmalloc(nor->dev, + nor->bouncebuf_size, + GFP_KERNEL); + if (!nor->bouncebuf) + return -ENOMEM; + } + + ret = spi_nor_create_read_dirmap(nor); + if (ret) + return ret; + + ret = spi_nor_create_write_dirmap(nor); + if (ret) + return ret; + + return mtd_device_register(&nor->mtd, data ? data->parts : NULL, + data ? data->nr_parts : 0); +} + +static int spi_nor_remove(struct spi_mem *spimem) +{ + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + + spi_nor_restore(nor); + + /* Clean up MTD stuff. 
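mtd_device_unregister() also tears down any partitions that were registered on top of this device.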
*/ + return mtd_device_unregister(&nor->mtd); +} + +static void spi_nor_shutdown(struct spi_mem *spimem) +{ + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + + spi_nor_restore(nor); +} + +/* + * Do NOT add to this array without reading the following: + * + * Historically, many flash devices are bound to this driver by their name. But + * since most of these flash are compatible to some extent, and their + * differences can often be differentiated by the JEDEC read-ID command, we + * encourage new users to add support to the spi-nor library, and simply bind + * against a generic string here (e.g., "jedec,spi-nor"). + * + * Many flash names are kept here in this list (as well as in spi-nor.c) to + * keep them available as module aliases for existing platforms. + */ +static const struct spi_device_id spi_nor_dev_ids[] = { + /* + * Allow non-DT platform devices to bind to the "spi-nor" modalias, and + * hack around the fact that the SPI core does not provide uevent + * matching for .of_match_table + */ + {"spi-nor"}, + + /* + * Entries not used in DTs that should be safe to drop after replacing + * them with "spi-nor" in platform data. + */ + {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"}, + + /* + * Entries that were used in DTs without "jedec,spi-nor" fallback and + * should be kept for backward compatibility. + */ + {"at25df321a"}, {"at25df641"}, {"at26df081a"}, + {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"}, + {"mx25l25635e"},{"mx66l51235l"}, + {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"}, + {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"}, + {"s25fl064k"}, + {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"}, + {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"}, + {"m25p64"}, {"m25p128"}, + {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"}, + {"w25q80bl"}, {"w25q128"}, {"w25q256"}, + + /* Flashes that can't be detected using JEDEC */ + {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"}, + {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"}, + {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"}, + + /* Everspin MRAMs (non-JEDEC) */ + { "mr25h128" }, /* 128 Kib, 40 MHz */ + { "mr25h256" }, /* 256 Kib, 40 MHz */ + { "mr25h10" }, /* 1 Mib, 40 MHz */ + { "mr25h40" }, /* 4 Mib, 40 MHz */ + + { }, +}; +MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids); + +static const struct of_device_id spi_nor_of_table[] = { + /* + * Generic compatibility for SPI NOR that can be identified by the + * JEDEC READ ID opcode (0x9F). Use this, if possible. + */ + { .compatible = "jedec,spi-nor" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, spi_nor_of_table); + +/* + * REVISIT: many of these chips have deep power-down modes, which + * should clearly be entered on suspend() to minimize power use. + * And also when they're otherwise idle... 
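+ * On many parts that would mean issuing the deep power-down opcode (0xb9)
+ * on suspend and the release opcode (0xab) before the next access.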
+ */ +static struct spi_mem_driver spi_nor_driver = { + .spidrv = { + .driver = { + .name = "spi-nor", + .of_match_table = spi_nor_of_table, + }, + .id_table = spi_nor_dev_ids, + }, + .probe = spi_nor_probe, + .remove = spi_nor_remove, + .shutdown = spi_nor_shutdown, +}; +module_spi_mem_driver(spi_nor_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>"); +MODULE_AUTHOR("Mike Lavender"); +MODULE_DESCRIPTION("framework for SPI NOR"); diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h new file mode 100644 index 000000000..788775bb6 --- /dev/null +++ b/drivers/mtd/spi-nor/core.h @@ -0,0 +1,443 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#ifndef __LINUX_MTD_SPI_NOR_INTERNAL_H +#define __LINUX_MTD_SPI_NOR_INTERNAL_H + +#include "sfdp.h" + +#define SPI_NOR_MAX_ID_LEN 6 + +enum spi_nor_option_flags { + SNOR_F_USE_FSR = BIT(0), + SNOR_F_HAS_SR_TB = BIT(1), + SNOR_F_NO_OP_CHIP_ERASE = BIT(2), + SNOR_F_READY_XSR_RDY = BIT(3), + SNOR_F_USE_CLSR = BIT(4), + SNOR_F_BROKEN_RESET = BIT(5), + SNOR_F_4B_OPCODES = BIT(6), + SNOR_F_HAS_4BAIT = BIT(7), + SNOR_F_HAS_LOCK = BIT(8), + SNOR_F_HAS_16BIT_SR = BIT(9), + SNOR_F_NO_READ_CR = BIT(10), + SNOR_F_HAS_SR_TB_BIT6 = BIT(11), + SNOR_F_HAS_4BIT_BP = BIT(12), + SNOR_F_HAS_SR_BP3_BIT6 = BIT(13), +}; + +struct spi_nor_read_command { + u8 num_mode_clocks; + u8 num_wait_states; + u8 opcode; + enum spi_nor_protocol proto; +}; + +struct spi_nor_pp_command { + u8 opcode; + enum spi_nor_protocol proto; +}; + +enum spi_nor_read_command_index { + SNOR_CMD_READ, + SNOR_CMD_READ_FAST, + SNOR_CMD_READ_1_1_1_DTR, + + /* Dual SPI */ + SNOR_CMD_READ_1_1_2, + SNOR_CMD_READ_1_2_2, + SNOR_CMD_READ_2_2_2, + SNOR_CMD_READ_1_2_2_DTR, + + /* Quad SPI */ + SNOR_CMD_READ_1_1_4, + SNOR_CMD_READ_1_4_4, + SNOR_CMD_READ_4_4_4, + SNOR_CMD_READ_1_4_4_DTR, + + /* Octal SPI */ + SNOR_CMD_READ_1_1_8, + SNOR_CMD_READ_1_8_8, + SNOR_CMD_READ_8_8_8, + SNOR_CMD_READ_1_8_8_DTR, + + SNOR_CMD_READ_MAX +}; + +enum spi_nor_pp_command_index { + SNOR_CMD_PP, + + /* Quad SPI */ + SNOR_CMD_PP_1_1_4, + SNOR_CMD_PP_1_4_4, + SNOR_CMD_PP_4_4_4, + + /* Octal SPI */ + SNOR_CMD_PP_1_1_8, + SNOR_CMD_PP_1_8_8, + SNOR_CMD_PP_8_8_8, + + SNOR_CMD_PP_MAX +}; + +/** + * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type + * @size: the size of the sector/block erased by the erase type. + * JEDEC JESD216B imposes erase sizes to be a power of 2. + * @size_shift: @size is a power of 2, the shift is stored in + * @size_shift. + * @size_mask: the size mask based on @size_shift. + * @opcode: the SPI command op code to erase the sector/block. + * @idx: Erase Type index as sorted in the Basic Flash Parameter + * Table. It will be used to synchronize the supported + * Erase Types with the ones identified in the SFDP + * optional tables. + */ +struct spi_nor_erase_type { + u32 size; + u32 size_shift; + u32 size_mask; + u8 opcode; + u8 idx; +}; + +/** + * struct spi_nor_erase_command - Used for non-uniform erases + * The structure is used to describe a list of erase commands to be executed + * once we validate that the erase can be performed. The elements in the list + * are run-length encoded. + * @list: for inclusion into the list of erase commands. + * @count: how many times the same erase command should be + * consecutively used. + * @size: the size of the sector/block erased by the command. 
+ * @opcode: the SPI command op code to erase the sector/block. + */ +struct spi_nor_erase_command { + struct list_head list; + u32 count; + u32 size; + u8 opcode; +}; + +/** + * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region + * @offset: the offset in the data array of erase region start. + * LSB bits are used as a bitmask encoding flags to + * determine if this region is overlaid, if this region is + * the last in the SPI NOR flash memory and to indicate + * all the supported erase commands inside this region. + * The erase types are sorted in ascending order with the + * smallest Erase Type size being at BIT(0). + * @size: the size of the region in bytes. + */ +struct spi_nor_erase_region { + u64 offset; + u64 size; +}; + +#define SNOR_ERASE_TYPE_MAX 4 +#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0) + +#define SNOR_LAST_REGION BIT(4) +#define SNOR_OVERLAID_REGION BIT(5) + +#define SNOR_ERASE_FLAGS_MAX 6 +#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0) + +/** + * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map + * @regions: array of erase regions. The regions are consecutive in + * address space. Walking through the regions is done + * incrementally. + * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform + * sector size (legacy implementation). + * @erase_type: an array of erase types shared by all the regions. + * The erase types are sorted in ascending order, with the + * smallest Erase Type size being the first member in the + * erase_type array. + * @uniform_erase_type: bitmask encoding erase types that can erase the + * entire memory. This member is completed at init by + * uniform and non-uniform SPI NOR flash memories if they + * support at least one erase type that can erase the + * entire memory. + */ +struct spi_nor_erase_map { + struct spi_nor_erase_region *regions; + struct spi_nor_erase_region uniform_region; + struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX]; + u8 uniform_erase_type; +}; + +/** + * struct spi_nor_locking_ops - SPI NOR locking methods + * @lock: lock a region of the SPI NOR. + * @unlock: unlock a region of the SPI NOR. + * @is_locked: check if a region of the SPI NOR is completely locked + */ +struct spi_nor_locking_ops { + int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); +}; + +/** + * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings. + * Includes legacy flash parameters and settings that can be overwritten + * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216 + * Serial Flash Discoverable Parameters (SFDP) tables. + * + * @size: the flash memory density in bytes. + * @page_size: the page size of the SPI NOR flash memory. + * @hwcaps: describes the read and page program hardware + * capabilities. + * @reads: read capabilities ordered by priority: the higher index + * in the array, the higher priority. + * @page_programs: page program capabilities ordered by priority: the + * higher index in the array, the higher priority. + * @erase_map: the erase map parsed from the SFDP Sector Map Parameter + * Table. + * @quad_enable: enables SPI NOR quad mode. + * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode. + * @convert_addr: converts an absolute address into something the flash + * will understand. 
Particularly useful when pagesize is + * not a power-of-2. + * @setup: configures the SPI NOR memory. Useful for SPI NOR + * flashes that have peculiarities to the SPI NOR standard + * e.g. different opcodes, specific address calculation, + * page size, etc. + * @locking_ops: SPI NOR locking methods. + */ +struct spi_nor_flash_parameter { + u64 size; + u32 page_size; + + struct spi_nor_hwcaps hwcaps; + struct spi_nor_read_command reads[SNOR_CMD_READ_MAX]; + struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX]; + + struct spi_nor_erase_map erase_map; + + int (*quad_enable)(struct spi_nor *nor); + int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable); + u32 (*convert_addr)(struct spi_nor *nor, u32 addr); + int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps); + + const struct spi_nor_locking_ops *locking_ops; +}; + +/** + * struct spi_nor_fixups - SPI NOR fixup hooks + * @default_init: called after default flash parameters init. Used to tweak + * flash parameters when information provided by the flash_info + * table is incomplete or wrong. + * @post_bfpt: called after the BFPT table has been parsed + * @post_sfdp: called after SFDP has been parsed (is also called for SPI NORs + * that do not support RDSFDP). Typically used to tweak various + * parameters that could not be extracted by other means (i.e. + * when information provided by the SFDP/flash_info tables are + * incomplete or wrong). + * + * Those hooks can be used to tweak the SPI NOR configuration when the SFDP + * table is broken or not available. + */ +struct spi_nor_fixups { + void (*default_init)(struct spi_nor *nor); + int (*post_bfpt)(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt, + struct spi_nor_flash_parameter *params); + void (*post_sfdp)(struct spi_nor *nor); +}; + +struct flash_info { + char *name; + + /* + * This array stores the ID bytes. + * The first three bytes are the JEDIC ID. + * JEDEC ID zero means "no ID" (mostly older chips). + */ + u8 id[SPI_NOR_MAX_ID_LEN]; + u8 id_len; + + /* The size listed here is what works with SPINOR_OP_SE, which isn't + * necessarily called a "sector" by the vendor. + */ + unsigned sector_size; + u16 n_sectors; + + u16 page_size; + u16 addr_width; + + u32 flags; +#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */ +#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */ +#define SST_WRITE BIT(2) /* use SST byte programming */ +#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */ +#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */ +#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */ +#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */ +#define USE_FSR BIT(7) /* use flag status register */ +#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */ +#define SPI_NOR_HAS_TB BIT(9) /* + * Flash SR has Top/Bottom (TB) protect + * bit. Must be used with + * SPI_NOR_HAS_LOCK. + */ +#define SPI_NOR_XSR_RDY BIT(10) /* + * S3AN flashes have specific opcode to + * read the status register. + */ +#define SPI_NOR_4B_OPCODES BIT(11) /* + * Use dedicated 4byte address op codes + * to support memory size above 128Mib. 
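+ * For example, reads use 0x13
+ * instead of 0x03, so the flash never
+ * needs to be switched into a
+ * stateful 4-byte address mode.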
+ */ +#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */ +#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */ +#define USE_CLSR BIT(14) /* use CLSR command */ +#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */ +#define SPI_NOR_TB_SR_BIT6 BIT(16) /* + * Top/Bottom (TB) is bit 6 of + * status register. Must be used with + * SPI_NOR_HAS_TB. + */ +#define SPI_NOR_4BIT_BP BIT(17) /* + * Flash SR has 4 bit fields (BP0-3) + * for block protection. + */ +#define SPI_NOR_BP3_SR_BIT6 BIT(18) /* + * BP3 is bit 6 of status register. + * Must be used with SPI_NOR_4BIT_BP. + */ + + /* Part specific fixup hooks. */ + const struct spi_nor_fixups *fixups; +}; + +/* Used when the "_ext_id" is two bytes at most */ +#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ + .id = { \ + ((_jedec_id) >> 16) & 0xff, \ + ((_jedec_id) >> 8) & 0xff, \ + (_jedec_id) & 0xff, \ + ((_ext_id) >> 8) & 0xff, \ + (_ext_id) & 0xff, \ + }, \ + .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \ + .sector_size = (_sector_size), \ + .n_sectors = (_n_sectors), \ + .page_size = 256, \ + .flags = (_flags), + +#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ + .id = { \ + ((_jedec_id) >> 16) & 0xff, \ + ((_jedec_id) >> 8) & 0xff, \ + (_jedec_id) & 0xff, \ + ((_ext_id) >> 16) & 0xff, \ + ((_ext_id) >> 8) & 0xff, \ + (_ext_id) & 0xff, \ + }, \ + .id_len = 6, \ + .sector_size = (_sector_size), \ + .n_sectors = (_n_sectors), \ + .page_size = 256, \ + .flags = (_flags), + +#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \ + .sector_size = (_sector_size), \ + .n_sectors = (_n_sectors), \ + .page_size = (_page_size), \ + .addr_width = (_addr_width), \ + .flags = (_flags), + +#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \ + .id = { \ + ((_jedec_id) >> 16) & 0xff, \ + ((_jedec_id) >> 8) & 0xff, \ + (_jedec_id) & 0xff \ + }, \ + .id_len = 3, \ + .sector_size = (8*_page_size), \ + .n_sectors = (_n_sectors), \ + .page_size = _page_size, \ + .addr_width = 3, \ + .flags = SPI_NOR_NO_FR | SPI_NOR_XSR_RDY, + +/** + * struct spi_nor_manufacturer - SPI NOR manufacturer object + * @name: manufacturer name + * @parts: array of parts supported by this manufacturer + * @nparts: number of entries in the parts array + * @fixups: hooks called at various points in time during spi_nor_scan() + */ +struct spi_nor_manufacturer { + const char *name; + const struct flash_info *parts; + unsigned int nparts; + const struct spi_nor_fixups *fixups; +}; + +/* Manufacturer drivers. 
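Each of these tables is defined in one of the per-vendor source files in this directory.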
*/ +extern const struct spi_nor_manufacturer spi_nor_atmel; +extern const struct spi_nor_manufacturer spi_nor_catalyst; +extern const struct spi_nor_manufacturer spi_nor_eon; +extern const struct spi_nor_manufacturer spi_nor_esmt; +extern const struct spi_nor_manufacturer spi_nor_everspin; +extern const struct spi_nor_manufacturer spi_nor_fujitsu; +extern const struct spi_nor_manufacturer spi_nor_gigadevice; +extern const struct spi_nor_manufacturer spi_nor_intel; +extern const struct spi_nor_manufacturer spi_nor_issi; +extern const struct spi_nor_manufacturer spi_nor_macronix; +extern const struct spi_nor_manufacturer spi_nor_micron; +extern const struct spi_nor_manufacturer spi_nor_st; +extern const struct spi_nor_manufacturer spi_nor_spansion; +extern const struct spi_nor_manufacturer spi_nor_sst; +extern const struct spi_nor_manufacturer spi_nor_winbond; +extern const struct spi_nor_manufacturer spi_nor_xilinx; +extern const struct spi_nor_manufacturer spi_nor_xmc; + +int spi_nor_write_enable(struct spi_nor *nor); +int spi_nor_write_disable(struct spi_nor *nor); +int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable); +int spi_nor_write_ear(struct spi_nor *nor, u8 ear); +int spi_nor_wait_till_ready(struct spi_nor *nor); +int spi_nor_lock_and_prep(struct spi_nor *nor); +void spi_nor_unlock_and_unprep(struct spi_nor *nor); +int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor); +int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor); +int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor); +int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1); + +int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr); +ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, + u8 *buf); +ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len, + const u8 *buf); + +int spi_nor_hwcaps_read2cmd(u32 hwcaps); +u8 spi_nor_convert_3to4_read(u8 opcode); +void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode, + enum spi_nor_protocol proto); + +void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size, + u8 opcode); +void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase); +struct spi_nor_erase_region * +spi_nor_region_next(struct spi_nor_erase_region *region); +void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map, + u8 erase_mask, u64 flash_size); + +int spi_nor_post_bfpt_fixups(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt, + struct spi_nor_flash_parameter *params); + +static struct spi_nor __maybe_unused *mtd_to_spi_nor(struct mtd_info *mtd) +{ + return mtd->priv; +} + +#endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */ diff --git a/drivers/mtd/spi-nor/eon.c b/drivers/mtd/spi-nor/eon.c new file mode 100644 index 000000000..ddb8e3650 --- /dev/null +++ b/drivers/mtd/spi-nor/eon.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. 
+ */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static const struct flash_info eon_parts[] = { + /* EON -- en25xxx */ + { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, + { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, + { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, + { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, + { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, + { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16, + SECT_4K | SPI_NOR_DUAL_READ) }, + { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ) }, + { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) }, + { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ) }, + { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) }, + { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) }, + { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) }, +}; + +const struct spi_nor_manufacturer spi_nor_eon = { + .name = "eon", + .parts = eon_parts, + .nparts = ARRAY_SIZE(eon_parts), +}; diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c new file mode 100644 index 000000000..c93170008 --- /dev/null +++ b/drivers/mtd/spi-nor/esmt.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static const struct flash_info esmt_parts[] = { + /* ESMT */ + { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_HAS_LOCK) }, + { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_HAS_LOCK) }, + { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_HAS_LOCK) }, +}; + +const struct spi_nor_manufacturer spi_nor_esmt = { + .name = "esmt", + .parts = esmt_parts, + .nparts = ARRAY_SIZE(esmt_parts), +}; diff --git a/drivers/mtd/spi-nor/everspin.c b/drivers/mtd/spi-nor/everspin.c new file mode 100644 index 000000000..04a177a32 --- /dev/null +++ b/drivers/mtd/spi-nor/everspin.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static const struct flash_info everspin_parts[] = { + /* Everspin */ + { "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2, + SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, + { "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2, + SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, + { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, + SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, + { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, + SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, +}; + +const struct spi_nor_manufacturer spi_nor_everspin = { + .name = "everspin", + .parts = everspin_parts, + .nparts = ARRAY_SIZE(everspin_parts), +}; diff --git a/drivers/mtd/spi-nor/fujitsu.c b/drivers/mtd/spi-nor/fujitsu.c new file mode 100644 index 000000000..e385d93e7 --- /dev/null +++ b/drivers/mtd/spi-nor/fujitsu.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. 
+ */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static const struct flash_info fujitsu_parts[] = { + /* Fujitsu */ + { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) }, +}; + +const struct spi_nor_manufacturer spi_nor_fujitsu = { + .name = "fujitsu", + .parts = fujitsu_parts, + .nparts = ARRAY_SIZE(fujitsu_parts), +}; diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c new file mode 100644 index 000000000..447d84bb2 --- /dev/null +++ b/drivers/mtd/spi-nor/gigadevice.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static void gd25q256_default_init(struct spi_nor *nor) +{ + /* + * Some manufacturer like GigaDevice may use different + * bit to set QE on different memories, so the MFR can't + * indicate the quad_enable method for this case, we need + * to set it in the default_init fixup hook. + */ + nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable; +} + +static struct spi_nor_fixups gd25q256_fixups = { + .default_init = gd25q256_default_init, +}; + +static const struct flash_info gigadevice_parts[] = { + { "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | + SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6) + .fixups = &gd25q256_fixups }, +}; + +const struct spi_nor_manufacturer spi_nor_gigadevice = { + .name = "gigadevice", + .parts = gigadevice_parts, + .nparts = ARRAY_SIZE(gigadevice_parts), +}; diff --git a/drivers/mtd/spi-nor/intel.c b/drivers/mtd/spi-nor/intel.c new file mode 100644 index 000000000..d8196f101 --- /dev/null +++ b/drivers/mtd/spi-nor/intel.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. 
+ */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static const struct flash_info intel_parts[] = { + /* Intel/Numonyx -- xxxs33b */ + { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, + { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) }, + { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) }, +}; + +static void intel_default_init(struct spi_nor *nor) +{ + nor->flags |= SNOR_F_HAS_LOCK; +} + +static const struct spi_nor_fixups intel_fixups = { + .default_init = intel_default_init, +}; + +const struct spi_nor_manufacturer spi_nor_intel = { + .name = "intel", + .parts = intel_parts, + .nparts = ARRAY_SIZE(intel_parts), + .fixups = &intel_fixups, +}; diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c new file mode 100644 index 000000000..ffcb60e54 --- /dev/null +++ b/drivers/mtd/spi-nor/issi.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static int +is25lp256_post_bfpt_fixups(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt, + struct spi_nor_flash_parameter *params) +{ + /* + * IS25LP256 supports 4B opcodes, but the BFPT advertises a + * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width. + * Overwrite the address width advertised by the BFPT. + */ + if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) == + BFPT_DWORD1_ADDRESS_BYTES_3_ONLY) + nor->addr_width = 4; + + return 0; +} + +static struct spi_nor_fixups is25lp256_fixups = { + .post_bfpt = is25lp256_post_bfpt_fixups, +}; + +static const struct flash_info issi_parts[] = { + /* ISSI */ + { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) }, + { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ) }, + { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ) }, + { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ) }, + { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_4B_OPCODES) + .fixups = &is25lp256_fixups }, + { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_4B_OPCODES) + .fixups = &is25lp256_fixups }, + + /* PMC */ + { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) }, + { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) }, + { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) }, +}; + +static void issi_default_init(struct spi_nor *nor) +{ + nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable; +} + +static const struct spi_nor_fixups issi_fixups = { + .default_init = issi_default_init, +}; + +const struct spi_nor_manufacturer spi_nor_issi = { + .name = "issi", + .parts = issi_parts, + .nparts = ARRAY_SIZE(issi_parts), + .fixups = 
&issi_fixups, +}; diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c new file mode 100644 index 000000000..662b21278 --- /dev/null +++ b/drivers/mtd/spi-nor/macronix.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static int +mx25l25635_post_bfpt_fixups(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt, + struct spi_nor_flash_parameter *params) +{ + /* + * MX25L25635F supports 4B opcodes but MX25L25635E does not. + * Unfortunately, Macronix has re-used the same JEDEC ID for both + * variants which prevents us from defining a new entry in the parts + * table. + * We need a way to differentiate MX25L25635E and MX25L25635F, and it + * seems that the F version advertises support for Fast Read 4-4-4 in + * its BFPT table. + */ + if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4) + nor->flags |= SNOR_F_4B_OPCODES; + + return 0; +} + +static struct spi_nor_fixups mx25l25635_fixups = { + .post_bfpt = mx25l25635_post_bfpt_fixups, +}; + +static const struct flash_info macronix_parts[] = { + /* Macronix */ + { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) }, + { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) }, + { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) }, + { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) }, + { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) }, + { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) }, + { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) }, + { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) }, + { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) }, + { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) }, + { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) }, + { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) }, + { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, SECT_4K) }, + { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, + { "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + .fixups = &mx25l25635_fixups }, + { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, + SECT_4K | SPI_NOR_4B_OPCODES) }, + { "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024, + SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, + { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16, + SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, + { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_4B_OPCODES) }, + { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, + SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, + { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, + SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, + SPI_NOR_QUAD_READ) }, + { "mx66u2g45g", 
INFO(0xc2253c, 0, 64 * 1024, 4096,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+};
+
+static void macronix_default_init(struct spi_nor *nor)
+{
+ nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+ nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode;
+}
+
+static const struct spi_nor_fixups macronix_fixups = {
+ .default_init = macronix_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_macronix = {
+ .name = "macronix",
+ .parts = macronix_parts,
+ .nparts = ARRAY_SIZE(macronix_parts),
+ .fixups = &macronix_fixups,
+};
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
new file mode 100644
index 000000000..ef3695080
--- /dev/null
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info micron_parts[] = {
+ { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
+ SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
+ SPI_NOR_4B_OPCODES) },
+};
+
+static const struct flash_info st_parts[] = {
+ { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64,
+ SPI_NOR_QUAD_READ) },
+ { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64,
+ SPI_NOR_QUAD_READ) },
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+ SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K |
+ USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+ SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
+ { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+ SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
+ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+ SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6 |
+ NO_CHIP_ERASE) },
+ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ NO_CHIP_ERASE) },
+ { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ NO_CHIP_ERASE) },
+ { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+
+ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
+ { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
+ { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
+ { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
+ { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
+ { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
+ { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
+ { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
+ { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
+
+ { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
+ { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
+ { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
+ { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
+ { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
+ { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
+ { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
+ { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
+ { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
+
+ { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
+ { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
+ { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
+
+ { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
+ { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
+ { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
+
+ { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
+ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
+ { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
+};
+
+/**
+ * st_micron_set_4byte_addr_mode() - Set 4-byte address mode for ST and Micron
+ * flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int st_micron_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_set_4byte_addr_mode(nor, enable);
+ if (ret)
+ return ret;
+
+ return spi_nor_write_disable(nor);
+}
+
+static void micron_st_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ nor->params->quad_enable = NULL;
+ nor->params->set_4byte_addr_mode = st_micron_set_4byte_addr_mode;
+}
+
+static const struct spi_nor_fixups micron_st_fixups = {
+ .default_init = micron_st_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_micron = {
+ .name = "micron",
+ .parts = micron_parts,
+ .nparts = ARRAY_SIZE(micron_parts),
+ .fixups = &micron_st_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_st = {
+ .name = "st",
+ .parts = st_parts,
+ .nparts = ARRAY_SIZE(st_parts),
+ .fixups = &micron_st_fixups,
+};
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
new file mode 100644
index 000000000..9dc0528ea
--- /dev/null
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -0,0 +1,1206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */ + +#include <linux/slab.h> +#include <linux/sort.h> +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb) +#define SFDP_PARAM_HEADER_PTP(p) \ + (((p)->parameter_table_pointer[2] << 16) | \ + ((p)->parameter_table_pointer[1] << 8) | \ + ((p)->parameter_table_pointer[0] << 0)) + +#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */ +#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */ +#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */ + +#define SFDP_SIGNATURE 0x50444653U + +struct sfdp_header { + u32 signature; /* Ox50444653U <=> "SFDP" */ + u8 minor; + u8 major; + u8 nph; /* 0-base number of parameter headers */ + u8 unused; + + /* Basic Flash Parameter Table. */ + struct sfdp_parameter_header bfpt_header; +}; + +/* Fast Read settings. */ +struct sfdp_bfpt_read { + /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */ + u32 hwcaps; + + /* + * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us + * whether the Fast Read x-y-z command is supported. + */ + u32 supported_dword; + u32 supported_bit; + + /* + * The half-word at offset <setting_shift> in <setting_dword> BFPT DWORD + * encodes the op code, the number of mode clocks and the number of wait + * states to be used by Fast Read x-y-z command. + */ + u32 settings_dword; + u32 settings_shift; + + /* The SPI protocol for this Fast Read x-y-z command. */ + enum spi_nor_protocol proto; +}; + +struct sfdp_bfpt_erase { + /* + * The half-word at offset <shift> in DWORD <dwoard> encodes the + * op code and erase sector size to be used by Sector Erase commands. + */ + u32 dword; + u32 shift; +}; + +#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22) +#define SMPT_CMD_ADDRESS_LEN_0 (0x0UL << 22) +#define SMPT_CMD_ADDRESS_LEN_3 (0x1UL << 22) +#define SMPT_CMD_ADDRESS_LEN_4 (0x2UL << 22) +#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT (0x3UL << 22) + +#define SMPT_CMD_READ_DUMMY_MASK GENMASK(19, 16) +#define SMPT_CMD_READ_DUMMY_SHIFT 16 +#define SMPT_CMD_READ_DUMMY(_cmd) \ + (((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT) +#define SMPT_CMD_READ_DUMMY_IS_VARIABLE 0xfUL + +#define SMPT_CMD_READ_DATA_MASK GENMASK(31, 24) +#define SMPT_CMD_READ_DATA_SHIFT 24 +#define SMPT_CMD_READ_DATA(_cmd) \ + (((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT) + +#define SMPT_CMD_OPCODE_MASK GENMASK(15, 8) +#define SMPT_CMD_OPCODE_SHIFT 8 +#define SMPT_CMD_OPCODE(_cmd) \ + (((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT) + +#define SMPT_MAP_REGION_COUNT_MASK GENMASK(23, 16) +#define SMPT_MAP_REGION_COUNT_SHIFT 16 +#define SMPT_MAP_REGION_COUNT(_header) \ + ((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \ + SMPT_MAP_REGION_COUNT_SHIFT) + 1) + +#define SMPT_MAP_ID_MASK GENMASK(15, 8) +#define SMPT_MAP_ID_SHIFT 8 +#define SMPT_MAP_ID(_header) \ + (((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT) + +#define SMPT_MAP_REGION_SIZE_MASK GENMASK(31, 8) +#define SMPT_MAP_REGION_SIZE_SHIFT 8 +#define SMPT_MAP_REGION_SIZE(_region) \ + (((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \ + SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256) + +#define SMPT_MAP_REGION_ERASE_TYPE_MASK GENMASK(3, 0) +#define SMPT_MAP_REGION_ERASE_TYPE(_region) \ + ((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK) + +#define SMPT_DESC_TYPE_MAP BIT(1) +#define SMPT_DESC_END BIT(0) + +#define SFDP_4BAIT_DWORD_MAX 2 + +struct sfdp_4bait { + /* The hardware capability. 
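This is one of the SNOR_HWCAPS_* read or page program bits from params->hwcaps.mask.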
*/ + u32 hwcaps; + + /* + * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether + * the associated 4-byte address op code is supported. + */ + u32 supported_bit; +}; + +/** + * spi_nor_read_raw() - raw read of serial flash memory. read_opcode, + * addr_width and read_dummy members of the struct spi_nor + * should be previously + * set. + * @nor: pointer to a 'struct spi_nor' + * @addr: offset in the serial flash memory + * @len: number of bytes to read + * @buf: buffer where the data is copied into (dma-safe memory) + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf) +{ + ssize_t ret; + + while (len) { + ret = spi_nor_read_data(nor, addr, len, buf); + if (ret < 0) + return ret; + if (!ret || ret > len) + return -EIO; + + buf += ret; + addr += ret; + len -= ret; + } + return 0; +} + +/** + * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters. + * @nor: pointer to a 'struct spi_nor' + * @addr: offset in the SFDP area to start reading data from + * @len: number of bytes to read + * @buf: buffer where the SFDP data are copied into (dma-safe memory) + * + * Whatever the actual numbers of bytes for address and dummy cycles are + * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always + * followed by a 3-byte address and 8 dummy clock cycles. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr, + size_t len, void *buf) +{ + u8 addr_width, read_opcode, read_dummy; + int ret; + + read_opcode = nor->read_opcode; + addr_width = nor->addr_width; + read_dummy = nor->read_dummy; + + nor->read_opcode = SPINOR_OP_RDSFDP; + nor->addr_width = 3; + nor->read_dummy = 8; + + ret = spi_nor_read_raw(nor, addr, len, buf); + + nor->read_opcode = read_opcode; + nor->addr_width = addr_width; + nor->read_dummy = read_dummy; + + return ret; +} + +/** + * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters. + * @nor: pointer to a 'struct spi_nor' + * @addr: offset in the SFDP area to start reading data from + * @len: number of bytes to read + * @buf: buffer where the SFDP data are copied into + * + * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not + * guaranteed to be dma-safe. + * + * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp() + * otherwise. 
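+ *
+ * The extra copy is what allows callers to pass stack or vmalloc'ed buffers
+ * here, since those may not be DMA-safe.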
+ */ +static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr, + size_t len, void *buf) +{ + void *dma_safe_buf; + int ret; + + dma_safe_buf = kmalloc(len, GFP_KERNEL); + if (!dma_safe_buf) + return -ENOMEM; + + ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf); + memcpy(buf, dma_safe_buf, len); + kfree(dma_safe_buf); + + return ret; +} + +static void +spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read, + u16 half, + enum spi_nor_protocol proto) +{ + read->num_mode_clocks = (half >> 5) & 0x07; + read->num_wait_states = (half >> 0) & 0x1f; + read->opcode = (half >> 8) & 0xff; + read->proto = proto; +} + +static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = { + /* Fast Read 1-1-2 */ + { + SNOR_HWCAPS_READ_1_1_2, + BFPT_DWORD(1), BIT(16), /* Supported bit */ + BFPT_DWORD(4), 0, /* Settings */ + SNOR_PROTO_1_1_2, + }, + + /* Fast Read 1-2-2 */ + { + SNOR_HWCAPS_READ_1_2_2, + BFPT_DWORD(1), BIT(20), /* Supported bit */ + BFPT_DWORD(4), 16, /* Settings */ + SNOR_PROTO_1_2_2, + }, + + /* Fast Read 2-2-2 */ + { + SNOR_HWCAPS_READ_2_2_2, + BFPT_DWORD(5), BIT(0), /* Supported bit */ + BFPT_DWORD(6), 16, /* Settings */ + SNOR_PROTO_2_2_2, + }, + + /* Fast Read 1-1-4 */ + { + SNOR_HWCAPS_READ_1_1_4, + BFPT_DWORD(1), BIT(22), /* Supported bit */ + BFPT_DWORD(3), 16, /* Settings */ + SNOR_PROTO_1_1_4, + }, + + /* Fast Read 1-4-4 */ + { + SNOR_HWCAPS_READ_1_4_4, + BFPT_DWORD(1), BIT(21), /* Supported bit */ + BFPT_DWORD(3), 0, /* Settings */ + SNOR_PROTO_1_4_4, + }, + + /* Fast Read 4-4-4 */ + { + SNOR_HWCAPS_READ_4_4_4, + BFPT_DWORD(5), BIT(4), /* Supported bit */ + BFPT_DWORD(7), 16, /* Settings */ + SNOR_PROTO_4_4_4, + }, +}; + +static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = { + /* Erase Type 1 in DWORD8 bits[15:0] */ + {BFPT_DWORD(8), 0}, + + /* Erase Type 2 in DWORD8 bits[31:16] */ + {BFPT_DWORD(8), 16}, + + /* Erase Type 3 in DWORD9 bits[15:0] */ + {BFPT_DWORD(9), 0}, + + /* Erase Type 4 in DWORD9 bits[31:16] */ + {BFPT_DWORD(9), 16}, +}; + +/** + * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT + * @erase: pointer to a structure that describes a SPI NOR erase type + * @size: the size of the sector/block erased by the erase type + * @opcode: the SPI command op code to erase the sector/block + * @i: erase type index as sorted in the Basic Flash Parameter Table + * + * The supported Erase Types will be sorted at init in ascending order, with + * the smallest Erase Type size being the first member in the erase_type array + * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in + * the Basic Flash Parameter Table since it will be used later on to + * synchronize with the supported Erase Types defined in SFDP optional tables. + */ +static void +spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase, + u32 size, u8 opcode, u8 i) +{ + erase->idx = i; + spi_nor_set_erase_type(erase, size, opcode); +} + +/** + * spi_nor_map_cmp_erase_type() - compare the map's erase types by size + * @l: member in the left half of the map's erase_type array + * @r: member in the right half of the map's erase_type array + * + * Comparison function used in the sort() call to sort in ascending order the + * map's erase types, the smallest erase type size being the first member in the + * sorted erase_type array. 
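+ * A negative result sorts @l before @r, which is what sort() expects from a
+ * comparison callback.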
+ * + * Return: the result of @l->size - @r->size + */ +static int spi_nor_map_cmp_erase_type(const void *l, const void *r) +{ + const struct spi_nor_erase_type *left = l, *right = r; + + return left->size - right->size; +} + +/** + * spi_nor_sort_erase_mask() - sort erase mask + * @map: the erase map of the SPI NOR + * @erase_mask: the erase type mask to be sorted + * + * Replicate the sort done for the map's erase types in BFPT: sort the erase + * mask in ascending order with the smallest erase type size starting from + * BIT(0) in the sorted erase mask. + * + * Return: sorted erase mask. + */ +static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask) +{ + struct spi_nor_erase_type *erase_type = map->erase_type; + int i; + u8 sorted_erase_mask = 0; + + if (!erase_mask) + return 0; + + /* Replicate the sort done for the map's erase types. */ + for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) + if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx)) + sorted_erase_mask |= BIT(i); + + return sorted_erase_mask; +} + +/** + * spi_nor_regions_sort_erase_types() - sort erase types in each region + * @map: the erase map of the SPI NOR + * + * Function assumes that the erase types defined in the erase map are already + * sorted in ascending order, with the smallest erase type size being the first + * member in the erase_type array. It replicates the sort done for the map's + * erase types. Each region's erase bitmask will indicate which erase types are + * supported from the sorted erase types defined in the erase map. + * Sort the all region's erase type at init in order to speed up the process of + * finding the best erase command at runtime. + */ +static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map) +{ + struct spi_nor_erase_region *region = map->regions; + u8 region_erase_mask, sorted_erase_mask; + + while (region) { + region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK; + + sorted_erase_mask = spi_nor_sort_erase_mask(map, + region_erase_mask); + + /* Overwrite erase mask. */ + region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) | + sorted_erase_mask; + + region = spi_nor_region_next(region); + } +} + +/** + * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table. + * @nor: pointer to a 'struct spi_nor' + * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing + * the Basic Flash Parameter Table length and version + * @params: pointer to the 'struct spi_nor_flash_parameter' to be + * filled + * + * The Basic Flash Parameter Table is the main and only mandatory table as + * defined by the SFDP (JESD216) specification. + * It provides us with the total size (memory density) of the data array and + * the number of address bytes for Fast Read, Page Program and Sector Erase + * commands. + * For Fast READ commands, it also gives the number of mode clock cycles and + * wait states (regrouped in the number of dummy clock cycles) for each + * supported instruction op code. + * For Page Program, the page size is now available since JESD216 rev A, however + * the supported instruction op codes are still not provided. + * For Sector Erase commands, this table stores the supported instruction op + * codes and the associated sector sizes. + * Finally, the Quad Enable Requirements (QER) are also available since JESD216 + * rev A. The QER bits encode the manufacturer dependent procedure to be + * executed to set the Quad Enable (QE) bit in some internal register of the + * Quad SPI memory. 
Indeed the QE bit, when it exists, must be set before + * sending any Quad SPI command to the memory. Actually, setting the QE bit + * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2 + * and IO3 hence enabling 4 (Quad) I/O lines. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_parse_bfpt(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + struct spi_nor_flash_parameter *params) +{ + struct spi_nor_erase_map *map = ¶ms->erase_map; + struct spi_nor_erase_type *erase_type = map->erase_type; + struct sfdp_bfpt bfpt; + size_t len; + int i, cmd, err; + u32 addr, val; + u16 half; + u8 erase_mask; + + /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */ + if (bfpt_header->length < BFPT_DWORD_MAX_JESD216) + return -EINVAL; + + /* Read the Basic Flash Parameter Table. */ + len = min_t(size_t, sizeof(bfpt), + bfpt_header->length * sizeof(u32)); + addr = SFDP_PARAM_HEADER_PTP(bfpt_header); + memset(&bfpt, 0, sizeof(bfpt)); + err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt); + if (err < 0) + return err; + + /* Fix endianness of the BFPT DWORDs. */ + le32_to_cpu_array(bfpt.dwords, BFPT_DWORD_MAX); + + /* Number of address bytes. */ + switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) { + case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY: + case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4: + nor->addr_width = 3; + break; + + case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY: + nor->addr_width = 4; + break; + + default: + break; + } + + /* Flash Memory Density (in bits). */ + val = bfpt.dwords[BFPT_DWORD(2)]; + if (val & BIT(31)) { + val &= ~BIT(31); + + /* + * Prevent overflows on params->size. Anyway, a NOR of 2^64 + * bits is unlikely to exist so this error probably means + * the BFPT we are reading is corrupted/wrong. + */ + if (val > 63) + return -EINVAL; + + params->size = 1ULL << val; + } else { + params->size = val + 1; + } + params->size >>= 3; /* Convert to bytes. */ + + /* Fast Read settings. */ + for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) { + const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i]; + struct spi_nor_read_command *read; + + if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) { + params->hwcaps.mask &= ~rd->hwcaps; + continue; + } + + params->hwcaps.mask |= rd->hwcaps; + cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps); + read = ¶ms->reads[cmd]; + half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift; + spi_nor_set_read_settings_from_bfpt(read, half, rd->proto); + } + + /* + * Sector Erase settings. Reinitialize the uniform erase map using the + * Erase Types defined in the bfpt table. + */ + erase_mask = 0; + memset(¶ms->erase_map, 0, sizeof(params->erase_map)); + for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) { + const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i]; + u32 erasesize; + u8 opcode; + + half = bfpt.dwords[er->dword] >> er->shift; + erasesize = half & 0xff; + + /* erasesize == 0 means this Erase Type is not supported. */ + if (!erasesize) + continue; + + erasesize = 1U << erasesize; + opcode = (half >> 8) & 0xff; + erase_mask |= BIT(i); + spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize, + opcode, i); + } + spi_nor_init_uniform_erase_map(map, erase_mask, params->size); + /* + * Sort all the map's Erase Types in ascending order with the smallest + * erase size being the first member in the erase_type array. 
+ */ + sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]), + spi_nor_map_cmp_erase_type, NULL); + /* + * Sort the erase types in the uniform region in order to update the + * uniform_erase_type bitmask. The bitmask will be used later on when + * selecting the uniform erase. + */ + spi_nor_regions_sort_erase_types(map); + map->uniform_erase_type = map->uniform_region.offset & + SNOR_ERASE_TYPE_MASK; + + /* Stop here if not JESD216 rev A or later. */ + if (bfpt_header->length == BFPT_DWORD_MAX_JESD216) + return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, + params); + + /* Page size: this field specifies 'N' so the page size = 2^N bytes. */ + val = bfpt.dwords[BFPT_DWORD(11)]; + val &= BFPT_DWORD11_PAGE_SIZE_MASK; + val >>= BFPT_DWORD11_PAGE_SIZE_SHIFT; + params->page_size = 1U << val; + + /* Quad Enable Requirements. */ + switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) { + case BFPT_DWORD15_QER_NONE: + params->quad_enable = NULL; + break; + + case BFPT_DWORD15_QER_SR2_BIT1_BUGGY: + /* + * Writing only one byte to the Status Register has the + * side-effect of clearing Status Register 2. + */ + case BFPT_DWORD15_QER_SR2_BIT1_NO_RD: + /* + * Read Configuration Register (35h) instruction is not + * supported. + */ + nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR; + params->quad_enable = spi_nor_sr2_bit1_quad_enable; + break; + + case BFPT_DWORD15_QER_SR1_BIT6: + nor->flags &= ~SNOR_F_HAS_16BIT_SR; + params->quad_enable = spi_nor_sr1_bit6_quad_enable; + break; + + case BFPT_DWORD15_QER_SR2_BIT7: + nor->flags &= ~SNOR_F_HAS_16BIT_SR; + params->quad_enable = spi_nor_sr2_bit7_quad_enable; + break; + + case BFPT_DWORD15_QER_SR2_BIT1: + /* + * JESD216 rev B or later does not specify if writing only one + * byte to the Status Register clears or not the Status + * Register 2, so let's be cautious and keep the default + * assumption of a 16-bit Write Status (01h) command. + */ + nor->flags |= SNOR_F_HAS_16BIT_SR; + + params->quad_enable = spi_nor_sr2_bit1_quad_enable; + break; + + default: + dev_dbg(nor->dev, "BFPT QER reserved value used\n"); + break; + } + + /* Stop here if not JESD216 rev C or later. */ + if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B) + return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, + params); + + return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params); +} + +/** + * spi_nor_smpt_addr_width() - return the address width used in the + * configuration detection command. + * @nor: pointer to a 'struct spi_nor' + * @settings: configuration detection command descriptor, dword1 + */ +static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings) +{ + switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) { + case SMPT_CMD_ADDRESS_LEN_0: + return 0; + case SMPT_CMD_ADDRESS_LEN_3: + return 3; + case SMPT_CMD_ADDRESS_LEN_4: + return 4; + case SMPT_CMD_ADDRESS_LEN_USE_CURRENT: + default: + return nor->addr_width; + } +} + +/** + * spi_nor_smpt_read_dummy() - return the configuration detection command read + * latency, in clock cycles. 
+ * @nor: pointer to a 'struct spi_nor' + * @settings: configuration detection command descriptor, dword1 + * + * Return: the number of dummy cycles for an SMPT read + */ +static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings) +{ + u8 read_dummy = SMPT_CMD_READ_DUMMY(settings); + + if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE) + return nor->read_dummy; + return read_dummy; +} + +/** + * spi_nor_get_map_in_use() - get the configuration map in use + * @nor: pointer to a 'struct spi_nor' + * @smpt: pointer to the sector map parameter table + * @smpt_len: sector map parameter table length + * + * Return: pointer to the map in use, ERR_PTR(-errno) otherwise. + */ +static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt, + u8 smpt_len) +{ + const u32 *ret; + u8 *buf; + u32 addr; + int err; + u8 i; + u8 addr_width, read_opcode, read_dummy; + u8 read_data_mask, map_id; + + /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */ + buf = kmalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + addr_width = nor->addr_width; + read_dummy = nor->read_dummy; + read_opcode = nor->read_opcode; + + map_id = 0; + /* Determine if there are any optional Detection Command Descriptors */ + for (i = 0; i < smpt_len; i += 2) { + if (smpt[i] & SMPT_DESC_TYPE_MAP) + break; + + read_data_mask = SMPT_CMD_READ_DATA(smpt[i]); + nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]); + nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]); + nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]); + addr = smpt[i + 1]; + + err = spi_nor_read_raw(nor, addr, 1, buf); + if (err) { + ret = ERR_PTR(err); + goto out; + } + + /* + * Build an index value that is used to select the Sector Map + * Configuration that is currently in use. + */ + map_id = map_id << 1 | !!(*buf & read_data_mask); + } + + /* + * If command descriptors are provided, they always precede map + * descriptors in the table. There is no need to start the iteration + * over smpt array all over again. + * + * Find the matching configuration map. + */ + ret = ERR_PTR(-EINVAL); + while (i < smpt_len) { + if (SMPT_MAP_ID(smpt[i]) == map_id) { + ret = smpt + i; + break; + } + + /* + * If there are no more configuration map descriptors and no + * configuration ID matched the configuration identifier, the + * sector address map is unknown. 
+ */ + if (smpt[i] & SMPT_DESC_END) + break; + + /* increment the table index to the next map */ + i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1; + } + + /* fall through */ +out: + kfree(buf); + nor->addr_width = addr_width; + nor->read_dummy = read_dummy; + nor->read_opcode = read_opcode; + return ret; +} + +static void spi_nor_region_mark_end(struct spi_nor_erase_region *region) +{ + region->offset |= SNOR_LAST_REGION; +} + +static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region) +{ + region->offset |= SNOR_OVERLAID_REGION; +} + +/** + * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid + * @region: pointer to a structure that describes a SPI NOR erase region + * @erase: pointer to a structure that describes a SPI NOR erase type + * @erase_type: erase type bitmask + */ +static void +spi_nor_region_check_overlay(struct spi_nor_erase_region *region, + const struct spi_nor_erase_type *erase, + const u8 erase_type) +{ + int i; + + for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) { + if (!(erase[i].size && erase_type & BIT(erase[i].idx))) + continue; + if (region->size & erase[i].size_mask) { + spi_nor_region_mark_overlay(region); + return; + } + } +} + +/** + * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map + * @nor: pointer to a 'struct spi_nor' + * @params: pointer to a duplicate 'struct spi_nor_flash_parameter' that is + * used for storing SFDP parsed data + * @smpt: pointer to the sector map parameter table + * + * Return: 0 on success, -errno otherwise. + */ +static int +spi_nor_init_non_uniform_erase_map(struct spi_nor *nor, + struct spi_nor_flash_parameter *params, + const u32 *smpt) +{ + struct spi_nor_erase_map *map = ¶ms->erase_map; + struct spi_nor_erase_type *erase = map->erase_type; + struct spi_nor_erase_region *region; + u64 offset; + u32 region_count; + int i, j; + u8 uniform_erase_type, save_uniform_erase_type; + u8 erase_type, regions_erase_type; + + region_count = SMPT_MAP_REGION_COUNT(*smpt); + /* + * The regions will be freed when the driver detaches from the + * device. + */ + region = devm_kcalloc(nor->dev, region_count, sizeof(*region), + GFP_KERNEL); + if (!region) + return -ENOMEM; + map->regions = region; + + uniform_erase_type = 0xff; + regions_erase_type = 0; + offset = 0; + /* Populate regions. */ + for (i = 0; i < region_count; i++) { + j = i + 1; /* index for the region dword */ + region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]); + erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]); + region[i].offset = offset | erase_type; + + spi_nor_region_check_overlay(®ion[i], erase, erase_type); + + /* + * Save the erase types that are supported in all regions and + * can erase the entire flash memory. + */ + uniform_erase_type &= erase_type; + + /* + * regions_erase_type mask will indicate all the erase types + * supported in this configuration map. + */ + regions_erase_type |= erase_type; + + offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) + + region[i].size; + } + spi_nor_region_mark_end(®ion[i - 1]); + + save_uniform_erase_type = map->uniform_erase_type; + map->uniform_erase_type = spi_nor_sort_erase_mask(map, + uniform_erase_type); + + if (!regions_erase_type) { + /* + * Roll back to the previous uniform_erase_type mask, SMPT is + * broken. + */ + map->uniform_erase_type = save_uniform_erase_type; + return -EINVAL; + } + + /* + * BFPT advertises all the erase types supported by all the possible + * map configurations. Mask out the erase types that are not supported + * by the current map configuration. 
+ */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (!(regions_erase_type & BIT(erase[i].idx)))
+ spi_nor_mask_erase_type(&erase[i]);
+
+ return 0;
+}
+
+/**
+ * spi_nor_parse_smpt() - parse Sector Map Parameter Table
+ * @nor: pointer to a 'struct spi_nor'
+ * @smpt_header: sector map parameter table header
+ * @params: pointer to a duplicate 'struct spi_nor_flash_parameter'
+ * that is used for storing SFDP parsed data
+ *
+ * This table is optional, but when available, we parse it to identify the
+ * location and size of sectors within the main data array of the flash memory
+ * device and to identify which Erase Types are supported by each sector.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_smpt(struct spi_nor *nor,
+ const struct sfdp_parameter_header *smpt_header,
+ struct spi_nor_flash_parameter *params)
+{
+ const u32 *sector_map;
+ u32 *smpt;
+ size_t len;
+ u32 addr;
+ int ret;
+
+ /* Read the Sector Map Parameter Table. */
+ len = smpt_header->length * sizeof(*smpt);
+ smpt = kmalloc(len, GFP_KERNEL);
+ if (!smpt)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(smpt_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, smpt);
+ if (ret)
+ goto out;
+
+ /* Fix endianness of the SMPT DWORDs. */
+ le32_to_cpu_array(smpt, smpt_header->length);
+
+ sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
+ if (IS_ERR(sector_map)) {
+ ret = PTR_ERR(sector_map);
+ goto out;
+ }
+
+ ret = spi_nor_init_non_uniform_erase_map(nor, params, sector_map);
+ if (ret)
+ goto out;
+
+ spi_nor_regions_sort_erase_types(&params->erase_map);
+ /* fall through */
+out:
+ kfree(smpt);
+ return ret;
+}
+
+/**
+ * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
+ * @nor: pointer to a 'struct spi_nor'.
+ * @param_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the 4-Byte Address Instruction Table length and version.
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be filled.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_4bait(struct spi_nor *nor,
+ const struct sfdp_parameter_header *param_header,
+ struct spi_nor_flash_parameter *params)
+{
+ static const struct sfdp_4bait reads[] = {
+ { SNOR_HWCAPS_READ, BIT(0) },
+ { SNOR_HWCAPS_READ_FAST, BIT(1) },
+ { SNOR_HWCAPS_READ_1_1_2, BIT(2) },
+ { SNOR_HWCAPS_READ_1_2_2, BIT(3) },
+ { SNOR_HWCAPS_READ_1_1_4, BIT(4) },
+ { SNOR_HWCAPS_READ_1_4_4, BIT(5) },
+ { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
+ { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
+ { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
+ };
+ static const struct sfdp_4bait programs[] = {
+ { SNOR_HWCAPS_PP, BIT(6) },
+ { SNOR_HWCAPS_PP_1_1_4, BIT(7) },
+ { SNOR_HWCAPS_PP_1_4_4, BIT(8) },
+ };
+ static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
+ { 0u /* not used */, BIT(9) },
+ { 0u /* not used */, BIT(10) },
+ { 0u /* not used */, BIT(11) },
+ { 0u /* not used */, BIT(12) },
+ };
+ struct spi_nor_pp_command *params_pp = params->page_programs;
+ struct spi_nor_erase_map *map = &params->erase_map;
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ u32 *dwords;
+ size_t len;
+ u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
+ int i, ret;
+
+ if (param_header->major != SFDP_JESD216_MAJOR ||
+ param_header->length < SFDP_4BAIT_DWORD_MAX)
+ return -EINVAL;
+
+ /* Read the 4-byte Address Instruction Table. */
+ len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
+
+ /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+ dwords = kmalloc(len, GFP_KERNEL);
+ if (!dwords)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(param_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, dwords);
+ if (ret)
+ goto out;
+
+ /* Fix endianness of the 4BAIT DWORDs. */
+ le32_to_cpu_array(dwords, SFDP_4BAIT_DWORD_MAX);
+
+ /*
+ * Compute the subset of (Fast) Read commands for which the 4-byte
+ * version is supported.
+ */
+ discard_hwcaps = 0;
+ read_hwcaps = 0;
+ for (i = 0; i < ARRAY_SIZE(reads); i++) {
+ const struct sfdp_4bait *read = &reads[i];
+
+ discard_hwcaps |= read->hwcaps;
+ if ((params->hwcaps.mask & read->hwcaps) &&
+ (dwords[0] & read->supported_bit))
+ read_hwcaps |= read->hwcaps;
+ }
+
+ /*
+ * Compute the subset of Page Program commands for which the 4-byte
+ * version is supported.
+ */
+ pp_hwcaps = 0;
+ for (i = 0; i < ARRAY_SIZE(programs); i++) {
+ const struct sfdp_4bait *program = &programs[i];
+
+ /*
+ * The 4 Byte Address Instruction (Optional) Table is the only
+ * SFDP table that indicates support for Page Program Commands.
+ * Bypass the params->hwcaps.mask and consider 4BAIT the biggest
+ * authority for specifying Page Program support.
+ */
+ discard_hwcaps |= program->hwcaps;
+ if (dwords[0] & program->supported_bit)
+ pp_hwcaps |= program->hwcaps;
+ }
+
+ /*
+ * Compute the subset of Sector Erase commands for which the 4-byte
+ * version is supported.
+ */
+ erase_mask = 0;
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ const struct sfdp_4bait *erase = &erases[i];
+
+ if (dwords[0] & erase->supported_bit)
+ erase_mask |= BIT(i);
+ }
+
+ /* Replicate the sort done for the map's erase types in BFPT. */
+ erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
+
+ /*
+ * We need at least one 4-byte op code per read, program and erase
+ * operation; the .read(), .write() and .erase() hooks share the
+ * nor->addr_width value.
+ */
+ if (!read_hwcaps || !pp_hwcaps || !erase_mask)
+ goto out;
+
+ /*
+ * Discard all operations from the 4-byte instruction set which are
+ * not supported by this memory.
+ */
+ params->hwcaps.mask &= ~discard_hwcaps;
+ params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
+
+ /* Use the 4-byte address instruction set. */
+ for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
+ struct spi_nor_read_command *read_cmd = &params->reads[i];
+
+ read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
+ }
+
+ /* 4BAIT is the only SFDP table that indicates page program support. */
+ if (pp_hwcaps & SNOR_HWCAPS_PP)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
+ SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
+ if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4_4B,
+ SNOR_PROTO_1_1_4);
+ if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
+ SPINOR_OP_PP_1_4_4_4B,
+ SNOR_PROTO_1_4_4);
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ if (erase_mask & BIT(i))
+ erase_type[i].opcode = (dwords[1] >>
+ erase_type[i].idx * 8) & 0xFF;
+ else
+ spi_nor_mask_erase_type(&erase_type[i]);
+ }
+
+ /*
+ * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
+ * later because we already did the conversion to 4byte opcodes. Also,
+ * the latter function implements a legacy quirk for the erase size of
+ * Spansion memory. However, this quirk is no longer needed with new
+ * SFDP compliant memories.
+ */
+ nor->addr_width = 4;
+ nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
+
+ /* fall through */
+out:
+ kfree(dwords);
+ return ret;
+}
+
+/**
+ * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be
+ * filled
+ *
+ * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
+ * specification. This is a standard which tends to be supported by almost all
+ * (Q)SPI memory manufacturers. These parameter tables allow us to learn at
+ * runtime the main parameters needed to perform basic SPI flash operations
+ * such as Fast Read, Page Program or Sector Erase commands.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_parse_sfdp(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params)
+{
+ const struct sfdp_parameter_header *param_header, *bfpt_header;
+ struct sfdp_parameter_header *param_headers = NULL;
+ struct sfdp_header header;
+ struct device *dev = nor->dev;
+ size_t psize;
+ int i, err;
+
+ /* Get the SFDP header. */
+ err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
+ if (err < 0)
+ return err;
+
+ /* Check the SFDP header version. */
+ if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
+ header.major != SFDP_JESD216_MAJOR)
+ return -EINVAL;
+
+ /*
+ * Verify that the first and only mandatory parameter header is a
+ * Basic Flash Parameter Table header as specified in JESD216.
+ */
+ bfpt_header = &header.bfpt_header;
+ if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
+ bfpt_header->major != SFDP_JESD216_MAJOR)
+ return -EINVAL;
+
+ /*
+ * Allocate memory then read all parameter headers with a single
+ * Read SFDP command. These parameter headers will actually be parsed
+ * twice: a first time to get the latest revision of the basic flash
+ * parameter table, then a second time to handle the supported optional
+ * tables.
+ * Hence we read the parameter headers once and for all to reduce the
+ * processing time. Also we use kmalloc() instead of devm_kmalloc()
+ * because we don't need to keep these parameter headers: the allocated
+ * memory is always released with kfree() before exiting this function.
+ */
+ if (header.nph) {
+ psize = header.nph * sizeof(*param_headers);
+
+ param_headers = kmalloc(psize, GFP_KERNEL);
+ if (!param_headers)
+ return -ENOMEM;
+
+ err = spi_nor_read_sfdp(nor, sizeof(header),
+ psize, param_headers);
+ if (err < 0) {
+ dev_dbg(dev, "failed to read SFDP parameter headers\n");
+ goto exit;
+ }
+ }
+
+ /*
+ * Check other parameter headers to get the latest revision of
+ * the basic flash parameter table.
+ */
+ for (i = 0; i < header.nph; i++) {
+ param_header = &param_headers[i];
+
+ if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
+ param_header->major == SFDP_JESD216_MAJOR &&
+ (param_header->minor > bfpt_header->minor ||
+ (param_header->minor == bfpt_header->minor &&
+ param_header->length > bfpt_header->length)))
+ bfpt_header = param_header;
+ }
+
+ err = spi_nor_parse_bfpt(nor, bfpt_header, params);
+ if (err)
+ goto exit;
+
+ /* Parse optional parameter tables.
*/ + for (i = 0; i < header.nph; i++) { + param_header = ¶m_headers[i]; + + switch (SFDP_PARAM_HEADER_ID(param_header)) { + case SFDP_SECTOR_MAP_ID: + err = spi_nor_parse_smpt(nor, param_header, params); + break; + + case SFDP_4BAIT_ID: + err = spi_nor_parse_4bait(nor, param_header, params); + break; + + default: + break; + } + + if (err) { + dev_warn(dev, "Failed to parse optional parameter table: %04x\n", + SFDP_PARAM_HEADER_ID(param_header)); + /* + * Let's not drop all information we extracted so far + * if optional table parsers fail. In case of failing, + * each optional parser is responsible to roll back to + * the previously known spi_nor data. + */ + err = 0; + } + } + +exit: + kfree(param_headers); + return err; +} diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h new file mode 100644 index 000000000..7f9846b3a --- /dev/null +++ b/drivers/mtd/spi-nor/sfdp.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#ifndef __LINUX_MTD_SFDP_H +#define __LINUX_MTD_SFDP_H + +/* SFDP revisions */ +#define SFDP_JESD216_MAJOR 1 +#define SFDP_JESD216_MINOR 0 +#define SFDP_JESD216A_MINOR 5 +#define SFDP_JESD216B_MINOR 6 + +/* Basic Flash Parameter Table */ + +/* + * JESD216 rev D defines a Basic Flash Parameter Table of 20 DWORDs. + * They are indexed from 1 but C arrays are indexed from 0. + */ +#define BFPT_DWORD(i) ((i) - 1) +#define BFPT_DWORD_MAX 20 + +struct sfdp_bfpt { + u32 dwords[BFPT_DWORD_MAX]; +}; + +/* The first version of JESD216 defined only 9 DWORDs. */ +#define BFPT_DWORD_MAX_JESD216 9 +#define BFPT_DWORD_MAX_JESD216B 16 + +/* 1st DWORD. */ +#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16) +#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17) +#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17) +#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17) +#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17) +#define BFPT_DWORD1_DTR BIT(19) +#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20) +#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21) +#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22) + +/* 5th DWORD. */ +#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0) +#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4) + +/* 11th DWORD. */ +#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4 +#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4) + +/* 15th DWORD. */ + +/* + * (from JESD216 rev B) + * Quad Enable Requirements (QER): + * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4 + * reads based on instruction. DQ3/HOLD# functions are hold during + * instruction phase. + * - 001b: QE is bit 1 of status register 2. It is set via Write Status with + * two data bytes where bit 1 of the second byte is one. + * [...] + * Writing only one byte to the status register has the side-effect of + * clearing status register 2, including the QE bit. The 100b code is + * used if writing one byte to the status register does not modify + * status register 2. + * - 010b: QE is bit 6 of status register 1. It is set via Write Status with + * one data byte where bit 6 is one. + * [...] + * - 011b: QE is bit 7 of status register 2. It is set via Write status + * register 2 instruction 3Eh with one data byte where bit 7 is one. + * [...] + * The status register 2 is read using instruction 3Fh. + * - 100b: QE is bit 1 of status register 2. It is set via Write Status with + * two data bytes where bit 1 of the second byte is one. + * [...] 
+ * In contrast to the 001b code, writing one byte to the status + * register does not modify status register 2. + * - 101b: QE is bit 1 of status register 2. Status register 1 is read using + * Read Status instruction 05h. Status register2 is read using + * instruction 35h. QE is set via Write Status instruction 01h with + * two data bytes where bit 1 of the second byte is one. + * [...] + */ +#define BFPT_DWORD15_QER_MASK GENMASK(22, 20) +#define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */ +#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20) +#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */ +#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20) +#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20) +#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */ + +struct sfdp_parameter_header { + u8 id_lsb; + u8 minor; + u8 major; + u8 length; /* in double words */ + u8 parameter_table_pointer[3]; /* byte address */ + u8 id_msb; +}; + +int spi_nor_parse_sfdp(struct spi_nor *nor, + struct spi_nor_flash_parameter *params); + +#endif /* __LINUX_MTD_SFDP_H */ diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c new file mode 100644 index 000000000..8429b4af9 --- /dev/null +++ b/drivers/mtd/spi-nor/spansion.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static int +s25fs_s_post_bfpt_fixups(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt, + struct spi_nor_flash_parameter *params) +{ + /* + * The S25FS-S chip family reports 512-byte pages in BFPT but + * in reality the write buffer still wraps at the safe default + * of 256 bytes. Overwrite the page size advertised by BFPT + * to get the writes working. + */ + params->page_size = 256; + + return 0; +} + +static struct spi_nor_fixups s25fs_s_fixups = { + .post_bfpt = s25fs_s_post_bfpt_fixups, +}; + +static const struct flash_info spansion_parts[] = { + /* Spansion/Cypress -- single (large) sector size only, at least + * for the chips listed here (without boot sectors). 
+ */ + { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + USE_CLSR) }, + { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + USE_CLSR) }, + { "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + USE_CLSR) }, + { "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + USE_CLSR) }, + { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | USE_CLSR) }, + { "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) + .fixups = &s25fs_s_fixups, }, + { "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + USE_CLSR) }, + { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + USE_CLSR) }, + { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) + .fixups = &s25fs_s_fixups, }, + { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, + { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, + { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + USE_CLSR) }, + { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + USE_CLSR) }, + { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) }, + { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) }, + { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) }, + { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) }, + { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) }, + { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) }, + { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) }, + { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, + SECT_4K | SPI_NOR_DUAL_READ) }, + { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, + SECT_4K | SPI_NOR_DUAL_READ) }, + { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_4B_OPCODES) }, + { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_4B_OPCODES) }, + { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_4B_OPCODES) }, + { "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1, + SPI_NOR_NO_ERASE) }, +}; + +static void spansion_post_sfdp_fixups(struct spi_nor *nor) +{ + if (nor->params->size <= SZ_16M) + return; + + nor->flags |= SNOR_F_4B_OPCODES; + /* No small sector erase for 4-byte command set */ + nor->erase_opcode = SPINOR_OP_SE; + nor->mtd.erasesize = nor->info->sector_size; +} + +static const 
struct spi_nor_fixups spansion_fixups = { + .post_sfdp = spansion_post_sfdp_fixups, +}; + +const struct spi_nor_manufacturer spi_nor_spansion = { + .name = "spansion", + .parts = spansion_parts, + .nparts = ARRAY_SIZE(spansion_parts), + .fixups = &spansion_fixups, +}; diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c new file mode 100644 index 000000000..0ab07624f --- /dev/null +++ b/drivers/mtd/spi-nor/sst.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static const struct flash_info sst_parts[] = { + /* SST -- large erase sizes are "overlays", "sectors" are 4K */ + { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, + SECT_4K | SST_WRITE) }, + { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, + SECT_4K | SST_WRITE) }, + { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, + SECT_4K | SST_WRITE) }, + { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, + SECT_4K | SST_WRITE) }, + { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_4BIT_BP) }, + { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, + SECT_4K | SST_WRITE) }, + { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, + SECT_4K | SST_WRITE) }, + { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, + SECT_4K | SST_WRITE) }, + { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) }, + { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) }, + { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, + SECT_4K | SST_WRITE) }, + { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, + SECT_4K | SST_WRITE) }, + { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, + { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ) }, + { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, +}; + +static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + size_t actual = 0; + int ret; + + dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); + + ret = spi_nor_lock_and_prep(nor); + if (ret) + return ret; + + ret = spi_nor_write_enable(nor); + if (ret) + goto out; + + nor->sst_write_second = false; + + /* Start write from odd address. */ + if (to % 2) { + nor->program_opcode = SPINOR_OP_BP; + + /* write one byte. */ + ret = spi_nor_write_data(nor, to, 1, buf); + if (ret < 0) + goto out; + WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret); + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto out; + + to++; + actual++; + } + + /* Write out most of the data here. */ + for (; actual < len - 1; actual += 2) { + nor->program_opcode = SPINOR_OP_AAI_WP; + + /* write two bytes. */ + ret = spi_nor_write_data(nor, to, 2, buf + actual); + if (ret < 0) + goto out; + WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret); + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto out; + to += 2; + nor->sst_write_second = true; + } + nor->sst_write_second = false; + + ret = spi_nor_write_disable(nor); + if (ret) + goto out; + + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto out; + + /* Write out trailing byte if it exists. 
*/ + if (actual != len) { + ret = spi_nor_write_enable(nor); + if (ret) + goto out; + + nor->program_opcode = SPINOR_OP_BP; + ret = spi_nor_write_data(nor, to, 1, buf + actual); + if (ret < 0) + goto out; + WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret); + ret = spi_nor_wait_till_ready(nor); + if (ret) + goto out; + + actual += 1; + + ret = spi_nor_write_disable(nor); + } +out: + *retlen += actual; + spi_nor_unlock_and_unprep(nor); + return ret; +} + +static void sst_default_init(struct spi_nor *nor) +{ + nor->flags |= SNOR_F_HAS_LOCK; +} + +static void sst_post_sfdp_fixups(struct spi_nor *nor) +{ + if (nor->info->flags & SST_WRITE) + nor->mtd._write = sst_write; +} + +static const struct spi_nor_fixups sst_fixups = { + .default_init = sst_default_init, + .post_sfdp = sst_post_sfdp_fixups, +}; + +const struct spi_nor_manufacturer spi_nor_sst = { + .name = "sst", + .parts = sst_parts, + .nparts = ARRAY_SIZE(sst_parts), + .fixups = &sst_fixups, +}; diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c new file mode 100644 index 000000000..e5dfa786f --- /dev/null +++ b/drivers/mtd/spi-nor/winbond.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static int +w25q256_post_bfpt_fixups(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt, + struct spi_nor_flash_parameter *params) +{ + /* + * W25Q256JV supports 4B opcodes but W25Q256FV does not. + * Unfortunately, Winbond has re-used the same JEDEC ID for both + * variants which prevents us from defining a new entry in the parts + * table. + * To differentiate between W25Q256JV and W25Q256FV check SFDP header + * version: only JV has JESD216A compliant structure (version 5). 
+ */ + if (bfpt_header->major == SFDP_JESD216_MAJOR && + bfpt_header->minor == SFDP_JESD216A_MINOR) + nor->flags |= SNOR_F_4B_OPCODES; + + return 0; +} + +static struct spi_nor_fixups w25q256_fixups = { + .post_bfpt = w25q256_post_bfpt_fixups, +}; + +static const struct flash_info winbond_parts[] = { + /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ + { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) }, + { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) }, + { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) }, + { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) }, + { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) }, + { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, + { "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, + { "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | + SPI_NOR_HAS_TB) }, + { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) }, + { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) }, + { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) }, + { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, + { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, + { "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, + { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128, SECT_4K) }, + { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, + { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, + { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, + { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + .fixups = &w25q256_fixups }, + { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024, + SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) }, +}; + +/** + * winbond_set_4byte_addr_mode() - Set 4-byte address mode for Winbond flashes. 
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int winbond_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ ret = spi_nor_set_4byte_addr_mode(nor, enable);
+ if (ret || enable)
+ return ret;
+
+ /*
+ * On Winbond W25Q256FV, leaving 4byte mode causes the Extended Address
+ * Register to be set to 1, so all 3-byte-address reads come from the
+ * second 16M. We must clear the register to enable normal behavior.
+ */
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_write_ear(nor, 0);
+ if (ret)
+ return ret;
+
+ return spi_nor_write_disable(nor);
+}
+
+static void winbond_default_init(struct spi_nor *nor)
+{
+ nor->params->set_4byte_addr_mode = winbond_set_4byte_addr_mode;
+}
+
+static const struct spi_nor_fixups winbond_fixups = {
+ .default_init = winbond_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_winbond = {
+ .name = "winbond",
+ .parts = winbond_parts,
+ .nparts = ARRAY_SIZE(winbond_parts),
+ .fixups = &winbond_fixups,
+};
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
new file mode 100644
index 000000000..1138bdbf4
--- /dev/null
+++ b/drivers/mtd/spi-nor/xilinx.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info xilinx_parts[] = {
+ /* Xilinx S3AN Internal Flash */
+ { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
+ { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
+ { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
+ { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
+ { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
+};
+
+/*
+ * This code converts an address to the Default Address Mode, which has
+ * non-power-of-two page sizes. We must support this mode because it is the
+ * default mode supported by Xilinx tools, it can access the whole flash area,
+ * and changing over to the Power-of-two mode is irreversible and corrupts the
+ * original data.
+ * Addr can safely be unsigned int, the biggest S3AN device is smaller than
+ * 4 MiB.
+ */
+static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
+{
+ u32 offset, page;
+
+ offset = addr % nor->page_size;
+ page = addr / nor->page_size;
+ page <<= (nor->page_size > 512) ? 10 : 9;
+
+ return page | offset;
+}
+
+static int xilinx_nor_setup(struct spi_nor *nor,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ int ret;
+
+ ret = spi_nor_xread_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ nor->erase_opcode = SPINOR_OP_XSE;
+ nor->program_opcode = SPINOR_OP_XPP;
+ nor->read_opcode = SPINOR_OP_READ;
+ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+
+ /*
+ * These flashes have a page size of 264 or 528 bytes (known as
+ * Default addressing mode). It can be changed to a more standard
+ * Power-of-two mode where the page size is 256/512. This comes
+ * with a price: there is 3% less space, the data is corrupted,
+ * and the page size cannot be changed back to default addressing
+ * mode.
+ *
+ * The current addressing mode can be read from the XRDSR register
+ * and should not be changed, because it is a destructive operation.
+ */
+ if (nor->bouncebuf[0] & XSR_PAGESIZE) {
+ /* Flash in Power of 2 mode */
+ nor->page_size = (nor->page_size == 264) ?
256 : 512; + nor->mtd.writebufsize = nor->page_size; + nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors; + nor->mtd.erasesize = 8 * nor->page_size; + } else { + /* Flash in Default addressing mode */ + nor->params->convert_addr = s3an_convert_addr; + nor->mtd.erasesize = nor->info->sector_size; + } + + return 0; +} + +static void xilinx_post_sfdp_fixups(struct spi_nor *nor) +{ + nor->params->setup = xilinx_nor_setup; +} + +static const struct spi_nor_fixups xilinx_fixups = { + .post_sfdp = xilinx_post_sfdp_fixups, +}; + +const struct spi_nor_manufacturer spi_nor_xilinx = { + .name = "xilinx", + .parts = xilinx_parts, + .nparts = ARRAY_SIZE(xilinx_parts), + .fixups = &xilinx_fixups, +}; diff --git a/drivers/mtd/spi-nor/xmc.c b/drivers/mtd/spi-nor/xmc.c new file mode 100644 index 000000000..2c7773b68 --- /dev/null +++ b/drivers/mtd/spi-nor/xmc.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2005, Intec Automation Inc. + * Copyright (C) 2014, Freescale Semiconductor, Inc. + */ + +#include <linux/mtd/spi-nor.h> + +#include "core.h" + +static const struct flash_info xmc_parts[] = { + /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */ + { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, +}; + +const struct spi_nor_manufacturer spi_nor_xmc = { + .name = "xmc", + .parts = xmc_parts, + .nparts = ARRAY_SIZE(xmc_parts), +}; |
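
For orientation, the per-vendor files added in this series all share the same minimal shape: a flash_info table built with the INFO()/INFO6() macros from core.h, plus an exported struct spi_nor_manufacturer describing that table, optionally wired to fixup hooks. The sketch below shows that shape for a hypothetical vendor; the "acme" name, the part name and the JEDEC ID 0x123456 are invented for illustration only, and a real driver would additionally have to be referenced from the core's manufacturer list and the subsystem build, neither of which is part of this excerpt.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch: "acme", "acme25q64" and JEDEC ID 0x123456 are placeholders. */

#include <linux/mtd/spi-nor.h>

#include "core.h"

static const struct flash_info acme_parts[] = {
	/* 8 MiB part: 128 sectors of 64 KiB, 4K erase, dual/quad fast reads */
	{ "acme25q64", INFO(0x123456, 0, 64 * 1024, 128,
			    SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
};

const struct spi_nor_manufacturer spi_nor_acme = {
	.name = "acme",
	.parts = acme_parts,
	.nparts = ARRAY_SIZE(acme_parts),
};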